Initial mega commit of Kayobe

parent 72ba304e78
commit b654a70e65

10 .gitignore vendored

@@ -13,3 +13,13 @@ ansible/*.retry
# Others
.DS_Store
.vimrc

# Ansible Galaxy roles
ansible/roles/ahuffman.resolv/
ansible/roles/jriguera.configdrive/
ansible/roles/MichaelRigart.interfaces/
ansible/roles/resmo.ntp/
ansible/roles/yatesr.timezone/

# Virtualenv
ansible/kolla-venv

37 PROBLEMS

@@ -4,6 +4,8 @@ ansible PTY allocation request failed
In /var/log/secure: Unable to open pty: No such file or directory
none /dev/pts devpts gid=5,mode=620 0 0

Seems to happen when using docker cp.

Kolla patches
=============

@@ -13,3 +15,38 @@ Ironic inspector
================

Failed to start due to an iptables error.
See https://bugs.launchpad.net/kolla/+bug/1624457.

Bare metal provisioning
=======================

- The Neutron external network needs configuring and an IP address.
- Install bridge-utils.
- Create br-eth0 with an IP address; leave eth0 without an IP.
- Create a veth pair and bring it up (see the task sketch at the end of this section).
- Plug one end into br-eth0.
- Set neutron_external_interface=patch-br-ex in globals.yml.

- The provisioning network is different from the API network.
  Likely we can use the same network for this in future.
- Added to /etc/kolla/ironic-conductor/ironic.conf:
    [DEFAULT]
    api_url=http://<provision_ip>:6385
    [pxe]
    tftp_server=<provision_ip>

- Add to /etc/kolla/haproxy/haproxy.cfg:
    listen ironic_pxe_api
      bind 10.122.100.252:6385
      server stg-alaska 10.121.100.252:6385 check inter 2000 rise 2 fall 5

- The iscsi_tcp kernel module must be loaded (modprobe iscsi_tcp).
- ironic.conf: [agent]deploy_logs_local_path=/var/log/kolla/ironic/deploy
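
The veth patch link described in the notes above can also be expressed as Ansible
tasks. This is a rough, untested sketch using the command module; the interface
names (br-eth0, patch-br-eth0, patch-br-ex) are illustrative only and must match
whatever is configured in globals.yml. In this commit, ansible/network.yml drives
a veth role to make an equivalent patch link persistent.

    - name: Ensure bridge-utils is installed
      yum:
        name: bridge-utils
        state: installed
      become: True

    - name: Create a veth pair to patch br-eth0 into the Neutron bridge
      command: ip link add patch-br-eth0 type veth peer name patch-br-ex
      become: True

    - name: Plug one end of the veth pair into br-eth0
      command: brctl addif br-eth0 patch-br-eth0
      become: True

    - name: Bring both ends of the veth pair up
      command: ip link set {{ item }} up
      become: True
      with_items:
        - patch-br-eth0
        - patch-br-ex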

Bifrost
=======

- Set log_dir=/var/log/kolla/ironic in ironic.conf.
- Create kolla_logs/ironic and chown it to ironic:ironic.
- The os_ironic module will not access the root_device property.

71 Vagrantfile vendored Normal file

@@ -0,0 +1,71 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

# All Vagrant configuration is done below. The "2" in Vagrant.configure
# configures the configuration version (we support older styles for
# backwards compatibility). Please don't change it unless you know what
# you're doing.
Vagrant.configure("2") do |config|
  # The most common configuration options are documented and commented below.
  # For a complete reference, please see the online documentation at
  # https://docs.vagrantup.com.

  # Every Vagrant development environment requires a box. You can search for
  # boxes at https://atlas.hashicorp.com/search.
  config.vm.box = "centos/7"

  # Disable automatic box update checking. If you disable this, then
  # boxes will only be checked for updates when the user runs
  # `vagrant box outdated`. This is not recommended.
  # config.vm.box_check_update = false

  # Create a forwarded port mapping which allows access to a specific port
  # within the machine from a port on the host machine. In the example below,
  # accessing "localhost:8080" will access port 80 on the guest machine.
  # config.vm.network "forwarded_port", guest: 80, host: 8080

  # Create a private network, which allows host-only access to the machine
  # using a specific IP.
  # config.vm.network "private_network", ip: "192.168.33.10"

  # Create a public network, which generally matches a bridged network.
  # Bridged networks make the machine appear as another physical device on
  # your network.
  # config.vm.network "public_network"

  # Share an additional folder to the guest VM. The first argument is
  # the path on the host to the actual folder. The second argument is
  # the path on the guest to mount the folder. And the optional third
  # argument is a set of non-required options.
  # config.vm.synced_folder "../data", "/vagrant_data"

  # Provider-specific configuration so you can fine-tune various
  # backing providers for Vagrant. These expose provider-specific options.
  # Example for VirtualBox:
  #
  config.vm.provider "virtualbox" do |vb|
    # Display the VirtualBox GUI when booting the machine
    #vb.gui = true

    # Customize the amount of memory on the VM:
    vb.memory = "4096"
  end
  #
  # View the documentation for the provider you are using for more
  # information on available options.

  # Define a Vagrant Push strategy for pushing to Atlas. Other push strategies
  # such as FTP and Heroku are also available. See the documentation at
  # https://docs.vagrantup.com/v2/push/atlas.html for more information.
  # config.push.define "atlas" do |push|
  #   push.app = "YOUR_ATLAS_USERNAME/YOUR_APPLICATION_NAME"
  # end

  # Enable provisioning with a shell script. Additional provisioners such as
  # Puppet, Chef, Ansible, Salt, and Docker are also available. Please see the
  # documentation for more information about their specific syntax and use.
  # config.vm.provision "shell", inline: <<-SHELL
  #   apt-get update
  #   apt-get install -y apache2
  # SHELL
end

5 ansible/bootstrap.yml Normal file

@@ -0,0 +1,5 @@
---
- name: Ensure configuration management host is bootstrapped
  hosts: config-mgmt
  roles:
    - role: bootstrap

5 ansible/disable-selinux.yml Normal file

@@ -0,0 +1,5 @@
---
- name: Disable SELinux and reboot if required
  hosts: controllers:seed
  roles:
    - role: disable-selinux

5 ansible/docker.yml Normal file

@@ -0,0 +1,5 @@
---
- name: Ensure docker is configured
  hosts: docker
  roles:
    - role: docker

22 ansible/dump-config.yml Normal file

@@ -0,0 +1,22 @@
---
- hosts: all
  gather_facts: "{{ gather_facts | default(False) }}"
  vars:
    dump_config_path: /tmp/kayobe-dump-config
  tasks:
    - name: Create configuration dump directory
      file:
        path: "{{ dump_config_path }}"
        state: directory

    - name: Write host config to file
      local_action:
        module: copy
        content: "{{ hostvars[inventory_hostname] | to_nice_yaml }}"
        dest: "{{ dump_config_path }}/{{ inventory_hostname }}.yml"

    - name: Write merged config to file
      local_action:
        module: copy
        content: "{{ hostvars | merge_config | to_nice_yaml }}"
        dest: "{{ dump_config_path }}/merged.yml"

196 ansible/filter_plugins/networks.py Normal file

@@ -0,0 +1,196 @@
from ansible import errors
import jinja2
import netaddr


def _get_hostvar(context, var_name, inventory_hostname=None):
    if inventory_hostname is None:
        namespace = context
    else:
        if inventory_hostname not in context['hostvars']:
            raise errors.AnsibleFilterError(
                "Inventory hostname '%s' not in hostvars" % inventory_hostname)
        namespace = context["hostvars"][inventory_hostname]
    return namespace.get(var_name)


@jinja2.contextfilter
def net_attr(context, name, attr, inventory_hostname=None):
    var_name = "%s_%s" % (name, attr)
    return _get_hostvar(context, var_name, inventory_hostname)


def _make_attr_filter(attr):
    @jinja2.contextfilter
    def func(context, name, inventory_hostname=None):
        return net_attr(context, name, attr, inventory_hostname)
    return func


@jinja2.contextfilter
def net_vip_address(context, name, inventory_hostname=None):
    return net_attr(context, name, 'vip_address', inventory_hostname)


@jinja2.contextfilter
def net_ip(context, name, inventory_hostname=None):
    ips = net_attr(context, name, 'ips', inventory_hostname)
    if ips:
        if inventory_hostname is None:
            inventory_hostname = _get_hostvar(context, "inventory_hostname")
        return ips.get(inventory_hostname)


@jinja2.contextfilter
def net_interface(context, name, inventory_hostname=None):
    return net_attr(context, name, 'interface', inventory_hostname)


@jinja2.contextfilter
def net_cidr(context, name, inventory_hostname=None):
    return net_attr(context, name, 'cidr', inventory_hostname)


@jinja2.contextfilter
def net_gateway(context, name, inventory_hostname=None):
    return net_attr(context, name, 'gateway', inventory_hostname)


@jinja2.contextfilter
def net_allocation_pool_start(context, name, inventory_hostname=None):
    return net_attr(context, name, 'allocation_pool_start', inventory_hostname)


@jinja2.contextfilter
def net_allocation_pool_end(context, name, inventory_hostname=None):
    return net_attr(context, name, 'allocation_pool_end', inventory_hostname)


@jinja2.contextfilter
def net_vlan(context, name, inventory_hostname=None):
    return net_attr(context, name, 'vlan', inventory_hostname)


@jinja2.contextfilter
def net_bridge_ports(context, name, inventory_hostname=None):
    return net_attr(context, name, 'bridge_ports', inventory_hostname)


@jinja2.contextfilter
def net_interface_obj(context, name, inventory_hostname=None):
    device = net_interface(context, name, inventory_hostname)
    if not device:
        raise errors.AnsibleFilterError(
            "Network interface for network '%s' on host '%s' not found" %
            (name, inventory_hostname))
    ip = net_ip(context, name, inventory_hostname)
    cidr = net_cidr(context, name, inventory_hostname)
    netmask = str(netaddr.IPNetwork(cidr).netmask)
    gateway = net_gateway(context, name, inventory_hostname)
    vlan = net_vlan(context, name, inventory_hostname)
    interface = {
        'device': device,
        'address': ip,
        'netmask': netmask,
        'gateway': gateway,
        'vlan': vlan,
        'bootproto': 'static',
        'onboot': 'yes',
    }
    interface = {k: v for k, v in interface.items() if v is not None}
    return interface


@jinja2.contextfilter
def net_bridge_obj(context, name, inventory_hostname=None):
    device = net_interface(context, name, inventory_hostname)
    if not device:
        raise errors.AnsibleFilterError(
            "Network interface for network '%s' on host '%s' not found" %
            (name, inventory_hostname))
    ip = net_ip(context, name, inventory_hostname)
    cidr = net_cidr(context, name, inventory_hostname)
    netmask = str(netaddr.IPNetwork(cidr).netmask)
    gateway = net_gateway(context, name, inventory_hostname)
    vlan = net_vlan(context, name, inventory_hostname)
    ports = net_bridge_ports(context, name, inventory_hostname)
    interface = {
        'device': device,
        'address': ip,
        'netmask': netmask,
        'gateway': gateway,
        'vlan': vlan,
        'ports': ports,
        'bootproto': 'static',
        'onboot': 'yes',
    }
    interface = {k: v for k, v in interface.items() if v is not None}
    return interface


@jinja2.contextfilter
def net_is_ether(context, name, inventory_hostname=None):
    return net_bridge_ports(context, name) is None


@jinja2.contextfilter
def net_is_bridge(context, name, inventory_hostname=None):
    return net_bridge_ports(context, name) is not None


@jinja2.contextfilter
def net_select_ethers(context, names):
    return [name for name in names if net_is_ether(context, name)]


@jinja2.contextfilter
def net_select_bridges(context, names):
    return [name for name in names if net_is_bridge(context, name)]


@jinja2.contextfilter
def net_configdrive_network_device(context, name, inventory_hostname=None):
    device = net_interface(context, name, inventory_hostname)
    if not device:
        raise errors.AnsibleFilterError(
            "Network interface for network '%s' on host '%s' not found" %
            (name, inventory_hostname))
    ip = net_ip(context, name, inventory_hostname)
    cidr = net_cidr(context, name, inventory_hostname)
    netmask = str(netaddr.IPNetwork(cidr).netmask) if cidr is not None else None
    gateway = net_gateway(context, name, inventory_hostname)
    bootproto = 'static' if ip is not None else 'dhcp'
    interface = {
        'device': device,
        'address': ip,
        'netmask': netmask,
        'gateway': gateway,
        'bootproto': bootproto,
    }
    interface = {k: v for k, v in interface.items() if v is not None}
    return interface


class FilterModule(object):
    """Networking filters."""

    def filters(self):
        return {
            'net_attr': net_attr,
            'net_vip_address': net_vip_address,
            'net_fqdn': _make_attr_filter('fqdn'),
            'net_ip': net_ip,
            'net_interface': net_interface,
            'net_cidr': net_cidr,
            'net_gateway': net_gateway,
            'net_allocation_pool_start': net_allocation_pool_start,
            'net_allocation_pool_end': net_allocation_pool_end,
            'net_vlan': net_vlan,
            'net_interface_obj': net_interface_obj,
            'net_bridge_obj': net_bridge_obj,
            'net_is_ether': net_is_ether,
            'net_is_bridge': net_is_bridge,
            'net_select_ethers': net_select_ethers,
            'net_select_bridges': net_select_bridges,
            'net_configdrive_network_device': net_configdrive_network_device,
        }
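
These filters all resolve a network attribute by convention: the attribute `attr` of
network `name` is read from a host variable called `<name>_<attr>` (see net_attr above).
A hedged illustration of that convention in Ansible YAML; the variable values and the
hostname seed0 below are made up for the example and are not part of this commit:

    # Example inventory/group variables for the network named 'provision_oc_net'.
    provision_oc_net_cidr: "10.0.0.0/24"
    provision_oc_net_gateway: "10.0.0.1"
    provision_oc_net_interface: "em1"
    provision_oc_net_ips:
      seed0: "10.0.0.5"

    # Usage in a playbook or template:
    seed_provision_cidr: "{{ provision_oc_net_name | net_cidr }}"            # -> 10.0.0.0/24
    seed_provision_ip: "{{ provision_oc_net_name | net_ip('seed0') }}"       # -> 10.0.0.5
    seed_provision_iface: "{{ provision_oc_net_name | net_interface_obj }}"  # -> interface dict for MichaelRigart.interfaces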

35 ansible/group_vars/all/bifrost Normal file

@@ -0,0 +1,35 @@
---
# Kayobe configuration for Bifrost.

###############################################################################
# Diskimage-builder configuration.

# DIB base OS element.
kolla_bifrost_dib_os_element: "centos7"

# List of DIB elements.
kolla_bifrost_dib_elements:
  - "serial-console"
  - "vm"

# DIB init element.
kolla_bifrost_dib_init_element: "cloud-init-datasources"

# DIB environment variables.
kolla_bifrost_dib_env_vars:
  DIB_CLOUD_INIT_DATASOURCES: "ConfigDrive"

# List of DIB packages to install.
kolla_bifrost_dib_packages: []

###############################################################################
# Ironic configuration.

# Whether to enable ipmitool-based drivers.
kolla_bifrost_enable_ipmitool_drivers: true

###############################################################################
# Inventory configuration.

# Server inventory for Bifrost.
kolla_bifrost_servers: {}

21 ansible/group_vars/all/dns Normal file

@@ -0,0 +1,21 @@
---
###############################################################################
# DNS.

# List of DNS nameservers.
resolv_nameservers:
  - 8.8.8.8
  - 8.8.4.4

# DNS domain suffix.
#resolv_domain:

# List of DNS search suffixes.
#resolv_search:

# List of IP address and netmask pairs to sort addresses returned by
# gethostbyname.
#resolv_sortlist:

# List of DNS options.
#resolv_options:

11 ansible/group_vars/all/globals Normal file

@@ -0,0 +1,11 @@
---
# Kayobe global configuration.

###############################################################################
# Miscellaneous configuration.

# Path to Kayobe configuration directory.
kayobe_config_path: "{{ lookup('env', 'KAYOBE_CONFIG_PATH') | default('/etc/kayobe') }}"

# Path in which to cache downloaded images.
image_cache_path: "{{ ansible_user_dir ~ '/kayobe-image-cache' }}"

34 ansible/group_vars/all/kolla Normal file

@@ -0,0 +1,34 @@
---
###############################################################################
# Kolla configuration.

# Path to Kolla configuration directory.
kolla_config_path: "{{ lookup('env', 'KOLLA_CONFIG_PATH') | default('/etc/kolla') }}"

# Path to Kolla node custom configuration directory.
kolla_node_custom_config_path: "{{ kolla_config_path }}/config"

# Kolla base container image distribution.
kolla_base_distro: "centos"

# Kolla installation type: binary or source.
kolla_install_type: "binary"

# Kolla OpenStack release version. This should be a Docker image tag.
kolla_openstack_release: "3.0.2"

# Whether TLS is enabled for the external API endpoints.
kolla_enable_tls_external: "no"

# Path to external API certificate.
kolla_external_fqdn_cert:

# Whether debug logging is enabled.
kolla_openstack_logging_debug: "False"

###############################################################################
# Kolla feature flag configuration.

kolla_enable_glance: "yes"
kolla_enable_ironic: "yes"
kolla_enable_swift: "yes"

10 ansible/group_vars/all/network Normal file

@@ -0,0 +1,10 @@
---
###############################################################################
# Network roles.

# Network role to network name mappings.
provision_oc_net_name: 'provision_oc_net'
provision_wl_net_name: 'provision_wl_net'
external_net_name: 'external_net'
storage_net_name: 'storage_net'
storage_mgmt_net_name: 'storage_mgmt_net'

34 ansible/group_vars/all/ntp Normal file

@@ -0,0 +1,34 @@
---
# Kayobe NTP configuration.

###############################################################################
# Timezone.

# Name of the local timezone.
timezone: "{{ ansible_date_time.tz }}"

###############################################################################
# Network Time Protocol (NTP).

# List of names of NTP servers.
#ntp_config_server:

# List of NTP restrictions to add to ntp.conf.
#ntp_config_restrict:

# List of addresses for NTP daemon to listen on.
#ntp_config_listen:

# Other NTP configuration options.
#ntp_config_filegen:
#ntp_config_statistics:
#ntp_config_crypto:
#ntp_config_includefile:
#ntp_config_keys:
#ntp_config_trustedkey:
#ntp_config_requestkey:
#ntp_config_controlkey:
#ntp_config_broadcast:
#ntp_config_broadcastclient:
#ntp_config_multicastclient:
#ntp_config_tinker_panic_enabled:

29 ansible/group_vars/controllers/network Normal file

@@ -0,0 +1,29 @@
---
###############################################################################
# Network interface attachments.

# List of networks to which these nodes are attached.
network_interfaces: >
  {{ (controller_default_network_interfaces +
      controller_extra_network_interfaces) | unique | list }}

# List of default networks to which controller nodes are attached.
controller_default_network_interfaces: >
  {{ [provision_oc_net_name,
      provision_wl_net_name,
      internal_net_name,
      external_net_name,
      storage_net_name,
      storage_mgmt_net_name] | unique | list }}

# List of extra networks to which controller nodes are attached.
controller_extra_network_interfaces: []

###############################################################################
# Kolla networking.

# Name of the Neutron OVS bridge for the provisioning network.
neutron_bridge_name: "br-ex"

# External network interface for Neutron.
neutron_external_interface: "{{ 'patch-' ~ neutron_bridge_name }}"

16 ansible/group_vars/controllers/swift Normal file

@@ -0,0 +1,16 @@
---
###############################################################################
# OpenStack Swift configuration.

# Base-2 logarithm of the number of partitions,
# i.e. num_partitions = 2^<swift_part_power> (10 gives 1024 partitions).
swift_part_power: 10

# Object replication count.
swift_replication_count: "{{ [groups['controllers'] | length, 3] | min }}"

# Minimum time in hours between moving a given partition.
swift_min_part_hours: 1

# Number of Swift zones.
swift_num_zones: 5

42 ansible/group_vars/seed/network Normal file

@@ -0,0 +1,42 @@
---
###############################################################################
# Network interface attachments.

# List of networks to which these nodes are attached.
network_interfaces: >
  {{ (seed_default_network_interfaces +
      seed_extra_network_interfaces) | unique | list }}

# List of default networks to which seed nodes are attached.
seed_default_network_interfaces: >
  {{ [provision_oc_net_name] | unique | list }}

# List of extra networks to which seed nodes are attached.
seed_extra_network_interfaces: []

###############################################################################
# Network interface definitions.

# Overcloud provisioning network IP information.
# provision_oc_net_interface:
# provision_oc_net_bridge_ports:

# Workload provisioning network IP information.
# provision_wl_net_interface:
# provision_wl_net_bridge_ports:

# Internal network IP information.
# internal_net_interface:
# internal_net_bridge_ports:

# External network IP information.
# external_net_interface:
# external_net_bridge_ports:

# Storage network IP information.
# storage_net_interface:
# storage_net_bridge_ports:

# Storage management network IP information.
# storage_mgmt_net_interface:
# storage_mgmt_net_bridge_ports:
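
The commented placeholders above follow the <network>_<attribute> convention used by
ansible/filter_plugins/networks.py: a network that defines only an _interface is treated
as a plain Ethernet interface, while one that also defines _bridge_ports is treated as a
bridge (see net_is_ether/net_is_bridge). A hypothetical example; the device names are
illustrative, not defaults shipped by this commit:

    # Attach the seed to the overcloud provisioning network via a plain NIC.
    provision_oc_net_interface: "em1"

    # Or, to build a bridge instead, name the bridge and its member ports:
    # provision_oc_net_interface: "br-prov"
    # provision_oc_net_bridge_ports:
    #   - "em1"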

54 ansible/group_vars/seed/vm Normal file

@@ -0,0 +1,54 @@
---
###############################################################################
# Seed node VM configuration.

# Name of the seed VM.
seed_vm_name: "{{ inventory_hostname }}"

# Memory in MB.
seed_vm_memory_mb: "{{ 16 * 1024 }}"

# Number of vCPUs.
seed_vm_vcpus: 4

# List of volumes.
seed_vm_volumes:
  - "{{ seed_vm_root_volume }}"
  - "{{ seed_vm_data_volume }}"

# Root volume.
seed_vm_root_volume:
  name: "{{ seed_vm_name }}-root"
  pool: "{{ seed_vm_pool }}"
  capacity: "{{ seed_vm_root_capacity }}"
  format: "{{ seed_vm_root_format }}"
  image: "{{ seed_vm_root_image }}"

# Data volume.
seed_vm_data_volume:
  name: "{{ seed_vm_name }}-data"
  pool: "{{ seed_vm_pool }}"
  capacity: "{{ seed_vm_data_capacity }}"
  format: "{{ seed_vm_data_format }}"

# List of network interfaces.
seed_vm_interfaces:
  - network: default

# Name of the storage pool for the seed VM volumes.
seed_vm_pool: default

# Capacity of the seed VM root volume.
seed_vm_root_capacity: 50G

# Format of the seed VM root volume.
seed_vm_root_format: qcow2

# Base image for the seed VM root volume.
seed_vm_root_image:

# Capacity of the seed VM data volume.
seed_vm_data_capacity: 100G

# Format of the seed VM data volume.
seed_vm_data_format: qcow2

12 ansible/ip-allocation.yml Normal file

@@ -0,0 +1,12 @@
---
- name: Ensure IP addresses are allocated
  hosts: controllers
  gather_facts: no
  pre_tasks:
    - set_fact:
        ip_allocations: "{{ ip_allocations|default([]) + [{'net_name': item, 'cidr': item|net_cidr}] }}"
      with_items: "{{ network_interfaces }}"
  roles:
    - role: ip-allocation
      ip_allocation_filename: "{{ kayobe_config_path }}/network-allocation.yml"
      ip_allocation_hostname: "{{ inventory_hostname }}"

33 ansible/kolla-bifrost.yml Normal file

@@ -0,0 +1,33 @@
---
- name: Ensure Kolla Bifrost is configured
  hosts: config-mgmt
  vars:
    kolla_bifrost_extra_globals_path: "{{ kayobe_config_path ~ '/kolla/config/bifrost/bifrost.yml' }}"
    kolla_bifrost_driver_map:
      - { name: agent_ipmitool, enabled: "{{ kolla_bifrost_enable_ipmitool_drivers | bool }}" }

  pre_tasks:
    - name: Check whether a Kolla Bifrost extra globals configuration file exists
      stat:
        path: "{{ kolla_bifrost_extra_globals_path }}"
      register: globals_stat

    - name: Read the Kolla Bifrost extra globals configuration file
      set_fact:
        kolla_bifrost_extra_globals: "{{ lookup('template', kolla_bifrost_extra_globals_path) | from_yaml }}"
      when: globals_stat.stat.exists

  roles:
    - role: kolla-bifrost

      # Generate a list of enabled drivers from the map.
      kolla_bifrost_enabled_drivers: >
        {{ kolla_bifrost_driver_map | selectattr('enabled') | map(attribute='name') | list }}
      kolla_bifrost_enable_pxe_drivers: false

      # Network configuration.
      kolla_bifrost_dhcp_pool_start: "{{ provision_oc_net_name | net_allocation_pool_start }}"
      kolla_bifrost_dhcp_pool_end: "{{ provision_oc_net_name | net_allocation_pool_end }}"
      kolla_bifrost_dnsmasq_router: "{{ provision_oc_net_name | net_gateway }}"
      kolla_bifrost_dnsmasq_dns_servers: "{{ resolv_nameservers | default([]) }}"
      kolla_bifrost_domain: "{{ resolv_domain | default }}"
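
The driver map above is reduced to a list of enabled driver names via
selectattr/map. As a worked illustration (not extra configuration): with
kolla_bifrost_enable_ipmitool_drivers left at its default of true, the
kolla-bifrost role effectively receives

    kolla_bifrost_enabled_drivers:
      - agent_ipmitool

and the intent is that disabling the flag drops agent_ipmitool from the list.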

6 ansible/kolla-host.yml Normal file

@@ -0,0 +1,6 @@
---
- name: Ensure Kolla hosts are configured
  hosts: seed:controllers
  roles:
    - role: kolla-host
      kolla_authorized_key: "{{ lookup('file', lookup('env', 'HOME') ~ '/.ssh/id_rsa.pub') }}"

26 ansible/kolla-openstack.yml Normal file

@@ -0,0 +1,26 @@
---
- name: Ensure Kolla OpenStack components are configured
  hosts: config-mgmt
  pre_tasks:
    - name: Check whether Kolla extra Glance configuration file exists
      stat:
        path: "{{ kayobe_config_path }}/kolla/config/glance.conf"
      register: glance_stat

    - name: Read the Kolla extra Glance configuration file
      set_fact:
        kolla_extra_glance: "{{ lookup('template', '{{ kayobe_config_path }}/kolla/config/glance.conf') }}"
      when: glance_stat.stat.exists

    - name: Check whether Kolla extra Ironic configuration file exists
      stat:
        path: "{{ kayobe_config_path }}/kolla/config/ironic.conf"
      register: ironic_stat

    - name: Read the Kolla extra Ironic configuration file
      set_fact:
        kolla_extra_ironic: "{{ lookup('template', '{{ kayobe_config_path }}/kolla/config/ironic.conf') }}"
      when: ironic_stat.stat.exists

  roles:
    - role: kolla-openstack

35 ansible/kolla.yml Normal file

@@ -0,0 +1,35 @@
---
- name: Ensure Kolla is configured
  hosts: config-mgmt
  vars:
    controller_host: "{{ groups['controllers'][0] | default() }}"
    seed_host: "{{ groups['seed'][0] | default() }}"
  pre_tasks:
    - name: Check whether a Kolla extra globals configuration file exists
      stat:
        path: "{{ kayobe_config_path ~ '/kolla/globals.yml' }}"
      register: globals_stat

    - name: Read the Kolla extra globals configuration file
      set_fact:
        kolla_extra_globals: "{{ lookup('template', kayobe_config_path ~ '/kolla/globals.yml') | from_yaml }}"
      when: globals_stat.stat.exists
  roles:
    - role: kolla

    - role: kolla-build

    - role: kolla-ansible
      kolla_internal_vip_address: "{{ internal_net_name | net_vip_address }}"
      kolla_internal_fqdn: "{{ internal_net_name | net_fqdn or kolla_internal_vip_address }}"
      kolla_external_vip_address: "{{ external_net_name | net_vip_address }}"
      kolla_external_fqdn: "{{ external_net_name | net_fqdn or kolla_external_vip_address }}"
      kolla_network_interface: "{% if controller_host %}{{ internal_net_name | net_interface(controller_host) | replace('-', '_') }}{% endif %}"
      kolla_external_vip_interface: "{% if controller_host %}{{ external_net_name | net_interface(controller_host) | replace('-', '_') }}{% endif %}"
      kolla_api_interface: "{% if controller_host %}{{ internal_net_name | net_interface(controller_host) | replace('-', '_') }}{% endif %}"
      kolla_storage_interface: "{% if controller_host %}{{ storage_net_name | net_interface(controller_host) | replace('-', '_') }}{% endif %}"
      kolla_cluster_interface: "{% if controller_host %}{{ storage_mgmt_net_name | net_interface(controller_host) | replace('-', '_') }}{% endif %}"
      kolla_neutron_bridge_name: "{% if controller_host %}{{ hostvars[controller_host]['neutron_bridge_name'] }}{% endif %}"
      kolla_neutron_external_interface: "{% if controller_host %}{{ hostvars[controller_host]['neutron_external_interface'] }}{% endif %}"
      kolla_bifrost_network_interface: "{% if seed_host %}{{ provision_oc_net_name | net_interface(seed_host) | replace('-', '_') }}{% endif %}"
      kolla_provision_interface: "{% if controller_host %}{{ provision_wl_net_name | net_interface(controller_host) | replace('-', '_') }}{% endif %}"
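
One non-obvious detail above is the replace('-', '_') applied to interface names.
Kolla's playbooks build Ansible fact names such as ansible_<interface> from these
values (visible in the baremetal pre-install patch later in this commit), and
Ansible replaces dashes with underscores in per-interface fact names. A hypothetical
illustration; the device name br-ctl is an example, not a default:

    # If the first controller defines:
    internal_net_interface: "br-ctl"
    # then the kolla-ansible role is passed:
    # kolla_api_interface: "br_ctl"   # so that ansible_br_ctl resolves on that host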

47 ansible/network.yml Normal file

@@ -0,0 +1,47 @@
---
- name: Ensure networking is configured
  hosts: seed:controllers
  pre_tasks:
    - name: Ensure NetworkManager is disabled
      service:
        name: NetworkManager
        state: stopped
        enabled: no
      become: True
      register: nm_result
      failed_when:
        - "{{ nm_result | failed }}"
        # Ugh, Ansible's service module doesn't handle uninstalled services.
        - "{{ 'Could not find the requested service' not in nm_result.msg }}"

  roles:
    - role: ahuffman.resolv
      become: True

    - role: MichaelRigart.interfaces
      interfaces_ether_interfaces: >
        {{ network_interfaces |
           net_select_ethers |
           map('net_interface_obj') |
           list }}
      interfaces_bridge_interfaces: >
        {{ network_interfaces |
           net_select_bridges |
           map('net_bridge_obj') |
           list }}
      become: True

- name: Ensure controller workload provisioning network is configured
  hosts: controllers
  roles:
    # Configure a virtual ethernet patch link to connect the workload provision
    # network bridge to the Neutron OVS bridge.
    - role: veth
      veth_interfaces:
        - device: "{{ 'patch-' ~ provision_wl_net_name | net_interface }}"
          bootproto: "static"
          bridge: "{{ provision_wl_net_name | net_interface }}"
          peer_device: "{{ 'patch-' ~ neutron_bridge_name }}"
          peer_bootproto: "static"
          onboot: yes
      when: "{{ provision_wl_net_name in network_interfaces }}"

8 ansible/ntp.yml Normal file

@@ -0,0 +1,8 @@
---
- name: Ensure NTP is installed and configured
  hosts: seed:controllers
  roles:
    - role: yatesr.timezone
      become: True
    - role: resmo.ntp
      become: True

6 ansible/requirements.yml Normal file

@@ -0,0 +1,6 @@
---
- src: ahuffman.resolv
- src: jriguera.configdrive
- src: MichaelRigart.interfaces
- src: resmo.ntp
- src: yatesr.timezone

39 ansible/roles/bootstrap/tasks/main.yml Normal file

@@ -0,0 +1,39 @@
---
- name: Ensure required packages are installed
  yum:
    name: "{{ item }}"
    state: installed
  become: True
  with_items:
    - git
    - vim

- name: Check whether an SSH key exists
  stat:
    path: "{{ ansible_user_dir }}/.ssh/id_rsa"
  register: ssh_key_stat

- name: Generate an SSH key
  command: ssh-keygen -t rsa -N '' -f {{ ansible_user_dir }}/.ssh/id_rsa
  when: not ssh_key_stat.stat.exists

- name: Ensure SSH public key is in authorized keys
  authorized_key:
    user: "{{ ansible_user_id }}"
    key: "{{ lookup('file', ansible_user_dir ~ '/.ssh/id_rsa.pub') }}"

- name: Scan for SSH keys
  command: ssh-keyscan {{ item }}
  with_items:
    - localhost
    - 127.0.0.1
  register: keyscan_result
  changed_when: False

- name: Ensure SSH keys are in known hosts
  known_hosts:
    host: "{{ item[0].item }}"
    key: "{{ item[1] }}"
  with_subelements:
    - "{{ keyscan_result.results }}"
    - stdout_lines

56 ansible/roles/disable-selinux/tasks/main.yml Normal file

@@ -0,0 +1,56 @@
---
- name: Ensure required packages are installed
  yum:
    name: "{{ item }}"
    state: installed
  become: True
  with_items:
    - libselinux-python

- name: Ensure SELinux is disabled
  selinux:
    state: disabled
  register: selinux_result
  become: True

- name: Set a fact to determine whether we are running locally
  set_fact:
    is_local: "{{ lookup('pipe', 'hostname') in [ansible_hostname, ansible_nodename] }}"
  when: "{{ selinux_result | changed }}"

# Any SSH connection errors cause ansible to fail the task. We therefore
# perform a manual SSH connection and allow the command to fail.
- name: Reboot the system to apply SELinux changes (remote)
  local_action:
    # Use -tt to force a pseudo tty.
    module: >
      command
      ssh -tt {{ ansible_user }}@{{ ansible_host | default(inventory_hostname) }}
      sudo shutdown -r now "Applying SELinux changes"
  register: reboot_result
  failed_when:
    - "{{ reboot_result | failed }}"
    - "{{ 'closed by remote host' not in reboot_result.stderr }}"
  when:
    - "{{ selinux_result | changed }}"
    - "{{ not is_local | bool }}"

- name: Reboot the system to apply SELinux changes (local)
  command: shutdown -r now "Applying SELinux changes"
  become: True
  when:
    - "{{ selinux_result | changed }}"
    - "{{ is_local | bool }}"

# If we're running this locally we won't get here.
- name: Wait for the system to boot up (remote)
  local_action:
    module: wait_for
    host: "{{ ansible_host | default(inventory_hostname) }}"
    port: 22
    state: started
    # Wait for 10 seconds before polling to ensure the node has shutdown.
    delay: 10
  when:
    - "{{ selinux_result | changed }}"
    - "{{ not is_local | bool }}"

7 ansible/roles/docker/tasks/main.yml Normal file

@@ -0,0 +1,7 @@
---
- name: Ensure user is in the docker group
  user:
    name: "{{ ansible_user_id }}"
    groups: docker
    append: yes
  become: True

120 ansible/roles/ip-allocation/library/ip_allocation.py Normal file

@@ -0,0 +1,120 @@
#!/usr/bin/python

from ansible.module_utils.basic import *
import sys

# Store a list of import errors to report to the user.
IMPORT_ERRORS = []
try:
    import netaddr
except Exception as e:
    IMPORT_ERRORS.append(e)
try:
    import yaml
except Exception as e:
    IMPORT_ERRORS.append(e)


DOCUMENTATION = """
WM
"""

EXAMPLES = """
WM
"""


def read_allocations(module):
    """Read IP address allocations from the allocation file."""
    filename = module.params['allocation_file']
    try:
        with open(filename, 'r') as f:
            return yaml.load(f)
    except IOError as e:
        module.fail_json(msg="Failed to open allocation file %s for reading" % filename)
    except yaml.YAMLError as e:
        module.fail_json(msg="Failed to parse allocation file %s as YAML" % filename)


def write_allocations(module, allocations):
    """Write IP address allocations to the allocation file."""
    filename = module.params['allocation_file']
    try:
        with open(filename, 'w') as f:
            yaml.dump(allocations, f, default_flow_style=False)
    except IOError as e:
        module.fail_json(msg="Failed to open allocation file %s for writing" % filename)
    except yaml.YAMLError as e:
        module.fail_json(msg="Failed to dump allocation file %s as YAML" % filename)


def update_allocation(module, allocations):
    """Allocate an IP address on a network for a host.

    :param module: AnsibleModule instance
    :param allocations: Existing IP address allocations
    """
    net_name = module.params['net_name']
    hostname = module.params['hostname']
    cidr = module.params['cidr']
    network = netaddr.IPNetwork(cidr)
    result = {
        'changed': False,
    }
    object_name = "%s_ips" % net_name
    net_allocations = allocations.setdefault(object_name, {})
    invalid_allocations = {hn: ip for hn, ip in net_allocations.items()
                           if ip not in network}
    if invalid_allocations:
        module.fail_json(msg="Found invalid existing allocations in network %s: %s" %
                         (network, ", ".join("%s: %s" % (hn, ip) for hn, ip in invalid_allocations.items())))
    if hostname not in net_allocations:
        result['changed'] = True
        ips = netaddr.IPSet(net_allocations.values())
        free_ips = netaddr.IPSet([network]) - ips
        for free_cidr in free_ips.iter_cidrs():
            ip = free_cidr[0]
            break
        else:
            module.fail_json(msg="No unallocated IP addresses for %s in %s" % (hostname, net_name))
        free_ips.remove(ip)
        net_allocations[hostname] = str(ip)
    result['ip'] = net_allocations[hostname]
    return result


def allocate(module):
    """Allocate an IP address for a host, updating the allocation file."""
    allocations = read_allocations(module)
    result = update_allocation(module, allocations)
    if result['changed'] and not module.check_mode:
        write_allocations(module, allocations)
    return result


def main():
    module = AnsibleModule(
        argument_spec=dict(
            net_name=dict(required=True, type='str'),
            hostname=dict(required=True, type='str'),
            cidr=dict(required=True, type='str'),
            allocation_file=dict(required=True, type='str'),
        ),
        supports_check_mode=True,
    )

    # Fail if there were any exceptions when importing modules.
    if IMPORT_ERRORS:
        module.fail_json(msg="Import errors: %s" %
                         ", ".join([repr(e) for e in IMPORT_ERRORS]))

    try:
        results = allocate(module)
    except Exception as e:
        module.fail_json(msg="Failed to allocate IP address: %s" % repr(e))
    else:
        module.exit_json(**results)


if __name__ == '__main__':
    main()
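
For reference, a sketch of the allocation file layout this module maintains: one
<net_name>_ips mapping per network, keyed by hostname (see update_allocation above).
The hostnames and addresses below are invented for illustration only:

    ---
    # e.g. {{ kayobe_config_path }}/network-allocation.yml after allocating
    # addresses for two controllers on two networks.
    provision_oc_net_ips:
      controller0: 10.0.0.6
      controller1: 10.0.0.7
    internal_net_ips:
      controller0: 10.0.1.6
      controller1: 10.0.1.7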

9 ansible/roles/ip-allocation/tasks/main.yml Normal file

@@ -0,0 +1,9 @@
---
- name: Ensure IP addresses are allocated
  local_action:
    module: ip_allocation
    allocation_file: "{{ ip_allocation_filename }}"
    hostname: "{{ ip_allocation_hostname }}"
    net_name: "{{ item.net_name }}"
    cidr: "{{ item.cidr }}"
  with_items: "{{ ip_allocations }}"

139 ansible/roles/kolla-ansible/defaults/main.yml Normal file

@@ -0,0 +1,139 @@
---
# Virtualenv directory where Kolla will be installed.
kolla_venv: "{{ ansible_env['PWD'] }}/kolla-venv"

# Directory where Kolla config files will be installed.
kolla_config_path:

# Directory where Kolla custom configuration files will be installed.
kolla_node_custom_config_path:

# Valid options are [ centos, fedora, oraclelinux, ubuntu ]
kolla_base_distro:

# Valid options are [ binary, source ]
kolla_install_type:

# Valid option is Docker repository tag
kolla_openstack_release:

# This should be a VIP, an unused IP on your network that will float between
# the hosts running keepalived for high-availability. When running an All-In-One
# without haproxy and keepalived, this should be the first IP on your
# 'network_interface' as set in the Networking section below.
kolla_internal_vip_address:

# This is the DNS name that maps to the kolla_internal_vip_address VIP. By
# default it is the same as kolla_internal_vip_address.
kolla_internal_fqdn:

# This should be a VIP, an unused IP on your network that will float between
# the hosts running keepalived for high-availability. It defaults to the
# kolla_internal_vip_address, allowing internal and external communication to
# share the same address. Specify a kolla_external_vip_address to separate
# internal and external requests between two VIPs.
kolla_external_vip_address:

# The Public address used to communicate with OpenStack as set in the public_url
# for the endpoints that will be created. This DNS name should map to
# kolla_external_vip_address.
kolla_external_fqdn:


####################
# Networking options
####################
# This interface is what all your api services will be bound to by default.
# Additionally, all vxlan/tunnel and storage network traffic will go over this
# interface by default. This interface must contain an IPv4 address.
# It is possible for hosts to have non-matching names of interfaces - these can
# be set in an inventory file per host or per group or stored separately, see
# http://docs.ansible.com/ansible/intro_inventory.html
# Yet another way to workaround the naming problem is to create a bond for the
# interface on all hosts and give the bond name here. Similar strategy can be
# followed for other types of interfaces.
kolla_network_interface:

# These can be adjusted for even more customization. The default is the same as
# the 'network_interface'. These interfaces must contain an IPv4 address.
kolla_external_vip_interface:
kolla_api_interface:
kolla_storage_interface:
kolla_cluster_interface:

# This is the raw interface given to neutron as its external network port. Even
# though an IP address can exist on this interface, it will be unusable in most
# configurations. It is recommended this interface not be configured with any IP
# addresses for that reason.
kolla_neutron_external_interface:

# Name of the Neutron external bridge.
kolla_neutron_bridge_name:

# This is the interface to use for Bifrost bare metal provisioning of the
# control plane.
kolla_bifrost_network_interface:

# This is the interface to use for bare metal provisioning. It is not a
# standard kolla variable.
kolla_provision_interface:


####################
# TLS options
####################
# To provide encryption and authentication on the kolla_external_vip_interface,
# TLS can be enabled. When TLS is enabled, certificates must be provided to
# allow clients to perform authentication.
kolla_enable_tls_external:
kolla_external_fqdn_cert:


####################
# OpenStack options
####################
# Use these options to set the various log levels across all OpenStack projects
# Valid options are [ True, False ]
kolla_openstack_logging_debug:

# OpenStack services can be enabled or disabled with these options
#kolla_enable_aodh:
#kolla_enable_barbican:
#kolla_enable_ceilometer:
#kolla_enable_central_logging:
#kolla_enable_ceph:
#kolla_enable_ceph_rgw:
#kolla_enable_cinder:
#kolla_enable_cinder_backend_iscsi:
#kolla_enable_cinder_backend_lvm:
#kolla_enable_cloudkitty:
#kolla_enable_congress:
#kolla_enable_etcd:
#kolla_enable_gnocchi:
#kolla_enable_grafana:
#kolla_enable_heat:
#kolla_enable_horizon:
#kolla_enable_influxdb:
#kolla_enable_ironic:
#kolla_enable_kuryr:
#kolla_enable_magnum:
#kolla_enable_manila:
#kolla_enable_mistral:
#kolla_enable_mongodb:
#kolla_enable_murano:
#kolla_enable_multipathd:
#kolla_enable_neutron_dvr:
#kolla_enable_neutron_lbaas:
#kolla_enable_neutron_qos:
#kolla_enable_neutron_agent_ha:
#kolla_enable_neutron_vpnaas:
#kolla_enable_rally:
#kolla_enable_sahara:
#kolla_enable_senlin:
#kolla_enable_swift:
#kolla_enable_telegraf:
#kolla_enable_tempest:
#kolla_enable_watcher:

# Free form extra configuration to append to {{ kolla_config_path }}/globals.yml.
kolla_extra_globals:

10 ansible/roles/kolla-ansible/files/baremetal-docker_yum_repo.j2.patch Normal file

@@ -0,0 +1,10 @@
--- /usr/share/kolla/ansible/roles/baremetal/templates/docker_yum_repo.j2.old 2017-01-10 16:21:05.305626808 -0500
+++ /usr/share/kolla/ansible/roles/baremetal/templates/docker_yum_repo.j2 2017-01-10 16:21:09.216645923 -0500
@@ -1,6 +1,6 @@
 [docker-repo]
 name=Docker main Repository
-baseurl={{docker_yum_url}}/repo/main/{{ansible_distribution|lower}}/{{ansible_distribution_major_version|lower}}
+baseurl={{docker_yum_url}}/repo/main/{% if ansible_distribution == 'RedHat' %}centos{% else %}{{ansible_distribution|lower}}{% endif %}/{{ansible_distribution_major_version|lower}}
 enabled=1
 gpgcheck=1
 gpgkey={{docker_yum_url}}/gpg

27 ansible/roles/kolla-ansible/files/baremetal-pre-install.yml.patch Normal file

@@ -0,0 +1,27 @@
--- /usr/share/kolla/ansible/roles/baremetal/tasks/pre-install.yml.old 2017-01-06 17:23:12.444746830 +0000
+++ /usr/share/kolla/ansible/roles/baremetal/tasks/pre-install.yml 2017-01-06 17:22:27.864278879 +0000
@@ -28,6 +28,7 @@
     {% for host in groups['all'] %}
     {{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }} {{ hostvars[host]['ansible_hostname'] }}
     {% endfor %}
+  become: True
   when: customize_etc_hosts | bool

 - name: ensure sudo group is present
@@ -126,7 +127,7 @@
     recurse: yes
     owner: kolla
     group: kolla
-    mode: 755
+    mode: 0755
   become: True
   when: create_kolla_user | bool == True

@@ -135,6 +136,6 @@
     path: /etc/kolla
     state: directory
     recurse: yes
-    mode: 666
+    mode: 0666
   become: True
   when: create_kolla_user | bool == False

11 ansible/roles/kolla-ansible/files/ironic-api.json.j2.patch Normal file

@@ -0,0 +1,11 @@
--- /usr/share/kolla/ansible/roles/ironic/templates/ironic-api.json.j2.old 2017-01-06 13:56:52.881061188 +0000
+++ /usr/share/kolla/ansible/roles/ironic/templates/ironic-api.json.j2 2017-01-06 14:00:21.757338271 +0000
@@ -10,7 +10,7 @@
     ],
     "permissions": [
         {
-            "path": "/var/log/kolla/ironic"
+            "path": "/var/log/kolla/ironic",
             "owner": "ironic:ironic",
             "recurse": true
         }

11 ansible/roles/kolla-ansible/files/ironic-conductor.json.j2.patch Normal file

@@ -0,0 +1,11 @@
--- /usr/share/kolla/ansible/roles/ironic/templates/ironic-conductor.json.j2.old 2017-01-06 14:28:35.048365453 +0000
+++ /usr/share/kolla/ansible/roles/ironic/templates/ironic-conductor.json.j2 2017-01-06 14:28:44.858467071 +0000
@@ -20,7 +20,7 @@
             "recurse": true
         },
         {
-            "path": "/tftpboot"
+            "path": "/tftpboot",
             "owner": "ironic:ironic",
             "recurse": true
         }

11 ansible/roles/kolla-ansible/files/kolla-ansible.patch Normal file

@@ -0,0 +1,11 @@
--- ansible/kolla-venv/bin/kolla-ansible.orig 2017-01-29 21:20:29.189225104 +0000
+++ ansible/kolla-venv/bin/kolla-ansible 2017-01-31 16:31:07.203695865 +0000
@@ -9,6 +9,8 @@
         BASEDIR=/usr/share/kolla
     elif [[ ${dir_name} == "/usr/local/bin" ]]; then
         BASEDIR=/usr/local/share/kolla
+    elif [[ ${dir_name} == "${VIRTUAL_ENV}/bin" ]]; then
+        BASEDIR="${VIRTUAL_ENV}/share/kolla"
     else
         BASEDIR="$(dirname ${dir_name})"
     fi

11 ansible/roles/kolla-ansible/files/prechecks-port_checks.yml.patch Normal file

@@ -0,0 +1,11 @@
--- ansible/kolla-venv/share/kolla/ansible/roles/prechecks/tasks/port_checks.yml.orig 2017-01-31 16:26:26.021463306 +0000
+++ ansible/kolla-venv/share/kolla/ansible/roles/prechecks/tasks/port_checks.yml 2017-01-31 16:27:22.045711530 +0000
@@ -601,7 +601,7 @@
   changed_when: false
   failed_when: "'169.254.' not in kolla_internal_vip_address and \
                 kolla_internal_vip_address | ipaddr(ip_addr_output.stdout.split()[3]) is none"
-  when: enable_haproxy | bool
+  when: false

 - name: Checking the network_interface is present
   fail: "msg='Please check the network_interface property - interface {{ network_interface }} not found'"
34
ansible/roles/kolla-ansible/tasks/config.yml
Normal file
@ -0,0 +1,34 @@
|
|||||||
|
---
|
||||||
|
- name: Ensure the Kolla configuration directories exist
|
||||||
|
file:
|
||||||
|
path: "{{ item }}"
|
||||||
|
state: directory
|
||||||
|
mode: 0755
|
||||||
|
become: True
|
||||||
|
with_items:
|
||||||
|
- "{{ kolla_config_path }}/inventory"
|
||||||
|
- "{{ kolla_node_custom_config_path }}"
|
||||||
|
|
||||||
|
- name: Ensure the Kolla configuration files exist
|
||||||
|
template:
|
||||||
|
src: "{{ item.src }}"
|
||||||
|
dest: "{{ kolla_config_path }}/{{ item.dest }}"
|
||||||
|
mode: 0644
|
||||||
|
become: True
|
||||||
|
with_items:
|
||||||
|
- { src: seed.j2, dest: inventory/seed }
|
||||||
|
- { src: overcloud.j2, dest: inventory/overcloud }
|
||||||
|
- { src: globals.yml.j2, dest: globals.yml }
|
||||||
|
|
||||||
|
- name: Check whether the Kolla passwords file exists
|
||||||
|
stat:
|
||||||
|
path: "{{ kolla_config_path }}/passwords.yml"
|
||||||
|
register: kolla_passwords_stat
|
||||||
|
|
||||||
|
- name: Generate Kolla passwords
|
||||||
|
shell: >
|
||||||
|
cp {{ kolla_install_dir }}/etc_examples/kolla/passwords.yml {{ kolla_config_path }}/passwords.yml.generated
|
||||||
|
&& {{ kolla_venv }}/bin/kolla-genpwd -p {{ kolla_config_path }}/passwords.yml.generated
|
||||||
|
&& mv {{ kolla_config_path }}/passwords.yml.generated {{ kolla_config_path }}/passwords.yml
|
||||||
|
become: True
|
||||||
|
when: not kolla_passwords_stat.stat.exists
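A minimal manual equivalent of the password generation task above, assuming for
illustration kolla_config_path=/etc/kolla and kolla_venv=~/kolla-venv (so that
kolla_install_dir=~/kolla-venv/share/kolla), would be:

    cp ~/kolla-venv/share/kolla/etc_examples/kolla/passwords.yml /etc/kolla/passwords.yml.generated
    ~/kolla-venv/bin/kolla-genpwd -p /etc/kolla/passwords.yml.generated
    mv /etc/kolla/passwords.yml.generated /etc/kolla/passwords.yml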
|
19
ansible/roles/kolla-ansible/tasks/install.yml
Normal file
@ -0,0 +1,19 @@
|
|||||||
|
---
|
||||||
|
- name: Ensure Kolla patches are applied
|
||||||
|
patch:
|
||||||
|
src: "{{ item.src }}"
|
||||||
|
dest: "{{ item.dest }}"
|
||||||
|
become: True
|
||||||
|
with_items:
|
||||||
|
- src: baremetal-docker_yum_repo.j2.patch
|
||||||
|
dest: "{{ kolla_install_dir }}/ansible/roles/baremetal/templates/docker_yum_repo.j2"
|
||||||
|
- src: baremetal-pre-install.yml.patch
|
||||||
|
dest: "{{ kolla_install_dir }}/ansible/roles/baremetal/tasks/pre-install.yml"
|
||||||
|
- src: ironic-api.json.j2.patch
|
||||||
|
dest: "{{ kolla_install_dir }}/ansible/roles/ironic/templates/ironic-api.json.j2"
|
||||||
|
- src: ironic-conductor.json.j2.patch
|
||||||
|
dest: "{{ kolla_install_dir }}/ansible/roles/ironic/templates/ironic-conductor.json.j2"
|
||||||
|
- src: prechecks-port_checks.yml.patch
|
||||||
|
dest: "{{ kolla_install_dir }}/ansible/roles/prechecks/tasks/port_checks.yml"
|
||||||
|
- src: kolla-ansible.patch
|
||||||
|
dest: "{{ kolla_venv }}/bin/kolla-ansible"
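The patches above are applied with Ansible's patch module; applying one by hand
would look roughly like the following, with illustrative paths that assume Kolla
is installed into the virtualenv as in the kolla role:

    patch ~/kolla-venv/share/kolla/ansible/roles/ironic/templates/ironic-api.json.j2 \
        ansible/roles/kolla-ansible/files/ironic-api.json.j2.patch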
|
3
ansible/roles/kolla-ansible/tasks/main.yml
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
---
|
||||||
|
- include: install.yml
|
||||||
|
- include: config.yml
|
253
ansible/roles/kolla-ansible/templates/globals.yml.j2
Normal file
@ -0,0 +1,253 @@
|
|||||||
|
---
|
||||||
|
# {{ ansible_managed }}
|
||||||
|
|
||||||
|
# You can use this file to override _any_ variable throughout Kolla.
|
||||||
|
# Additional options can be found in the 'kolla/ansible/group_vars/all.yml' file.
|
||||||
|
# The default values of all commented parameters are shown here. To override
# a default, uncomment the parameter and change its value.
|
||||||
|
|
||||||
|
###################
|
||||||
|
# Kolla options
|
||||||
|
###################
|
||||||
|
# Valid options are [ COPY_ONCE, COPY_ALWAYS ]
|
||||||
|
config_strategy: "COPY_ALWAYS"
|
||||||
|
|
||||||
|
# Valid options are [ centos, fedora, oraclelinux, ubuntu ]
|
||||||
|
kolla_base_distro: "{{ kolla_base_distro }}"
|
||||||
|
|
||||||
|
# Valid options are [ binary, source ]
|
||||||
|
kolla_install_type: "{{ kolla_install_type }}"
|
||||||
|
|
||||||
|
# Valid option is Docker repository tag
|
||||||
|
openstack_release: "{{ kolla_openstack_release }}"
|
||||||
|
|
||||||
|
# This should be a VIP, an unused IP on your network that will float between
|
||||||
|
# the hosts running keepalived for high-availability. When running an All-In-One
|
||||||
|
# without haproxy and keepalived, this should be the first IP on your
|
||||||
|
# 'network_interface' as set in the Networking section below.
|
||||||
|
kolla_internal_vip_address: "{{ kolla_internal_vip_address }}"
|
||||||
|
|
||||||
|
# This is the DNS name that maps to the kolla_internal_vip_address VIP. By
|
||||||
|
# default it is the same as kolla_internal_vip_address.
|
||||||
|
kolla_internal_fqdn: "{{ kolla_internal_fqdn }}"
|
||||||
|
|
||||||
|
# This should be a VIP, an unused IP on your network that will float between
|
||||||
|
# the hosts running keepalived for high-availability. It defaults to the
|
||||||
|
# kolla_internal_vip_address, allowing internal and external communication to
|
||||||
|
# share the same address. Specify a kolla_external_vip_address to separate
|
||||||
|
# internal and external requests between two VIPs.
|
||||||
|
kolla_external_vip_address: "{{ kolla_external_vip_address }}"
|
||||||
|
|
||||||
|
# The Public address used to communicate with OpenStack as set in the public_url
|
||||||
|
# for the endpoints that will be created. This DNS name should map to
|
||||||
|
# kolla_external_vip_address.
|
||||||
|
kolla_external_fqdn: "{{ kolla_external_fqdn }}"
|
||||||
|
|
||||||
|
# Path to directory containing Kolla custom configuration files.
|
||||||
|
node_custom_config: "{{ kolla_node_custom_config_path }}"
|
||||||
|
|
||||||
|
####################
|
||||||
|
# Docker options
|
||||||
|
####################
|
||||||
|
### Example: Private repository with authentication
|
||||||
|
|
||||||
|
#docker_registry: "172.16.0.10:4000"
|
||||||
|
#docker_namespace: "companyname"
|
||||||
|
#docker_registry_username: "sam"
|
||||||
|
#docker_registry_password: "correcthorsebatterystaple"
|
||||||
|
|
||||||
|
|
||||||
|
####################
|
||||||
|
# Networking options
|
||||||
|
####################
|
||||||
|
# This interface is what all your api services will be bound to by default.
|
||||||
|
# Additionally, all vxlan/tunnel and storage network traffic will go over this
|
||||||
|
# interface by default. This interface must contain an IPv4 address.
|
||||||
|
# It is possible for hosts to have non-matching names of interfaces - these can
|
||||||
|
# be set in an inventory file per host or per group or stored separately, see
|
||||||
|
# http://docs.ansible.com/ansible/intro_inventory.html
|
||||||
|
# Yet another way to work around the naming problem is to create a bond for the
|
||||||
|
# interface on all hosts and give the bond name here. Similar strategy can be
|
||||||
|
# followed for other types of interfaces.
|
||||||
|
network_interface: "{{ kolla_network_interface }}"
|
||||||
|
|
||||||
|
# These can be adjusted for even more customization. The default is the same as
|
||||||
|
# the 'network_interface'. These interfaces must contain an IPv4 address.
|
||||||
|
kolla_external_vip_interface: "{{ kolla_external_vip_interface }}"
|
||||||
|
api_interface: "{{ kolla_api_interface }}"
|
||||||
|
storage_interface: "{{ kolla_storage_interface }}"
|
||||||
|
cluster_interface: "{{ kolla_cluster_interface }}"
|
||||||
|
# NOTE: tunnel_interface is unused.
|
||||||
|
|
||||||
|
# This is the raw interface given to neutron as its external network port. Even
|
||||||
|
# though an IP address can exist on this interface, it will be unusable in most
|
||||||
|
# configurations. It is recommended this interface not be configured with any IP
|
||||||
|
# addresses for that reason.
|
||||||
|
neutron_external_interface: "{{ kolla_neutron_external_interface }}"
|
||||||
|
|
||||||
|
# Name of the Neutron external bridge.
|
||||||
|
neutron_bridge_name: "{{ kolla_neutron_bridge_name }}"
|
||||||
|
|
||||||
|
# This is the interface to use for Bifrost bare metal provisioning of the
|
||||||
|
# control plane.
|
||||||
|
bifrost_network_interface: "{{ kolla_bifrost_network_interface }}"
|
||||||
|
|
||||||
|
# This is the interface to use for bare metal provisioning. It is not a
|
||||||
|
# standard kolla variable.
|
||||||
|
provision_interface: "{{ kolla_provision_interface }}"
|
||||||
|
|
||||||
|
# Valid options are [ openvswitch, linuxbridge ]
|
||||||
|
neutron_plugin_agent: "openvswitch"
|
||||||
|
|
||||||
|
|
||||||
|
####################
|
||||||
|
# keepalived options
|
||||||
|
####################
|
||||||
|
# Arbitrary unique number from 0..255
|
||||||
|
#keepalived_virtual_router_id: "51"
|
||||||
|
|
||||||
|
|
||||||
|
####################
|
||||||
|
# TLS options
|
||||||
|
####################
|
||||||
|
# To provide encryption and authentication on the kolla_external_vip_interface,
|
||||||
|
# TLS can be enabled. When TLS is enabled, certificates must be provided to
|
||||||
|
# allow clients to perform authentication.
|
||||||
|
kolla_enable_tls_external: "{{ kolla_enable_tls_external }}"
|
||||||
|
kolla_external_fqdn_cert: "{{ kolla_external_fqdn_cert }}"
|
||||||
|
|
||||||
|
|
||||||
|
####################
|
||||||
|
# OpenStack options
|
||||||
|
####################
|
||||||
|
# Use these options to set the various log levels across all OpenStack projects
|
||||||
|
# Valid options are [ True, False ]
|
||||||
|
openstack_logging_debug: {{ kolla_openstack_logging_debug }}
|
||||||
|
|
||||||
|
# Valid options are [ novnc, spice ]
|
||||||
|
#nova_console: "novnc"
|
||||||
|
|
||||||
|
# OpenStack services can be enabled or disabled with these options
|
||||||
|
{% for feature_flag in kolla_feature_flags %}
|
||||||
|
{% if ('kolla_enable_' ~ feature_flag) in hostvars[inventory_hostname] %}
|
||||||
|
enable_{{ feature_flag }}: {{ hostvars[inventory_hostname]['kolla_enable_' ~ feature_flag] | bool }}
|
||||||
|
{% endif %}
|
||||||
|
{% endfor %}
|
||||||
|
|
||||||
|
###################
|
||||||
|
# Ceph options
|
||||||
|
###################
|
||||||
|
# Ceph can be set up with a cache to improve performance. To use the cache you
# must provide separate disks from those used for the OSDs.
|
||||||
|
#ceph_enable_cache: "no"
|
||||||
|
# Valid options are [ forward, none, writeback ]
|
||||||
|
#ceph_cache_mode: "writeback"
|
||||||
|
|
||||||
|
# A requirement for using erasure-coded pools is that you must set up a cache tier
|
||||||
|
# Valid options are [ erasure, replicated ]
|
||||||
|
#ceph_pool_type: "replicated"
|
||||||
|
|
||||||
|
|
||||||
|
#######################
|
||||||
|
# Keystone options
|
||||||
|
#######################
|
||||||
|
|
||||||
|
# Valid options are [ uuid, fernet ]
|
||||||
|
#keystone_token_provider: 'uuid'
|
||||||
|
|
||||||
|
# Interval to rotate fernet keys by (in seconds). Must be an interval of
|
||||||
|
# 60(1 min), 120(2 min), 180(3 min), 240(4 min), 300(5 min), 360(6 min),
|
||||||
|
# 600(10 min), 720(12 min), 900(15 min), 1200(20 min), 1800(30 min),
|
||||||
|
# 3600(1 hour), 7200(2 hour), 10800(3 hour), 14400(4 hour), 21600(6 hour),
|
||||||
|
# 28800(8 hour), 43200(12 hour), 86400(1 day), 604800(1 week).
|
||||||
|
#fernet_token_expiry: 86400
|
||||||
|
|
||||||
|
|
||||||
|
#######################
|
||||||
|
# Glance options
|
||||||
|
#######################
|
||||||
|
# Configure image backend.
|
||||||
|
#glance_backend_file: "yes"
|
||||||
|
#glance_backend_ceph: "no"
|
||||||
|
|
||||||
|
#######################
|
||||||
|
# Ceilometer options
|
||||||
|
#######################
|
||||||
|
# Valid options are [ mongodb, mysql, gnocchi ]
|
||||||
|
#ceilometer_database_type: "mongodb"
|
||||||
|
|
||||||
|
|
||||||
|
#######################
|
||||||
|
# Gnocchi options
|
||||||
|
#######################
|
||||||
|
# Valid options are [ file, ceph ]
|
||||||
|
#gnocchi_backend_storage: "{% raw %}{{ 'ceph' if enable_ceph|bool else 'file' }}{% endraw %}"
|
||||||
|
|
||||||
|
|
||||||
|
#######################
|
||||||
|
# Cinder options
|
||||||
|
#######################
|
||||||
|
# Enable / disable Cinder backends
|
||||||
|
#cinder_backend_ceph: "{% raw %}{{ enable_ceph }}{% endraw %}"
|
||||||
|
|
||||||
|
#cinder_volume_group: "cinder-volumes"
|
||||||
|
|
||||||
|
|
||||||
|
#######################
|
||||||
|
# Nova options
|
||||||
|
#######################
|
||||||
|
#nova_backend_ceph: "{% raw %}{{ enable_ceph }}{% endraw %}"
|
||||||
|
|
||||||
|
|
||||||
|
#######################
|
||||||
|
# Horizon options
|
||||||
|
#######################
|
||||||
|
#horizon_backend_database: "no"
|
||||||
|
|
||||||
|
|
||||||
|
#######################################
|
||||||
|
# Manila - Shared File Systems Options
|
||||||
|
#######################################
|
||||||
|
# Valid options are [ True, False ]
|
||||||
|
#driver_handles_share_servers: "True"
|
||||||
|
|
||||||
|
|
||||||
|
##################################
|
||||||
|
# Swift - Object Storage Options
|
||||||
|
##################################
|
||||||
|
# Swift expects block devices to be available for storage. Two types of storage
|
||||||
|
# are supported: 1 - storage device with a special partition name and filesystem
|
||||||
|
# label, 2 - unpartitioned disk with a filesystem. The label of this filesystem
|
||||||
|
# is used to detect the disk which Swift will be using.
|
||||||
|
|
||||||
|
# Swift supports two matching modes, valid options are [ prefix, strict ]
|
||||||
|
#swift_devices_match_mode: "strict"
|
||||||
|
|
||||||
|
# This parameter defines the matching pattern: if "strict" mode was selected
# for swift_devices_match_mode, then swift_devices_name should specify the name
# of the special Swift partition, for example "KOLLA_SWIFT_DATA"; if "prefix"
# mode was selected, then swift_devices_name should specify a pattern which
# matches the filesystem labels prepared for Swift.
|
||||||
|
#swift_devices_name: "KOLLA_SWIFT_DATA"
|
||||||
|
|
||||||
|
|
||||||
|
################################################
|
||||||
|
# Tempest - The OpenStack Integration Test Suite
|
||||||
|
################################################
|
||||||
|
# The following values must be set when Tempest is enabled
|
||||||
|
tempest_image_id:
|
||||||
|
tempest_flavor_ref_id:
|
||||||
|
tempest_public_network_id:
|
||||||
|
tempest_floating_network_name:
|
||||||
|
|
||||||
|
# tempest_image_alt_id: "{% raw %}{{ tempest_image_id }}{% endraw %}"
|
||||||
|
# tempest_flavor_ref_alt_id: "{% raw %}{{ tempest_flavor_ref_id }}{% endraw %}"
|
||||||
|
|
||||||
|
|
||||||
|
{% if kolla_extra_globals %}
|
||||||
|
#######################
|
||||||
|
# Extra configuration
|
||||||
|
#######################
|
||||||
|
|
||||||
|
{{ kolla_extra_globals|to_nice_yaml }}
|
||||||
|
{% endif %}
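As an illustration of the extra configuration block above, a hypothetical

    kolla_extra_globals:
      docker_namespace: "example"

would be appended to the generated globals.yml by to_nice_yaml as:

    docker_namespace: example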
|
409
ansible/roles/kolla-ansible/templates/overcloud.j2
Normal file
@ -0,0 +1,409 @@
|
|||||||
|
# {{ ansible_managed }}
|
||||||
|
|
||||||
|
# Overcloud inventory for Kolla. Adapted from multinode inventory in Kolla
|
||||||
|
# repository.
|
||||||
|
|
||||||
|
[controllers]
|
||||||
|
# These hostnames must be resolvable from your deployment host
|
||||||
|
{% for controller in groups['controllers'] %}
|
||||||
|
{% set controller_hv=hostvars[controller] %}
|
||||||
|
{{ controller }}{% if "ansible_host" in controller_hv %} ansible_host={{ controller_hv["ansible_host"] }}{% endif %}
|
||||||
|
{% endfor %}
|
||||||
|
|
||||||
|
[controllers:vars]
|
||||||
|
ansible_user=kolla
|
||||||
|
ansible_become=true
|
||||||
|
|
||||||
|
# These initial groups are the only groups required to be modified. The
|
||||||
|
# additional groups are for more control of the environment.
|
||||||
|
[control:children]
|
||||||
|
controllers
|
||||||
|
|
||||||
|
# The network nodes are where your l3-agent and loadbalancers will run
|
||||||
|
# This can be the same as a host in the control group
|
||||||
|
[network:children]
|
||||||
|
controllers
|
||||||
|
|
||||||
|
[compute:children]
|
||||||
|
controllers
|
||||||
|
|
||||||
|
[monitoring:children]
|
||||||
|
controllers
|
||||||
|
|
||||||
|
[storage:children]
|
||||||
|
controllers
|
||||||
|
|
||||||
|
[baremetal:children]
|
||||||
|
control
|
||||||
|
network
|
||||||
|
compute
|
||||||
|
storage
|
||||||
|
monitoring
|
||||||
|
|
||||||
|
# You can explicitly specify which hosts run each project by updating the
|
||||||
|
# groups in the sections below. Common services are grouped together.
|
||||||
|
[collectd:children]
|
||||||
|
compute
|
||||||
|
|
||||||
|
[grafana:children]
|
||||||
|
monitoring
|
||||||
|
|
||||||
|
[etcd:children]
|
||||||
|
control
|
||||||
|
|
||||||
|
[influxdb:children]
|
||||||
|
monitoring
|
||||||
|
|
||||||
|
[kibana:children]
|
||||||
|
control
|
||||||
|
|
||||||
|
[telegraf:children]
|
||||||
|
monitoring
|
||||||
|
|
||||||
|
[elasticsearch:children]
|
||||||
|
control
|
||||||
|
|
||||||
|
[haproxy:children]
|
||||||
|
network
|
||||||
|
|
||||||
|
[mariadb:children]
|
||||||
|
control
|
||||||
|
|
||||||
|
[rabbitmq:children]
|
||||||
|
control
|
||||||
|
|
||||||
|
[mongodb:children]
|
||||||
|
control
|
||||||
|
|
||||||
|
[keystone:children]
|
||||||
|
control
|
||||||
|
|
||||||
|
[glance:children]
|
||||||
|
control
|
||||||
|
|
||||||
|
[nova:children]
|
||||||
|
control
|
||||||
|
|
||||||
|
[neutron:children]
|
||||||
|
network
|
||||||
|
|
||||||
|
[cinder:children]
|
||||||
|
control
|
||||||
|
|
||||||
|
[cloudkitty:children]
|
||||||
|
control
|
||||||
|
|
||||||
|
[memcached:children]
|
||||||
|
control
|
||||||
|
|
||||||
|
[horizon:children]
|
||||||
|
control
|
||||||
|
|
||||||
|
[swift:children]
|
||||||
|
control
|
||||||
|
|
||||||
|
[barbican:children]
|
||||||
|
control
|
||||||
|
|
||||||
|
[heat:children]
|
||||||
|
control
|
||||||
|
|
||||||
|
[murano:children]
|
||||||
|
control
|
||||||
|
|
||||||
|
[ironic:children]
|
||||||
|
control
|
||||||
|
|
||||||
|
[ceph:children]
|
||||||
|
control
|
||||||
|
|
||||||
|
[magnum:children]
|
||||||
|
control
|
||||||
|
|
||||||
|
[sahara:children]
|
||||||
|
control
|
||||||
|
|
||||||
|
[mistral:children]
|
||||||
|
control
|
||||||
|
|
||||||
|
[manila:children]
|
||||||
|
control
|
||||||
|
|
||||||
|
[ceilometer:children]
|
||||||
|
control
|
||||||
|
|
||||||
|
[aodh:children]
|
||||||
|
control
|
||||||
|
|
||||||
|
[congress:children]
|
||||||
|
control
|
||||||
|
|
||||||
|
[gnocchi:children]
|
||||||
|
control
|
||||||
|
|
||||||
|
# Tempest
|
||||||
|
[tempest:children]
|
||||||
|
control
|
||||||
|
|
||||||
|
[senlin:children]
|
||||||
|
control
|
||||||
|
|
||||||
|
[vmtp:children]
|
||||||
|
control
|
||||||
|
|
||||||
|
[watcher:children]
|
||||||
|
control
|
||||||
|
|
||||||
|
[rally:children]
|
||||||
|
control
|
||||||
|
|
||||||
|
# Additional control implemented here. These groups allow you to control which
|
||||||
|
# services run on which hosts at a per-service level.
|
||||||
|
#
|
||||||
|
# Word of caution: Some services are required to run on the same host to
|
||||||
|
# function appropriately. For example, neutron-metadata-agent must run on the
|
||||||
|
# same host as the l3-agent and (depending on configuration) the dhcp-agent.
|
||||||
|
|
||||||
|
# Glance
|
||||||
|
[glance-api:children]
|
||||||
|
glance
|
||||||
|
|
||||||
|
[glance-registry:children]
|
||||||
|
glance
|
||||||
|
|
||||||
|
# Nova
|
||||||
|
[nova-api:children]
|
||||||
|
nova
|
||||||
|
|
||||||
|
[nova-conductor:children]
|
||||||
|
nova
|
||||||
|
|
||||||
|
[nova-consoleauth:children]
|
||||||
|
nova
|
||||||
|
|
||||||
|
[nova-novncproxy:children]
|
||||||
|
nova
|
||||||
|
|
||||||
|
[nova-scheduler:children]
|
||||||
|
nova
|
||||||
|
|
||||||
|
[nova-spicehtml5proxy:children]
|
||||||
|
nova
|
||||||
|
|
||||||
|
[nova-compute-ironic:children]
|
||||||
|
nova
|
||||||
|
|
||||||
|
# Neutron
|
||||||
|
[neutron-server:children]
|
||||||
|
control
|
||||||
|
|
||||||
|
[neutron-dhcp-agent:children]
|
||||||
|
neutron
|
||||||
|
|
||||||
|
[neutron-l3-agent:children]
|
||||||
|
neutron
|
||||||
|
|
||||||
|
[neutron-lbaas-agent:children]
|
||||||
|
neutron
|
||||||
|
|
||||||
|
[neutron-metadata-agent:children]
|
||||||
|
neutron
|
||||||
|
|
||||||
|
[neutron-vpnaas-agent:children]
|
||||||
|
neutron
|
||||||
|
|
||||||
|
# Ceph
|
||||||
|
[ceph-mon:children]
|
||||||
|
ceph
|
||||||
|
|
||||||
|
[ceph-rgw:children]
|
||||||
|
ceph
|
||||||
|
|
||||||
|
[ceph-osd:children]
|
||||||
|
storage
|
||||||
|
|
||||||
|
# Cinder
|
||||||
|
[cinder-api:children]
|
||||||
|
cinder
|
||||||
|
|
||||||
|
[cinder-backup:children]
|
||||||
|
storage
|
||||||
|
|
||||||
|
[cinder-scheduler:children]
|
||||||
|
cinder
|
||||||
|
|
||||||
|
[cinder-volume:children]
|
||||||
|
storage
|
||||||
|
|
||||||
|
# Cloudkitty
|
||||||
|
[cloudkitty-api:children]
|
||||||
|
cloudkitty
|
||||||
|
|
||||||
|
[cloudkitty-processor:children]
|
||||||
|
cloudkitty
|
||||||
|
|
||||||
|
# iSCSI
|
||||||
|
[iscsid:children]
|
||||||
|
compute
|
||||||
|
storage
|
||||||
|
ironic-conductor
|
||||||
|
|
||||||
|
[tgtd:children]
|
||||||
|
storage
|
||||||
|
|
||||||
|
# Manila
|
||||||
|
[manila-api:children]
|
||||||
|
manila
|
||||||
|
|
||||||
|
[manila-scheduler:children]
|
||||||
|
manila
|
||||||
|
|
||||||
|
[manila-share:children]
|
||||||
|
network
|
||||||
|
|
||||||
|
# Swift
|
||||||
|
[swift-proxy-server:children]
|
||||||
|
swift
|
||||||
|
|
||||||
|
[swift-account-server:children]
|
||||||
|
storage
|
||||||
|
|
||||||
|
[swift-container-server:children]
|
||||||
|
storage
|
||||||
|
|
||||||
|
[swift-object-server:children]
|
||||||
|
storage
|
||||||
|
|
||||||
|
# Barbican
|
||||||
|
[barbican-api:children]
|
||||||
|
barbican
|
||||||
|
|
||||||
|
[barbican-keystone-listener:children]
|
||||||
|
barbican
|
||||||
|
|
||||||
|
[barbican-worker:children]
|
||||||
|
barbican
|
||||||
|
|
||||||
|
# Heat
|
||||||
|
[heat-api:children]
|
||||||
|
heat
|
||||||
|
|
||||||
|
[heat-api-cfn:children]
|
||||||
|
heat
|
||||||
|
|
||||||
|
[heat-engine:children]
|
||||||
|
heat
|
||||||
|
|
||||||
|
# Murano
|
||||||
|
[murano-api:children]
|
||||||
|
murano
|
||||||
|
|
||||||
|
[murano-engine:children]
|
||||||
|
murano
|
||||||
|
|
||||||
|
# Ironic
|
||||||
|
[ironic-api:children]
|
||||||
|
ironic
|
||||||
|
|
||||||
|
[ironic-conductor:children]
|
||||||
|
ironic
|
||||||
|
|
||||||
|
[ironic-inspector:children]
|
||||||
|
ironic
|
||||||
|
|
||||||
|
[ironic-pxe:children]
|
||||||
|
ironic
|
||||||
|
|
||||||
|
# Magnum
|
||||||
|
[magnum-api:children]
|
||||||
|
magnum
|
||||||
|
|
||||||
|
[magnum-conductor:children]
|
||||||
|
magnum
|
||||||
|
|
||||||
|
# Sahara
|
||||||
|
[sahara-api:children]
|
||||||
|
sahara
|
||||||
|
|
||||||
|
[sahara-engine:children]
|
||||||
|
sahara
|
||||||
|
|
||||||
|
# Mistral
|
||||||
|
[mistral-api:children]
|
||||||
|
mistral
|
||||||
|
|
||||||
|
[mistral-executor:children]
|
||||||
|
mistral
|
||||||
|
|
||||||
|
[mistral-engine:children]
|
||||||
|
mistral
|
||||||
|
|
||||||
|
# Ceilometer
|
||||||
|
[ceilometer-api:children]
|
||||||
|
ceilometer
|
||||||
|
|
||||||
|
[ceilometer-central:children]
|
||||||
|
ceilometer
|
||||||
|
|
||||||
|
[ceilometer-notification:children]
|
||||||
|
ceilometer
|
||||||
|
|
||||||
|
[ceilometer-collector:children]
|
||||||
|
ceilometer
|
||||||
|
|
||||||
|
[ceilometer-compute:children]
|
||||||
|
compute
|
||||||
|
|
||||||
|
# Aodh
|
||||||
|
[aodh-api:children]
|
||||||
|
aodh
|
||||||
|
|
||||||
|
[aodh-evaluator:children]
|
||||||
|
aodh
|
||||||
|
|
||||||
|
[aodh-listener:children]
|
||||||
|
aodh
|
||||||
|
|
||||||
|
[aodh-notifier:children]
|
||||||
|
aodh
|
||||||
|
|
||||||
|
# Congress
|
||||||
|
[congress-api:children]
|
||||||
|
congress
|
||||||
|
|
||||||
|
[congress-datasource:children]
|
||||||
|
congress
|
||||||
|
|
||||||
|
[congress-policy-engine:children]
|
||||||
|
congress
|
||||||
|
|
||||||
|
# Gnocchi
|
||||||
|
[gnocchi-api:children]
|
||||||
|
gnocchi
|
||||||
|
|
||||||
|
[gnocchi-statsd:children]
|
||||||
|
gnocchi
|
||||||
|
|
||||||
|
[gnocchi-metricd:children]
|
||||||
|
gnocchi
|
||||||
|
|
||||||
|
# Multipathd
|
||||||
|
[multipathd:children]
|
||||||
|
compute
|
||||||
|
|
||||||
|
# Watcher
|
||||||
|
[watcher-api:children]
|
||||||
|
watcher
|
||||||
|
|
||||||
|
[watcher-engine:children]
|
||||||
|
watcher
|
||||||
|
|
||||||
|
[watcher-applier:children]
|
||||||
|
watcher
|
||||||
|
|
||||||
|
# Senlin
|
||||||
|
[senlin-api:children]
|
||||||
|
senlin
|
||||||
|
|
||||||
|
[senlin-engine:children]
|
||||||
|
senlin
|
17
ansible/roles/kolla-ansible/templates/seed.j2
Normal file
@ -0,0 +1,17 @@
|
|||||||
|
# {{ ansible_managed }}
|
||||||
|
|
||||||
|
# Simple inventory for bootstrapping Kolla seed node.
|
||||||
|
[seed]
|
||||||
|
{% for seed in groups['seed'] %}
|
||||||
|
{% set seed_hv=hostvars[seed] %}
|
||||||
|
{{ seed }}{% if "ansible_host" in seed_hv %} ansible_host={{ seed_hv["ansible_host"] }}{% endif %}
|
||||||
|
{% endfor %}
|
||||||
|
|
||||||
|
[seed:vars]
|
||||||
|
ansible_user=kolla
|
||||||
|
|
||||||
|
[baremetal:children]
|
||||||
|
seed
|
||||||
|
|
||||||
|
[bifrost:children]
|
||||||
|
seed
|
42
ansible/roles/kolla-ansible/vars/main.yml
Normal file
@ -0,0 +1,42 @@
|
|||||||
|
---
|
||||||
|
kolla_install_dir: "{{ kolla_venv }}/share/kolla"
|
||||||
|
|
||||||
|
# List of features supported by Kolla as enable_* flags.
|
||||||
|
kolla_feature_flags:
|
||||||
|
- aodh
|
||||||
|
- barbican
|
||||||
|
- ceilometer
|
||||||
|
- central_logging
|
||||||
|
- ceph
|
||||||
|
- ceph_rgw
|
||||||
|
- cinder
|
||||||
|
- cinder_backend_iscsi
|
||||||
|
- cinder_backend_lvm
|
||||||
|
- cloudkitty
|
||||||
|
- congress
|
||||||
|
- etcd
|
||||||
|
- gnocchi
|
||||||
|
- grafana
|
||||||
|
- heat
|
||||||
|
- horizon
|
||||||
|
- influxdb
|
||||||
|
- ironic
|
||||||
|
- kuryr
|
||||||
|
- magnum
|
||||||
|
- manila
|
||||||
|
- mistral
|
||||||
|
- mongodb
|
||||||
|
- murano
|
||||||
|
- multipathd
|
||||||
|
- neutron_dvr
|
||||||
|
- neutron_lbaas
|
||||||
|
- neutron_qos
|
||||||
|
- neutron_agent_ha
|
||||||
|
- neutron_vpnaas
|
||||||
|
- rally
|
||||||
|
- sahara
|
||||||
|
- senlin
|
||||||
|
- swift
|
||||||
|
- telegraf
|
||||||
|
- tempest
|
||||||
|
- watcher
|
46
ansible/roles/kolla-bifrost/defaults/main.yml
Normal file
@ -0,0 +1,46 @@
|
|||||||
|
---
|
||||||
|
# Directory where Kolla custom configuration files will be installed.
|
||||||
|
kolla_node_custom_config_path:
|
||||||
|
|
||||||
|
# DIB image OS element.
|
||||||
|
kolla_bifrost_dib_os_element:
|
||||||
|
|
||||||
|
# List of DIB image elements.
|
||||||
|
kolla_bifrost_dib_elements: []
|
||||||
|
|
||||||
|
# DIB image init element.
|
||||||
|
kolla_bifrost_dib_init_element:
|
||||||
|
|
||||||
|
# DIB image environment variables.
|
||||||
|
kolla_bifrost_dib_env_vars: {}
|
||||||
|
|
||||||
|
# List of DIB image packages.
|
||||||
|
kolla_bifrost_dib_packages: []
|
||||||
|
|
||||||
|
# List of Ironic drivers to enable.
|
||||||
|
kolla_bifrost_enabled_drivers: []
|
||||||
|
|
||||||
|
# Whether to enable the Ironic PXE drivers.
|
||||||
|
kolla_bifrost_enable_pxe_drivers: false
|
||||||
|
|
||||||
|
# List of Ironic PXE drivers to enable if kolla_bifrost_enable_pxe_drivers.
|
||||||
|
kolla_bifrost_pxe_drivers: []
|
||||||
|
|
||||||
|
# IP address range for DHCP.
|
||||||
|
kolla_bifrost_dhcp_pool_start:
|
||||||
|
kolla_bifrost_dhcp_pool_end:
|
||||||
|
|
||||||
|
# Default route provided to nodes via DHCP.
|
||||||
|
kolla_bifrost_dnsmasq_router:
|
||||||
|
|
||||||
|
# List of DNS servers provided to nodes via DHCP.
|
||||||
|
kolla_bifrost_dnsmasq_dns_servers: []
|
||||||
|
|
||||||
|
# DNS domain provided to nodes via DHCP.
|
||||||
|
kolla_bifrost_domain:
|
||||||
|
|
||||||
|
# Server inventory to be configured in {{ kolla_node_custom_config_path }}/bifrost/servers.yml.
|
||||||
|
kolla_bifrost_servers: {}
|
||||||
|
|
||||||
|
# Free form extra configuration to append to {{ kolla_node_custom_config_path }}/bifrost/bifrost.yml.
|
||||||
|
kolla_bifrost_extra_globals:
|
18
ansible/roles/kolla-bifrost/tasks/main.yml
Normal file
@ -0,0 +1,18 @@
|
|||||||
|
---
|
||||||
|
- name: Ensure the Kolla Bifrost configuration directories exist
|
||||||
|
file:
|
||||||
|
path: "{{ kolla_node_custom_config_path }}/bifrost"
|
||||||
|
state: directory
|
||||||
|
mode: 0755
|
||||||
|
become: True
|
||||||
|
|
||||||
|
- name: Ensure the Kolla Bifrost configuration files exist
|
||||||
|
template:
|
||||||
|
src: "{{ item.src }}"
|
||||||
|
dest: "{{ kolla_node_custom_config_path }}/bifrost/{{ item.dest }}"
|
||||||
|
mode: 0644
|
||||||
|
become: True
|
||||||
|
with_items:
|
||||||
|
- { src: bifrost.yml.j2, dest: bifrost.yml }
|
||||||
|
- { src: dib.yml.j2, dest: dib.yml }
|
||||||
|
- { src: servers.yml.j2, dest: servers.yml }
|
35
ansible/roles/kolla-bifrost/templates/bifrost.yml.j2
Normal file
@ -0,0 +1,35 @@
|
|||||||
|
---
|
||||||
|
# List of enabled Ironic drivers.
|
||||||
|
enabled_drivers: "{{ kolla_bifrost_enabled_drivers | join(',') }}"
|
||||||
|
|
||||||
|
# Whether to enable legacy PXE/iscsi drivers.
|
||||||
|
enable_pxe_drivers: {{ kolla_bifrost_enable_pxe_drivers | bool }}
|
||||||
|
|
||||||
|
# List of legacy PXE/iscsi drivers to enable.
|
||||||
|
pxe_drivers: "{{ kolla_bifrost_pxe_drivers | join(',') }}"
|
||||||
|
|
||||||
|
# IP address range for DHCP.
|
||||||
|
dhcp_pool_start: "{{ kolla_bifrost_dhcp_pool_start }}"
|
||||||
|
dhcp_pool_end: "{{ kolla_bifrost_dhcp_pool_end }}"
|
||||||
|
|
||||||
|
{% if kolla_bifrost_dnsmasq_router %}
|
||||||
|
# Default route provided to nodes via DHCP.
|
||||||
|
dnsmasq_router: "{{ kolla_bifrost_dnsmasq_router }}"
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
{% if kolla_bifrost_dnsmasq_dns_servers %}
|
||||||
|
# DNS servers provided to nodes via DHCP.
|
||||||
|
dnsmasq_dns_servers: "{{ kolla_bifrost_dnsmasq_dns_servers | join(',') }}"
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
{% if kolla_bifrost_domain %}
|
||||||
|
# DNS domain provided to nodes via DHCP.
|
||||||
|
domain: "{{ kolla_bifrost_domain }}"
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
{% if kolla_bifrost_extra_globals %}
|
||||||
|
###############################################################################
|
||||||
|
# Extra configuration
|
||||||
|
|
||||||
|
{{ kolla_bifrost_extra_globals|to_nice_yaml }}
|
||||||
|
{% endif %}
|
12
ansible/roles/kolla-bifrost/templates/dib.yml.j2
Normal file
@ -0,0 +1,12 @@
|
|||||||
|
---
|
||||||
|
# Diskimage-builder element for base OS.
|
||||||
|
dib_os_element: "{{ kolla_bifrost_dib_os_element }}"
|
||||||
|
|
||||||
|
# List of DIB image elements.
|
||||||
|
dib_elements: "{{ (kolla_bifrost_dib_elements + [kolla_bifrost_dib_init_element]) | join(' ') }}"
|
||||||
|
|
||||||
|
# DIB image environment variables.
|
||||||
|
dib_env_vars: "{{ kolla_bifrost_dib_env_vars }}"
|
||||||
|
|
||||||
|
# List of DIB image packages.
|
||||||
|
dib_packages: "{{ kolla_bifrost_dib_packages | join(',') }}"
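For illustration, hypothetical values of kolla_bifrost_dib_os_element: centos7,
kolla_bifrost_dib_elements: [enable-serial-console],
kolla_bifrost_dib_init_element: cloud-init-datasources and
kolla_bifrost_dib_packages: [vim] would render dib.yml roughly as:

    dib_os_element: "centos7"
    dib_elements: "enable-serial-console cloud-init-datasources"
    dib_packages: "vim"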
|
2
ansible/roles/kolla-bifrost/templates/servers.yml.j2
Normal file
@ -0,0 +1,2 @@
|
|||||||
|
---
|
||||||
|
{{ kolla_bifrost_servers | to_nice_yaml }}
|
12
ansible/roles/kolla-build/defaults/main.yml
Normal file
@ -0,0 +1,12 @@
|
|||||||
|
---
|
||||||
|
# Directory where Kolla config files will be installed.
|
||||||
|
kolla_config_path:
|
||||||
|
|
||||||
|
# Valid options are [ centos, fedora, oraclelinux, ubuntu ]
|
||||||
|
kolla_base_distro:
|
||||||
|
|
||||||
|
# Valid options are [ binary, source ]
|
||||||
|
kolla_install_type:
|
||||||
|
|
||||||
|
# Valid option is Docker repository tag
|
||||||
|
kolla_openstack_release:
|
10
ansible/roles/kolla-build/tasks/main.yml
Normal file
@ -0,0 +1,10 @@
|
|||||||
|
---
|
||||||
|
- name: Ensure the Kolla build configuration files exist
|
||||||
|
template:
|
||||||
|
src: "{{ item.src }}"
|
||||||
|
dest: "{{ kolla_config_path }}/{{ item.dest }}"
|
||||||
|
mode: 0644
|
||||||
|
become: True
|
||||||
|
with_items:
|
||||||
|
- { src: kolla-build.conf.j2, dest: kolla-build.conf }
|
||||||
|
- { src: template-override.j2.j2, dest: template-override.j2 }
|
15
ansible/roles/kolla-build/templates/kolla-build.conf.j2
Normal file
@ -0,0 +1,15 @@
|
|||||||
|
# {{ ansible_managed }}
|
||||||
|
|
||||||
|
[DEFAULT]
|
||||||
|
|
||||||
|
# Base container image distribution.
|
||||||
|
base={{ kolla_base_distro }}
|
||||||
|
|
||||||
|
# Method of OpenStack install. Valid options are [ binary, source ]
|
||||||
|
type={{ kolla_install_type }}
|
||||||
|
|
||||||
|
# Docker image tag to apply.
|
||||||
|
tag={{ kolla_openstack_release }}
|
||||||
|
|
||||||
|
# Path to a file containing template overrides.
|
||||||
|
template_override={{ kolla_config_path }}/template-override.j2
|
11
ansible/roles/kolla-build/templates/template-override.j2.j2
Normal file
@ -0,0 +1,11 @@
|
|||||||
|
# {{ ansible_managed }}
|
||||||
|
|
||||||
|
{% raw %}
|
||||||
|
{% extends parent_template %}
|
||||||
|
|
||||||
|
# Disable troublesome keys
|
||||||
|
{% set base_yum_repo_keys_override=['http://yum.mariadb.org/RPM-GPG-KEY-MariaDB'] %}
|
||||||
|
|
||||||
|
# Disable repos with troublesome keys
|
||||||
|
{% set base_yum_repo_files_override=['MariaDB.repo'] %}
|
||||||
|
{% endraw %}
|
3
ansible/roles/kolla-host/defaults/main.yml
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
---
|
||||||
|
# SSH public key to be authorized by the Kolla host.
|
||||||
|
kolla_authorized_key:
|
14
ansible/roles/kolla-host/tasks/main.yml
Normal file
@ -0,0 +1,14 @@
|
|||||||
|
---
|
||||||
|
- name: Ensure required packages are installed
|
||||||
|
yum:
|
||||||
|
name: "{{ item }}"
|
||||||
|
state: installed
|
||||||
|
become: True
|
||||||
|
with_items:
|
||||||
|
- vim
|
||||||
|
|
||||||
|
- name: Ensure Kolla user has authorized our SSH key
|
||||||
|
authorized_key:
|
||||||
|
user: kolla
|
||||||
|
key: "{{ kolla_authorized_key }}"
|
||||||
|
become: True
|
17
ansible/roles/kolla-openstack/defaults/main.yml
Normal file
@ -0,0 +1,17 @@
|
|||||||
|
---
|
||||||
|
# Directory where Kolla custom configuration files will be installed.
|
||||||
|
kolla_node_custom_config_path:
|
||||||
|
|
||||||
|
# List of enabled Ironic drivers.
|
||||||
|
ironic_drivers:
|
||||||
|
- agent_ssh
|
||||||
|
- agent_ipmitool
|
||||||
|
- pxe_ssh
|
||||||
|
- pxe_ipmitool
|
||||||
|
|
||||||
|
# Free form extra configuration to append to glance-api.conf and
|
||||||
|
# glance-registry.conf.
|
||||||
|
kolla_extra_glance:
|
||||||
|
|
||||||
|
# Free form extra configuration to append to ironic.conf.
|
||||||
|
kolla_extra_ironic:
|
24
ansible/roles/kolla-openstack/tasks/main.yml
Normal file
@ -0,0 +1,24 @@
|
|||||||
|
---
|
||||||
|
- name: Ensure the Kolla OpenStack configuration directories exist
|
||||||
|
file:
|
||||||
|
path: "{{ kolla_node_custom_config_path }}/{{ item.name }}"
|
||||||
|
state: directory
|
||||||
|
owner: kolla
|
||||||
|
group: kolla
|
||||||
|
mode: 0755
|
||||||
|
become: True
|
||||||
|
with_items:
|
||||||
|
- { name: swift, enabled: "{{ kolla_enable_swift }}" }
|
||||||
|
when: "{{ item.enabled | bool }}"
|
||||||
|
|
||||||
|
- name: Ensure the Kolla OpenStack configuration files exist
|
||||||
|
template:
|
||||||
|
src: "{{ item.src }}"
|
||||||
|
dest: "{{ kolla_node_custom_config_path }}/{{ item.dest }}"
|
||||||
|
owner: kolla
|
||||||
|
group: kolla
|
||||||
|
mode: 0644
|
||||||
|
become: True
|
||||||
|
with_items:
|
||||||
|
- { src: glance.conf.j2, dest: glance.conf, enabled: "{{ kolla_enable_glance }}" }
|
||||||
|
- { src: ironic.conf.j2, dest: ironic.conf, enabled: "{{ kolla_enable_ironic }}" }
|
38
ansible/roles/kolla-openstack/templates/glance.conf.j2
Normal file
@ -0,0 +1,38 @@
|
|||||||
|
# {{ ansible_managed }}
|
||||||
|
|
||||||
|
{% if kolla_enable_swift | bool %}
|
||||||
|
[glance_store]
|
||||||
|
|
||||||
|
default_store=swift
|
||||||
|
stores=swift
|
||||||
|
{% raw %}
|
||||||
|
swift_store_user=service:{{ glance_keystone_user }}
|
||||||
|
swift_store_key={{ glance_keystone_password }}
|
||||||
|
swift_store_auth_version=2
|
||||||
|
swift_store_auth_address={{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}/v2.0
|
||||||
|
{% endraw %}
|
||||||
|
swift_store_endpoint_type=internalURL
|
||||||
|
swift_store_create_container_on_put=True
|
||||||
|
|
||||||
|
#swift_store_config_file=/etc/glance/glance-api.conf
|
||||||
|
#default_swift_reference=swift_store_ref
|
||||||
|
|
||||||
|
#[swift_store_ref]
|
||||||
|
#{% raw %}
|
||||||
|
#user=service:{{ glance_keystone_user }}
|
||||||
|
#key={{ glance_keystone_password }}
|
||||||
|
#auth_version=2
|
||||||
|
#auth_address={{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}/v2.0
|
||||||
|
#user_domain_id=default
|
||||||
|
#project_domain_id=default
|
||||||
|
#{% endraw %}
|
||||||
|
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
{% if kolla_extra_glance %}
|
||||||
|
#######################
|
||||||
|
# Extra configuration
|
||||||
|
#######################
|
||||||
|
|
||||||
|
{{ kolla_extra_glance }}
|
||||||
|
{% endif %}
|
22
ansible/roles/kolla-openstack/templates/ironic.conf.j2
Normal file
@ -0,0 +1,22 @@
|
|||||||
|
# {{ ansible_managed }}
|
||||||
|
|
||||||
|
[DEFAULT]
|
||||||
|
enabled_drivers = {{ ironic_drivers | join(',') }}
|
||||||
|
|
||||||
|
[conductor]
|
||||||
|
{% raw %}
|
||||||
|
api_url = {{ internal_protocol }}://{{ hostvars[inventory_hostname]['ansible_' + provision_interface | replace('-', '_')]['ipv4']['address'] }}:{{ ironic_api_port }}
|
||||||
|
{% endraw %}
|
||||||
|
|
||||||
|
[pxe]
|
||||||
|
{% raw %}
|
||||||
|
tftp_server = {{ hostvars[inventory_hostname]['ansible_' + provision_interface | replace('-', '_')]['ipv4']['address'] }}
|
||||||
|
{% endraw %}
|
||||||
|
|
||||||
|
{% if kolla_extra_ironic %}
|
||||||
|
#######################
|
||||||
|
# Extra configuration
|
||||||
|
#######################
|
||||||
|
|
||||||
|
{{ kolla_extra_ironic }}
|
||||||
|
{% endif %}
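The api_url and tftp_server settings above are wrapped in raw blocks so that
kolla-ansible resolves them per host at deploy time. For illustration, assuming
internal_protocol is http, ironic_api_port is 6385 and a hypothetical
provision_interface of eth1 carrying 10.0.0.10, the rendered result would be:

    [conductor]
    api_url = http://10.0.0.10:6385

    [pxe]
    tftp_server = 10.0.0.10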
|
6
ansible/roles/kolla/defaults/main.yml
Normal file
@ -0,0 +1,6 @@
|
|||||||
|
---
|
||||||
|
# Virtualenv directory where Kolla will be installed.
|
||||||
|
kolla_venv: "{{ ansible_env['PWD'] }}/kolla-venv"
|
||||||
|
|
||||||
|
# Directory where Kolla config files will be installed.
|
||||||
|
kolla_config_path:
|
42
ansible/roles/kolla/tasks/main.yml
Normal file
@ -0,0 +1,42 @@
|
|||||||
|
---
|
||||||
|
- name: Ensure required packages are installed
|
||||||
|
yum:
|
||||||
|
name: "{{ item }}"
|
||||||
|
state: installed
|
||||||
|
become: True
|
||||||
|
with_items:
|
||||||
|
- gcc
|
||||||
|
- libffi-devel
|
||||||
|
- openssl-devel
|
||||||
|
- patch
|
||||||
|
- python-devel
|
||||||
|
- python-pip
|
||||||
|
- python-virtualenv
|
||||||
|
|
||||||
|
- name: Ensure the latest version of pip is installed
|
||||||
|
pip:
|
||||||
|
name: "{{ item.name }}"
|
||||||
|
state: latest
|
||||||
|
virtualenv: "{{ kolla_venv }}"
|
||||||
|
with_items:
|
||||||
|
- { name: pip }
|
||||||
|
|
||||||
|
- name: Ensure required Python packages are installed
|
||||||
|
pip:
|
||||||
|
name: "{{ item.name }}"
|
||||||
|
version: "{{ item.version }}"
|
||||||
|
state: present
|
||||||
|
virtualenv: "{{ kolla_venv }}"
|
||||||
|
with_items:
|
||||||
|
- { name: kolla, version: "{{ kolla_openstack_release }}" }
|
||||||
|
# Required for kolla-genpwd.
|
||||||
|
- { name: PyYAML, version: "3.12" }
|
||||||
|
|
||||||
|
- name: Ensure the Kolla configuration directories exist
|
||||||
|
file:
|
||||||
|
path: "{{ item }}"
|
||||||
|
state: directory
|
||||||
|
mode: 0755
|
||||||
|
become: True
|
||||||
|
with_items:
|
||||||
|
- "{{ kolla_config_path }}"
|
21
ansible/roles/libvirt-vm/defaults/main.yml
Normal file
@ -0,0 +1,21 @@
|
|||||||
|
---
|
||||||
|
# Name of the VM.
|
||||||
|
libvirt_vm_name:
|
||||||
|
|
||||||
|
# Memory in MB.
|
||||||
|
libvirt_vm_memory_mb:
|
||||||
|
|
||||||
|
# Number of vCPUs.
|
||||||
|
libvirt_vm_vcpus:
|
||||||
|
|
||||||
|
# List of volumes.
|
||||||
|
libvirt_vm_volumes: []
|
||||||
|
|
||||||
|
# List of network interfaces.
|
||||||
|
libvirt_vm_interfaces: []
|
||||||
|
|
||||||
|
# Path to cache downloaded images.
|
||||||
|
libvirt_vm_image_cache_path:
|
||||||
|
|
||||||
|
# List of authorized SSH public keys.
|
||||||
|
#libvirt_vm_public_keys: []
|
60
ansible/roles/libvirt-vm/files/virt_volume.sh
Normal file
@ -0,0 +1,60 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
# Ensure that a libvirt volume exists, optionally uploading an image.
|
||||||
|
# On success, output a JSON object with a 'changed' item.
|
||||||
|
|
||||||
|
if [[ $# -ne 4 ]] && [[ $# -ne 5 ]]; then
|
||||||
|
echo "Usage: $0 <name> <pool> <capacity> <format> [<image>]"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
NAME=$1
|
||||||
|
POOL=$2
|
||||||
|
CAPACITY=$3
|
||||||
|
FORMAT=$4
|
||||||
|
IMAGE=$5
|
||||||
|
|
||||||
|
# Check whether a volume with this name exists.
|
||||||
|
output=$(virsh vol-info --pool $POOL --vol $NAME 2>&1)
|
||||||
|
result=$?
|
||||||
|
if [[ $result -eq 0 ]]; then
|
||||||
|
echo '{"changed": false}'
|
||||||
|
exit 0
|
||||||
|
elif ! echo "$output" | grep 'Storage volume not found' >/dev/null 2>&1; then
|
||||||
|
echo "Unexpected error while getting volume info"
|
||||||
|
echo "$output"
|
||||||
|
exit $result
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Create the volume.
|
||||||
|
output=$(virsh vol-create-as --pool $POOL --name $NAME --capacity $CAPACITY --format $FORMAT 2>&1)
|
||||||
|
result=$?
|
||||||
|
if [[ $result -ne 0 ]]; then
|
||||||
|
echo "Failed to create volume"
|
||||||
|
echo "$output"
|
||||||
|
exit $result
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [[ -n $IMAGE ]]; then
|
||||||
|
# Upload an image to the volume.
|
||||||
|
output=$(virsh vol-upload --pool $POOL --vol $NAME --file $IMAGE 2>&1)
|
||||||
|
result=$?
|
||||||
|
if [[ $result -ne 0 ]]; then
|
||||||
|
echo "Failed to upload image $IMAGE to volume $NAME"
|
||||||
|
echo "$output"
|
||||||
|
virsh vol-delete --pool $POOL --vol $NAME
|
||||||
|
exit $result
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Resize the volume to the requested capacity.
|
||||||
|
output=$(virsh vol-resize --pool $POOL --vol $NAME --capacity $CAPACITY 2>&1)
|
||||||
|
result=$?
|
||||||
|
if [[ $result -ne 0 ]]; then
|
||||||
|
echo "Failed to resize volume $NAME to $CAPACITY"
|
||||||
|
echo "$output"
|
||||||
|
exit $result
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo '{"changed": true}'
|
||||||
|
exit 0
|
3
ansible/roles/libvirt-vm/tasks/main.yml
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
---
|
||||||
|
- include: volumes.yml
|
||||||
|
- include: vm.yml
|
16
ansible/roles/libvirt-vm/tasks/vm.yml
Normal file
@ -0,0 +1,16 @@
|
|||||||
|
---
|
||||||
|
- name: Ensure the VM is defined
|
||||||
|
virt:
|
||||||
|
name: "{{ libvirt_vm_name }}"
|
||||||
|
command: define
|
||||||
|
xml: "{{ lookup('template', 'vm.xml.j2') }}"
|
||||||
|
|
||||||
|
- name: Ensure the VM is running
|
||||||
|
virt:
|
||||||
|
name: "{{ libvirt_vm_name }}"
|
||||||
|
state: running
|
||||||
|
|
||||||
|
- name: Ensure the VM is started at boot
|
||||||
|
virt:
|
||||||
|
name: "{{ libvirt_vm_name }}"
|
||||||
|
command: autostart
|
30
ansible/roles/libvirt-vm/tasks/volumes.yml
Normal file
@ -0,0 +1,30 @@
|
|||||||
|
---
|
||||||
|
- name: Ensure remote images are downloaded
|
||||||
|
get_url:
|
||||||
|
url: "{{ item }}"
|
||||||
|
dest: "{{ libvirt_vm_image_cache_path }}/{{ item | basename }}"
|
||||||
|
with_items: "{{ libvirt_vm_volumes | selectattr('image', 'defined') | map(attribute='image') | list }}"
|
||||||
|
when: "{{ 'http' in item }}"
|
||||||
|
|
||||||
|
- name: Ensure local images are copied
|
||||||
|
copy:
|
||||||
|
src: "{{ item }}"
|
||||||
|
dest: "{{ libvirt_vm_image_cache_path }}/{{ item | basename }}"
|
||||||
|
with_items: "{{ libvirt_vm_volumes | selectattr('image', 'defined') | map(attribute='image') | list }}"
|
||||||
|
when: "{{ 'http' not in item }}"
|
||||||
|
|
||||||
|
- name: Ensure the VM volumes exist
|
||||||
|
script: >
|
||||||
|
virt_volume.sh
|
||||||
|
{{ item.name }}
|
||||||
|
{{ item.pool }}
|
||||||
|
{{ item.capacity }}
|
||||||
|
{{ item.format | default('qcow2') }}
|
||||||
|
{% if item.image is defined %}
|
||||||
|
{{ libvirt_vm_image_cache_path }}/{{ item.image | basename }}
|
||||||
|
{% endif %}
|
||||||
|
with_items: "{{ libvirt_vm_volumes }}"
|
||||||
|
register: volume_result
|
||||||
|
changed_when:
|
||||||
|
- "{{ volume_result | success }}"
|
||||||
|
- "{{ (volume_result.stdout | from_json).changed | default(True) }}"
|
30
ansible/roles/libvirt-vm/templates/vm.xml.j2
Normal file
@ -0,0 +1,30 @@
|
|||||||
|
<domain type='kvm'>
|
||||||
|
<name>{{ libvirt_vm_name }}</name>
|
||||||
|
<memory>{{ libvirt_vm_memory_mb | int * 1024 }}</memory>
|
||||||
|
<vcpu>{{ libvirt_vm_vcpus }}</vcpu>
|
||||||
|
<clock sync="localtime"/>
|
||||||
|
<os>
|
||||||
|
<type arch='x86_64'>hvm</type>
|
||||||
|
</os>
|
||||||
|
<devices>
|
||||||
|
{% for volume in libvirt_vm_volumes %}
|
||||||
|
<disk type='volume' device='{{ volume.device | default('disk') }}'>
|
||||||
|
<driver name='qemu' type='{{ volume.format }}'/>
|
||||||
|
<source pool='{{ volume.pool }}' volume='{{ volume.name }}'/>
|
||||||
|
<target dev='vd{{ 'abcdefghijklmnopqrstuvwxyz'[loop.index] }}'/>
|
||||||
|
</disk>
|
||||||
|
{% endfor %}
|
||||||
|
{% for interface in libvirt_vm_interfaces %}
|
||||||
|
<interface type='network'>
|
||||||
|
<source network='{{ interface.network }}'/>
|
||||||
|
<model type='virtio'/>
|
||||||
|
</interface>
|
||||||
|
{% endfor %}
|
||||||
|
<serial type='pty'>
|
||||||
|
<target port='0'/>
|
||||||
|
</serial>
|
||||||
|
<console type='pty'>
|
||||||
|
<target type='serial' port='0'/>
|
||||||
|
</console>
|
||||||
|
</devices>
|
||||||
|
</domain>
|
17
ansible/roles/ssh-known-host/tasks/main.yml
Normal file
@ -0,0 +1,17 @@
|
|||||||
|
---
|
||||||
|
- name: Scan for SSH keys
|
||||||
|
local_action:
|
||||||
|
module: command ssh-keyscan {{ item }}
|
||||||
|
with_items:
|
||||||
|
- "{{ ansible_host|default(inventory_hostname) }}"
|
||||||
|
register: keyscan_result
|
||||||
|
changed_when: False
|
||||||
|
|
||||||
|
- name: Ensure SSH keys are in known hosts
|
||||||
|
local_action:
|
||||||
|
module: known_hosts
|
||||||
|
host: "{{ item[0].item }}"
|
||||||
|
key: "{{ item[1] }}"
|
||||||
|
with_subelements:
|
||||||
|
- "{{ keyscan_result.results }}"
|
||||||
|
- stdout_lines
|
34
ansible/roles/swift-setup/defaults/main.yml
Normal file
@ -0,0 +1,34 @@
|
|||||||
|
---
|
||||||
|
# List of names of block devices to use for Swift.
|
||||||
|
swift_block_devices: []
|
||||||
|
|
||||||
|
# Docker image to use to build rings.
|
||||||
|
swift_image:
|
||||||
|
|
||||||
|
# Host on which to build rings.
|
||||||
|
swift_ring_build_host:
|
||||||
|
|
||||||
|
# Path in which to build ring files.
|
||||||
|
swift_ring_build_path: /tmp/swift-rings
|
||||||
|
|
||||||
|
# Ports on which Swift services listen.
|
||||||
|
swift_service_ports:
|
||||||
|
object: 6000
|
||||||
|
account: 6001
|
||||||
|
container: 6002
|
||||||
|
|
||||||
|
# Base-2 logarithm of the number of partitions.
|
||||||
|
# i.e. num_partitions=2^<swift_part_power>.
|
||||||
|
swift_part_power:
|
||||||
|
|
||||||
|
# Object replication count.
|
||||||
|
swift_replication_count:
|
||||||
|
|
||||||
|
# Minimum time in hours between moving a given partition.
|
||||||
|
swift_min_part_hours:
|
||||||
|
|
||||||
|
# ID of the region for this Swift service.
|
||||||
|
swift_region:
|
||||||
|
|
||||||
|
# ID of the zone for this Swift service.
|
||||||
|
swift_zone:
|
10
ansible/roles/swift-setup/tasks/devices.yml
Normal file
@ -0,0 +1,10 @@
|
|||||||
|
---
|
||||||
|
- name: Ensure Swift partitions exist
|
||||||
|
command: parted /dev/{{ item }} -s -- mklabel gpt mkpart KOLLA_SWIFT_DATA 1 -1
|
||||||
|
with_items: "{{ swift_block_devices }}"
|
||||||
|
become: True
|
||||||
|
|
||||||
|
- name: Ensure Swift XFS file systems exist
|
||||||
|
command: mkfs.xfs -f -L d{{ swift_block_devices.index(item) }} /dev/{{ item }}{% if item.startswith('loop') %}p{% endif %}1
|
||||||
|
with_items: "{{ swift_block_devices }}"
|
||||||
|
become: True
|
3
ansible/roles/swift-setup/tasks/main.yml
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
---
|
||||||
|
- include: devices.yml
|
||||||
|
- include: rings.yml
|
75
ansible/roles/swift-setup/tasks/rings.yml
Normal file
@ -0,0 +1,75 @@
|
|||||||
|
---
|
||||||
|
- name: Ensure Swift ring build directory exists
|
||||||
|
file:
|
||||||
|
path: "{{ swift_ring_build_path }}"
|
||||||
|
state: directory
|
||||||
|
delegate_to: "{{ swift_ring_build_host }}"
|
||||||
|
run_once: True
|
||||||
|
|
||||||
|
- name: Ensure Swift rings are created
|
||||||
|
command: >
|
||||||
|
docker run
|
||||||
|
--rm
|
||||||
|
-v {{ swift_ring_build_path }}/:{{ kolla_config_path }}/config/swift/
|
||||||
|
{{ swift_image }}
|
||||||
|
swift-ring-builder {{ kolla_config_path }}/config/swift/{{ item }}.builder create
|
||||||
|
{{ swift_part_power }}
|
||||||
|
{{ swift_replication_count }}
|
||||||
|
{{ swift_min_part_hours }}
|
||||||
|
with_items: "{{ swift_service_names }}"
|
||||||
|
delegate_to: "{{ swift_ring_build_host }}"
|
||||||
|
run_once: True
|
||||||
|
|
||||||
|
- name: Ensure devices are added to Swift rings
|
||||||
|
command: >
|
||||||
|
docker run
|
||||||
|
--rm
|
||||||
|
-v {{ swift_ring_build_path }}/:{{ kolla_config_path }}/config/swift/
|
||||||
|
{{ swift_image }}
|
||||||
|
swift-ring-builder {{ kolla_config_path }}/config/swift/{{ item[0] }}.builder add
|
||||||
|
--region {{ swift_region }}
|
||||||
|
--zone {{ swift_zone }}
|
||||||
|
--ip {{ internal_net_name | net_ip }}
|
||||||
|
--port {{ swift_service_ports[item[0]] }}
|
||||||
|
--device {{ item[1] }}
|
||||||
|
--weight 100
|
||||||
|
with_nested:
|
||||||
|
- "{{ swift_service_names }}"
|
||||||
|
- "{{ swift_block_devices }}"
|
||||||
|
delegate_to: "{{ swift_ring_build_host }}"
|
||||||
|
|
||||||
|
- name: Ensure Swift rings are rebalanced
|
||||||
|
command: >
|
||||||
|
docker run
|
||||||
|
--rm
|
||||||
|
-v {{ swift_ring_build_path }}/:{{ kolla_config_path }}/config/swift/
|
||||||
|
{{ swift_image }}
|
||||||
|
swift-ring-builder {{ kolla_config_path }}/config/swift/{{ item }}.builder rebalance
|
||||||
|
with_items: "{{ swift_service_names }}"
|
||||||
|
delegate_to: "{{ swift_ring_build_host }}"
|
||||||
|
run_once: True
|
||||||
|
|
||||||
|
- name: Ensure Swift ring files are copied
|
||||||
|
local_action:
|
||||||
|
module: copy
|
||||||
|
src: "{{ swift_ring_build_path }}/{{ item[0] }}.{{ item[1] }}"
|
||||||
|
dest: "{{ kolla_config_path }}/config/swift/{{ item[0] }}.{{ item[1] }}"
|
||||||
|
remote_src: True
|
||||||
|
owner: kolla
|
||||||
|
group: kolla
|
||||||
|
mode: 0644
|
||||||
|
with_nested:
|
||||||
|
- "{{ swift_service_names }}"
|
||||||
|
- - ring.gz
|
||||||
|
- builder
|
||||||
|
delegate_to: "{{ swift_ring_build_host }}"
|
||||||
|
become: True
|
||||||
|
run_once: True
|
||||||
|
|
||||||
|
- name: Remove Swift ring build directory from build host
|
||||||
|
file:
|
||||||
|
path: "{{ swift_ring_build_path }}"
|
||||||
|
state: absent
|
||||||
|
delegate_to: "{{ swift_ring_build_host }}"
|
||||||
|
become: True
|
||||||
|
run_once: True
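For illustration, with the default swift_ring_build_path of /tmp/swift-rings,
an assumed kolla_config_path of /etc/kolla, a hypothetical swift_image and
swift_part_power=10, swift_replication_count=3, swift_min_part_hours=1, the
ring creation step above runs a command of the form:

    docker run --rm -v /tmp/swift-rings/:/etc/kolla/config/swift/ \
        example.org/kolla/swift-base:latest \
        swift-ring-builder /etc/kolla/config/swift/object.builder create 10 3 1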
|
6
ansible/roles/swift-setup/vars/main.yml
Normal file
@ -0,0 +1,6 @@
|
|||||||
|
---
|
||||||
|
# List of names of Swift services.
|
||||||
|
swift_service_names:
|
||||||
|
- object
|
||||||
|
- account
|
||||||
|
- container
|
3
ansible/roles/veth/defaults/main.yml
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
---
|
||||||
|
# List of virtual interfaces to configure.
|
||||||
|
veth_interfaces: []
|
19
ansible/roles/veth/files/ifdown-veth
Executable file
@ -0,0 +1,19 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
# Take down a virtual ethernet device pair.
|
||||||
|
|
||||||
|
. /etc/init.d/functions
|
||||||
|
|
||||||
|
cd /etc/sysconfig/network-scripts
|
||||||
|
. ./network-functions
|
||||||
|
|
||||||
|
[ -f ../network ] && . ../network
|
||||||
|
|
||||||
|
CONFIG=${1}
|
||||||
|
|
||||||
|
need_config "${CONFIG}"
|
||||||
|
|
||||||
|
source_config
|
||||||
|
|
||||||
|
./ifdown-eth ${CONFIG} ${2}
|
||||||
|
./ifdown-eth ifcfg-${PEER_DEVICE} ${2}
|
28
ansible/roles/veth/files/ifup-veth
Executable file
@ -0,0 +1,28 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
# Bring up a virtual ethernet device pair.
|
||||||
|
|
||||||
|
. /etc/init.d/functions
|
||||||
|
|
||||||
|
cd /etc/sysconfig/network-scripts
|
||||||
|
. ./network-functions
|
||||||
|
|
||||||
|
[ -f ../network ] && . ../network
|
||||||
|
|
||||||
|
CONFIG=${1}
|
||||||
|
|
||||||
|
need_config "${CONFIG}"
|
||||||
|
|
||||||
|
source_config
|
||||||
|
|
||||||
|
if ! ip link show dev ${DEVICE} >/dev/null 2>&1 ; then
|
||||||
|
echo "Creating veth pair ${DEVICE} - ${PEER_DEVICE}"
|
||||||
|
ip link add dev ${DEVICE} type veth peer name ${PEER_DEVICE}
|
||||||
|
if [[ $? -ne 0 ]]; then
|
||||||
|
echo "Failed creating veth pair"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
./ifup-eth ${CONFIG} ${2}
|
||||||
|
./ifup-eth ifcfg-${PEER_DEVICE} ${2}
|
43
ansible/roles/veth/tasks/main.yml
Normal file
@ -0,0 +1,43 @@
|
|||||||
|
---
|
||||||
|
- name: Ensure veth sysconfig network control scripts exist
|
||||||
|
copy:
|
||||||
|
src: "{{ item }}"
|
||||||
|
dest: "{{ network_scripts_dir }}/{{ item }}"
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: 0755
|
||||||
|
with_items:
|
||||||
|
- "ifup-veth"
|
||||||
|
- "ifdown-veth"
|
||||||
|
become: True
|
||||||
|
register: ctl_result
|
||||||
|
|
||||||
|
- name: Ensure veth sysconfig network interface files exist
|
||||||
|
template:
|
||||||
|
src: ifcfg-veth.j2
|
||||||
|
dest: "{{ network_scripts_dir }}/ifcfg-{{ item.device }}"
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: 0755
|
||||||
|
with_items: "{{ veth_interfaces }}"
|
||||||
|
become: True
|
||||||
|
register: veth_result
|
||||||
|
|
||||||
|
- name: Ensure veth peer sysconfig network interface files exist
|
||||||
|
template:
|
||||||
|
src: ifcfg-peer.j2
|
||||||
|
dest: "{{ network_scripts_dir }}/ifcfg-{{ item.peer_device }}"
|
||||||
|
owner: root
|
||||||
|
group: root
|
||||||
|
mode: 0755
|
||||||
|
with_items: "{{ veth_interfaces }}"
|
||||||
|
become: True
|
||||||
|
register: peer_result
|
||||||
|
|
||||||
|
- name: Bounce veth interfaces
|
||||||
|
shell: ifdown {{ item[0].item.device }} ; ifup {{ item[0].item.device }}
|
||||||
|
with_together:
|
||||||
|
- "{{ veth_result.results }}"
|
||||||
|
- "{{ peer_result.results }}"
|
||||||
|
when: "{{ ctl_result|changed or item[0]|changed or item[1]|changed }}"
|
||||||
|
become: True
|
25
ansible/roles/veth/templates/ifcfg-peer.j2
Normal file
@ -0,0 +1,25 @@
DEVICE={{ item.peer_device }}
{% if item.peer_bootproto == 'static' %}
BOOTPROTO=none
{% if item.peer_address is defined %}
IPADDR={{ item.peer_address }}
{% endif %}
{% if item.peer_netmask is defined %}
NETMASK={{ item.peer_netmask }}
{% endif %}
{% if item.peer_gateway is defined %}
GATEWAY={{ item.peer_gateway }}
{% endif %}
{% endif %}
{% if item.peer_bootproto == 'dhcp' %}
BOOTPROTO=dhcp
{% endif %}
{% if item.onboot is defined %}
ONBOOT={{ item.onboot }}
{% endif %}
{% if item.peer_bridge is defined %}
BRIDGE={{ item.peer_bridge }}
{% endif %}
{% if ansible_distribution_major_version | int >= 7 %}
NM_CONTROLLED=no
{% endif %}
27
ansible/roles/veth/templates/ifcfg-veth.j2
Normal file
@ -0,0 +1,27 @@
DEVICE={{ item.device }}
TYPE=veth
PEER_DEVICE={{ item.peer_device }}
{% if item.bootproto == 'static' %}
BOOTPROTO=none
{% if item.address is defined %}
IPADDR={{ item.address }}
{% endif %}
{% if item.netmask is defined %}
NETMASK={{ item.netmask }}
{% endif %}
{% if item.gateway is defined %}
GATEWAY={{ item.gateway }}
{% endif %}
{% endif %}
{% if item.bootproto == 'dhcp' %}
BOOTPROTO=dhcp
{% endif %}
{% if item.onboot is defined %}
ONBOOT={{ item.onboot }}
{% endif %}
{% if item.bridge is defined %}
BRIDGE={{ item.bridge }}
{% endif %}
{% if ansible_distribution_major_version | int >= 7 %}
NM_CONTROLLED=no
{% endif %}
|
2
ansible/roles/veth/vars/main.yml
Normal file
2
ansible/roles/veth/vars/main.yml
Normal file
@ -0,0 +1,2 @@
|
|||||||
|
---
|
||||||
|
network_scripts_dir: "/etc/sysconfig/network-scripts"
|
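For reference, an entry in veth_interfaces consumed by the veth role above might look like the following sketch. The keys are those read by ifcfg-veth.j2 and ifcfg-peer.j2; the device names, address and bridge are purely illustrative and not part of this commit:

veth_interfaces:
  - device: veth-br-eth0
    bootproto: static
    address: 10.0.0.10
    netmask: 255.255.255.0
    onboot: yes
    peer_device: veth-patch-eth0
    peer_bootproto: static
    peer_bridge: br-eth0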
77
ansible/seed-vm.yml
Normal file
@ -0,0 +1,77 @@
---
- name: Ensure that the seed VM configdrive exists
  hosts: seed-hypervisor
  vars:
    seed_host: "{{ groups['seed'][0] }}"
    seed_hostvars: "{{ hostvars[seed_host] }}"
  pre_tasks:
    - name: Ensure the image cache directory exists
      file:
        path: "{{ image_cache_path }}"
        state: directory

  roles:
    - role: jriguera.configdrive
      # For now assume the VM OS family is the same as the hypervisor's.
      configdrive_os_family: "{{ ansible_os_family }}"
      configdrive_uuid: "{{ seed_host | to_uuid }}"
      configdrive_fqdn: "{{ seed_host }}"
      configdrive_name: "{{ seed_host }}"
      configdrive_ssh_public_key: "{{ lookup('file', '{{ ansible_user_dir }}/.ssh/id_rsa.pub') }}"
      configdrive_config_dir: "{{ image_cache_path }}"
      configdrive_volume_path: "{{ image_cache_path }}"
      configdrive_config_dir_delete: True
      configdrive_resolv:
        domain: "{{ seed_hostvars.resolv_domain | default }}"
        search: "{{ seed_hostvars.resolv_search | default }}"
        dns: "{{ seed_hostvars.resolv_nameservers | default([]) }}"
      configdrive_network_device_list: >
        {{ seed_hostvars.seed_vm_interfaces |
           map(attribute='net_name') |
           map('net_configdrive_network_device', seed_host) |
           list }}

  tasks:
    - name: Set a fact containing the configdrive image path
      set_fact:
        seed_vm_configdrive_path: "{{ image_cache_path }}/{{ seed_host }}.iso"

    - name: Ensure configdrive is decoded and decompressed
      shell: >
        base64 -d {{ image_cache_path }}/{{ seed_host | to_uuid }}.gz
        | gunzip
        > {{ seed_vm_configdrive_path }}

    - name: Ensure compressed configdrive is removed
      file:
        path: "{{ image_cache_path }}/{{ seed_host | to_uuid }}.gz"
        state: absent

- name: Ensure that the seed VM is provisioned
  hosts: seed-hypervisor
  vars:
    seed_host: "{{ groups['seed'][0] }}"
    seed_hostvars: "{{ hostvars[seed_host] }}"
  pre_tasks:
    - name: Check the size of the configdrive
      stat:
        path: "{{ seed_vm_configdrive_path }}"
      register: stat_result

  roles:
    - role: libvirt-vm
      seed_vm_configdrive_volume:
        name: "{{ seed_hostvars.seed_vm_name }}-configdrive"
        pool: "{{ seed_hostvars.seed_vm_pool }}"
        # Round size up to next multiple of 4096.
        capacity: "{{ (stat_result.stat.size + 4095) // 4096 * 4096 }}"
        device: "cdrom"
        format: "raw"
        image: "{{ seed_vm_configdrive_path }}"
      libvirt_vm_name: "{{ seed_hostvars.seed_vm_name }}"
      libvirt_vm_memory_mb: "{{ seed_hostvars.seed_vm_memory_mb }}"
      libvirt_vm_vcpus: "{{ seed_hostvars.seed_vm_vcpus }}"
      libvirt_vm_volumes: "{{ seed_hostvars.seed_vm_volumes + [seed_vm_configdrive_volume] }}"
      libvirt_vm_interfaces: "{{ seed_hostvars.seed_vm_interfaces }}"
      libvirt_vm_image_cache_path: "{{ image_cache_path }}"
  become: True
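As a quick illustration of the capacity rounding used above, assuming a hypothetical configdrive of 10000 bytes:

# Illustration only; 10000 is a made-up stat_result.stat.size value.
#   (10000 + 4095) // 4096 * 4096 = 14095 // 4096 * 4096 = 3 * 4096 = 12288
capacity: "{{ (10000 + 4095) // 4096 * 4096 }}"  # -> 12288, the next multiple of 4096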
7
ansible/ssh-known-host.yml
Normal file
@ -0,0 +1,7 @@
---
- name: Ensure known hosts are configured
  hosts: all
  gather_facts: no
  roles:
    - role: ssh-known-host
11
ansible/swift-setup.yml
Normal file
@ -0,0 +1,11 @@
---
- hosts: controllers
  roles:
    - role: swift-setup
      swift_image: "kolla/{{ kolla_base_distro }}-{{ kolla_install_type }}-swift-base:{{ kolla_openstack_release }}"
      swift_ring_build_host: "{{ groups['controllers'][0] }}"
      # ID of the region for this Swift service.
      swift_region: 1
      # ID of the zone for this Swift service.
      swift_zone: "{{ groups['controllers'].index(inventory_hostname) % swift_num_zones }}"
      when: "{{ kolla_enable_swift | bool }}"
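To illustrate the swift_zone expression above, assume a hypothetical inventory with three controllers and swift_num_zones set to 3 elsewhere in the configuration (both values are assumptions, not part of this commit):

# groups['controllers'] == ['ctl0', 'ctl1', 'ctl2']   (hypothetical host names)
# swift_num_zones: 3
#   ctl0 -> index 0 % 3 -> swift_zone 0
#   ctl1 -> index 1 % 3 -> swift_zone 1
#   ctl2 -> index 2 % 3 -> swift_zone 2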
35
ansible/test-image-centos-cloud.yml
Normal file
@ -0,0 +1,35 @@
---
- hosts: seed[0]
  vars:
    openstack_auth:
      auth_url: "{{ lookup('env', 'OS_AUTH_URL') }}"
      username: "{{ lookup('env', 'OS_USERNAME') }}"
      password: "{{ lookup('env', 'OS_PASSWORD') }}"
      project_name: "{{ lookup('env', 'OS_PROJECT_NAME') }}"
      project_domain_name: "{{ lookup('env', 'OS_PROJECT_DOMAIN_NAME') }}"
      user_domain_name: "{{ lookup('env', 'OS_USER_DOMAIN_NAME') }}"
    image_download_dir: "{{ ansible_user_dir }}/images"
  tasks:
    - name: Ensure OpenStack shade module is installed
      pip:
        name: shade
      become: True

    - name: Ensure image download directory exists
      file:
        path: "{{ image_download_dir }}"
        state: directory

    - name: Ensure CentOS 7 cloud image is downloaded
      get_url:
        url: http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2
        dest: "{{ image_download_dir }}/CentOS-7-x86_64-GenericCloud.qcow2"

    - name: Ensure test deployment image is registered with Glance
      os_image:
        auth: "{{ openstack_auth }}"
        name: centos7
        container_format: bare
        disk_format: qcow2
        state: present
        filename: "{{ image_download_dir }}/CentOS-7-x86_64-GenericCloud.qcow2"
87
ansible/test-image.yml
Normal file
@ -0,0 +1,87 @@
---
- name: Ensure user images are built and registered with Glance
  hosts: seed[0]
  vars:
    openstack_auth:
      auth_url: "{{ lookup('env', 'OS_AUTH_URL') }}"
      username: "{{ lookup('env', 'OS_USERNAME') }}"
      password: "{{ lookup('env', 'OS_PASSWORD') }}"
      project_name: "{{ lookup('env', 'OS_PROJECT_NAME') }}"
      project_domain_name: "{{ lookup('env', 'OS_PROJECT_DOMAIN_NAME') }}"
      user_domain_name: "{{ lookup('env', 'OS_USER_DOMAIN_NAME') }}"
    image_build_dir: "{{ ansible_user_dir }}/images/dib"
    image_name: centos7
    image_os_element: centos7
    image_base_elements:
      - dhcp-all-interfaces
    image_is_whole_disk: True
    image_whole_disk_elements:
      - vm
    image_partition_elements:
      - baremetal
      - grub2
    image_extra_elements: []
    image_elements: "{{ image_base_elements + (image_whole_disk_elements if image_is_whole_disk|bool else image_partition_elements) + image_extra_elements }}"
  tasks:
    - name: Ensure diskimage-builder package is installed
      yum:
        name: diskimage-builder
        state: installed
      become: True

    - name: Ensure OpenStack shade module is installed
      pip:
        name: shade
      become: True

    - name: Ensure image build directory exists
      file:
        path: "{{ image_build_dir }}"
        state: directory

    - name: Display image elements
      debug:
        var: image_elements

    - name: Ensure CentOS 7 image is built
      command: >
        disk-image-create
        {{ image_os_element }}
        {{ image_elements|join(' ') }}
        -o {{ image_name }}
      args:
        chdir: "{{ image_build_dir }}"
        creates: "{{ image_build_dir }}/{{ image_name }}.qcow2"

    - name: Ensure test deployment ramdisk and kernel images are registered with Glance
      os_image:
        auth: "{{ openstack_auth }}"
        name: "{{ image_name }}.{{ item.ext }}"
        container_format: "{{ item.container_format }}"
        disk_format: "{{ item.disk_format }}"
        filename: "{{ image_build_dir }}/{{ image_name }}.{{ item.ext }}"
        state: present
      with_items:
        - { container_format: ari, disk_format: ari, ext: initrd }
        - { container_format: aki, disk_format: aki, ext: vmlinuz }
      register: r_and_k_result
      when: not image_is_whole_disk|bool

    - name: Ensure test deployment image is registered with Glance
      os_image:
        auth: "{{ openstack_auth }}"
        name: "{{ image_name }}"
        container_format: bare
        disk_format: qcow2
        filename: "{{ image_build_dir }}/{{ image_name }}.qcow2"
        state: present

    # FIXME: This does not seem to work :(
    - name: Ensure test deployment image has kernel and ramdisk properties
      os_image:
        auth: "{{ openstack_auth }}"
        name: "{{ image_name }}"
        ramdisk: "{{ image_name }}.initrd"
        kernel: "{{ image_name }}.vmlinuz"
        state: present
      when: not image_is_whole_disk|bool
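Given the variables defined in this play, the image_elements expression resolves as follows for the two possible settings of image_is_whole_disk (shown only as a worked example of the lists already defined above):

# image_is_whole_disk: True  ->  image_elements: [dhcp-all-interfaces, vm]
# image_is_whole_disk: False ->  image_elements: [dhcp-all-interfaces, baremetal, grub2]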
58
ansible/test-infra.yml
Normal file
@ -0,0 +1,58 @@
---
- hosts: seed[0]
  vars:
    openstack_auth:
      auth_url: "{{ lookup('env', 'OS_AUTH_URL') }}"
      username: "{{ lookup('env', 'OS_USERNAME') }}"
      password: "{{ lookup('env', 'OS_PASSWORD') }}"
      project_name: "{{ lookup('env', 'OS_PROJECT_NAME') }}"
      project_domain_name: "{{ lookup('env', 'OS_PROJECT_DOMAIN_NAME') }}"
      user_domain_name: "{{ lookup('env', 'OS_USER_DOMAIN_NAME') }}"
    image_download_dir: "{{ ansible_user_dir }}/images"
  tasks:
    - name: Ensure OpenStack shade module is installed
      pip:
        name: shade
      become: True

    - name: Ensure image download directory exists
      file:
        path: "{{ image_download_dir }}"
        state: directory

    - name: Ensure Ironic CoreOS IPA deploy images are downloaded
      unarchive:
        src: http://tarballs.openstack.org/ironic-python-agent/coreos/ipa-coreos-stable-newton.tar.gz
        dest: "{{ image_download_dir }}"
        remote_src: yes

    - name: Ensure Ironic CoreOS IPA deploy images are registered with Glance
      os_image:
        auth: "{{ openstack_auth }}"
        name: "{{ item.name }}"
        container_format: "{{ item.format }}"
        disk_format: "{{ item.format }}"
        state: present
        filename: "{{ image_download_dir }}/imagebuild/coreos/UPLOAD/{{ item.filename }}"
      with_items:
        - { name: ipa.initrd, filename: coreos_production_pxe_image-oem-stable-newton.cpio.gz, format: ari }
        - { name: ipa.vmlinuz, filename: coreos_production_pxe-stable-newton.vmlinuz, format: aki }

    - name: Ensure provisioning network is registered with Neutron
      os_network:
        auth: "{{ openstack_auth }}"
        name: provision-net
        provider_network_type: flat
        provider_physical_network: physnet1
        shared: True
        state: present

    - name: Ensure provisioning subnet is registered with Neutron
      os_subnet:
        auth: "{{ openstack_auth }}"
        name: provision-subnet
        network_name: provision-net
        cidr: "{{ provision_net_cidr }}"
        allocation_pool_start: "{{ provision_net_allocation_pool_start | default(omit) }}"
        allocation_pool_end: "{{ provision_net_allocation_pool_end | default(omit) }}"
        state: present
24
ansible/test-keypair.yml
Normal file
@ -0,0 +1,24 @@
---
- name: Ensure user SSH keypair is registered with Nova
  hosts: seed[0]
  vars:
    openstack_auth:
      auth_url: "{{ lookup('env', 'OS_AUTH_URL') }}"
      username: "{{ lookup('env', 'OS_USERNAME') }}"
      password: "{{ lookup('env', 'OS_PASSWORD') }}"
      project_name: "{{ lookup('env', 'OS_PROJECT_NAME') }}"
      project_domain_name: "{{ lookup('env', 'OS_PROJECT_DOMAIN_NAME') }}"
      user_domain_name: "{{ lookup('env', 'OS_USER_DOMAIN_NAME') }}"
    public_key_file: "{{ ansible_user_dir }}/.ssh/id_rsa.pub"
  tasks:
    - name: Ensure OpenStack shade module is installed
      pip:
        name: shade
      become: True

    - name: Ensure a test SSH key pair is registered with Nova
      os_keypair:
        auth: "{{ openstack_auth }}"
        name: test
        public_key_file: "{{ public_key_file }}"
        state: present
55
bootstrap.sh
Executable file
@ -0,0 +1,55 @@
#!/bin/bash

set -e

function run_playbook {
    KAYOBE_CONFIG_PATH=${KAYOBE_CONFIG_PATH:-/etc/kayobe}
    # Ansible fails silently if the inventory does not exist.
    test -e ${KAYOBE_CONFIG_PATH}/inventory
    ansible-playbook \
        -i ${KAYOBE_CONFIG_PATH}/inventory \
        -e @${KAYOBE_CONFIG_PATH}/globals.yml \
        -e @${KAYOBE_CONFIG_PATH}/dns.yml \
        -e @${KAYOBE_CONFIG_PATH}/kolla.yml \
        -e @${KAYOBE_CONFIG_PATH}/networks.yml \
        -e @${KAYOBE_CONFIG_PATH}/network-allocation.yml \
        -e @${KAYOBE_CONFIG_PATH}/ntp.yml \
        -e @${KAYOBE_CONFIG_PATH}/swift.yml \
        $@
}

function install_ansible {
    if [[ -f /etc/centos-release ]]; then
        sudo yum -y install epel-release
    elif [[ -f /etc/redhat-release ]]; then
        sudo subscription-manager repos --enable=qci-1.0-for-rhel-7-rpms
        if ! yum info epel-release >/dev/null 2>&1 ; then
            sudo yum -y install \
                https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
        fi
    fi
    sudo yum -y install ansible
}

function install_ansible_roles {
    ansible-galaxy install \
        --roles-path ansible/roles \
        --role-file ansible/requirements.yml
}

function bootstrap {
    run_playbook ansible/bootstrap.yml
}

function install_kolla {
    run_playbook ansible/kolla.yml
}

function main {
    install_ansible
    install_ansible_roles
    bootstrap
    install_kolla
}

main $*
76
configure-kayobe.sh
Executable file
@ -0,0 +1,76 @@
#!/bin/bash

set -e

function configure_kayobe {
    KAYOBE_CONFIG_PATH=${KAYOBE_CONFIG_PATH:-/etc/kayobe}
    sudo yum -y install python-netaddr
    sudo mkdir -p ${KAYOBE_CONFIG_PATH}
    sudo chown ${USER}:${USER} ${KAYOBE_CONFIG_PATH}
    chmod 755 ${KAYOBE_CONFIG_PATH}
    cp -r etc/kayobe/* ${KAYOBE_CONFIG_PATH}
    my_interface=$(ip route get 8.8.8.8 | awk '{ print $5 }')
    my_ip=$(ip route get 8.8.8.8 | awk '{ print $7 }')
    gateway=$(ip route get 8.8.8.8 | awk '{ print $3 }')
    cidr=$(ip a show $my_interface | awk '$1 == "inet" { print $2 }')
    vip=$(python -c "import netaddr; a = netaddr.IPAddress('$my_ip'); print a+2")
    cat >> ${KAYOBE_CONFIG_PATH}/ntp.yml << EOF

#######################################################
# Local config
timezone: GMT
EOF

    cat >> ${KAYOBE_CONFIG_PATH}/networks.yml << EOF

#######################################################
# Local config
provision_oc_net_name: 'the_net'
provision_wl_net_name: 'the_net'
internal_net_name: 'the_net'
external_net_name: 'the_net'
storage_net_name: 'the_net'
storage_mgmt_net_name: 'the_net'

the_net_vip_address: ${vip}
the_net_cidr: ${cidr}
the_net_gateway: ${gateway}
EOF

    cat > ${KAYOBE_CONFIG_PATH}/network-allocation.yml << EOF
---
the_net_ips:
  localhost: ${my_ip}
EOF

    cat > ${KAYOBE_CONFIG_PATH}/inventory/hosts << EOF
[config-mgmt]
# This host acts as the configuration management control host. This must be
# localhost.
localhost ansible_connection=local

[seed]
# This host will provide the Bifrost undercloud.
localhost ansible_host=127.0.0.1

[controllers]
# These hosts will provide the OpenStack overcloud.
EOF

    if [[ -e ~/kayobe-env ]] ; then
        for controller_ip in $(python -c "import json
with open('/home/centos/kayobe-env') as f:
    cfg = json.load(f)
for ctl_ip in cfg['controller_ips']:
    print ctl_ip"); do
            echo "  '$controller_ip': $controller_ip" >> ${KAYOBE_CONFIG_PATH}/network-allocation.yml
            echo $controller_ip >> ${KAYOBE_CONFIG_PATH}/inventory/hosts
        done
    fi
}

function main {
    configure_kayobe
}

main $@
deploy-overcloud.sh
@ -2,68 +2,58 @@
 
 set -e
 
-###########################################################
-# Overcloud
-
-function configure_overcloud_network {
-    echo "TODO: configure overcloud network"
-}
-
-function configure_overcloud_bios_and_raid {
-    echo "TODO: configure overcloud BIOS and RAID"
-}
-
-function deploy_overcloud_servers {
-    # Deploy servers with Bifrost
-    kolla-ansible deploy-servers -i /etc/kolla/inventory/seed
-}
-
-function configure_overcloud_os {
-    #ansible controllers -b -i /etc/kolla/inventory/overcloud -m yum -a 'name=[epel-release, centos-release-openstack-newton]'
-    #ansible controllers -b -i /etc/kolla/inventory/overcloud -m yum -a 'name=[python-pip, vim]'
-
-    # Disable SELinux
-    ansible controllers -b -i /etc/kolla/inventory/overcloud -m selinux -a 'state=disabled'
-    ansible controllers -b -i /etc/kolla/inventory/overcloud -m command -a 'reboot -f' &
-
-    # Wait for nodes to come back up
-    echo "Waiting for overcloud nodes to come back up"
-    while true ; do
-        ansible controllers -i /etc/kolla/inventory/overcloud -m command -a 'hostname' && break
-    done
-}
-
-function bootstrap_overcloud_kolla {
-    # TODO
-    # Bootstrap seed node
-    kolla-ansible bootstrap-servers -i /etc/kolla/inventory/overcloud
-    ansible controllers -i /etc/kolla/inventory/overcloud -m command -a 'docker ps'
-    ansible controllers -b -i /etc/kolla/inventory/overcloud -m service -a 'name=ntpd state=started enabled=yes'
-}
-
-function configure_overcloud_docker {
-    echo "TODO: configure overcloud docker"
-}
-
-function pull_overcloud_images {
-    kolla-ansible pull -i /etc/kolla/inventory/overcloud
-}
-
-function deploy_overcloud_services {
-    kolla-ansible prechecks -i /etc/kolla/inventory/overcloud
-    kolla-ansible deploy -i /etc/kolla/inventory/overcloud
-    kolla-ansible post-deploy -i /etc/kolla/inventory/overcloud
-}
+function run_playbook {
+    KAYOBE_CONFIG_PATH=${KAYOBE_CONFIG_PATH:-/etc/kayobe}
+    # Ansible fails silently if the inventory does not exist.
+    test -e ${KAYOBE_CONFIG_PATH}/inventory
+    ansible-playbook \
+        -i ${KAYOBE_CONFIG_PATH}/inventory \
+        -e @${KAYOBE_CONFIG_PATH}/globals.yml \
+        -e @${KAYOBE_CONFIG_PATH}/dns.yml \
+        -e @${KAYOBE_CONFIG_PATH}/kolla.yml \
+        -e @${KAYOBE_CONFIG_PATH}/networks.yml \
+        -e @${KAYOBE_CONFIG_PATH}/network-allocation.yml \
+        -e @${KAYOBE_CONFIG_PATH}/ntp.yml \
+        -e @${KAYOBE_CONFIG_PATH}/swift.yml \
+        $@
+}
+
+function run_kolla_ansible {
+    export KOLLA_CONFIG_PATH=${KOLLA_CONFIG_PATH:-/etc/kolla}
+    # Ansible fails silently if the inventory does not exist.
+    test -e ${KOLLA_CONFIG_PATH}/inventory/overcloud
+    KOLLA_VENV=$(pwd)/ansible/kolla-venv
+    source ${KOLLA_VENV}/bin/activate
+    kolla-ansible \
+        --configdir ${KOLLA_CONFIG_PATH} \
+        --passwords ${KOLLA_CONFIG_PATH}/passwords.yml \
+        -i ${KOLLA_CONFIG_PATH}/inventory/overcloud \
+        $@
+    deactivate
+}
+
+function configure_os {
+    run_playbook ansible/ssh-known-host.yml -l controllers
+    run_playbook ansible/disable-selinux.yml -l controllers
+    run_playbook ansible/network.yml -l controllers
+    run_playbook ansible/ntp.yml -l controllers
+    run_kolla_ansible bootstrap-servers -e ansible_user=${USER}
+    run_playbook ansible/kolla-host.yml -l controllers
+    run_playbook ansible/docker.yml -l controllers
+}
+
+function deploy_services {
+    run_playbook ansible/kolla-openstack.yml
+    run_playbook ansible/swift-setup.yml
+    run_kolla_ansible pull
+    run_kolla_ansible prechecks
+    run_kolla_ansible deploy
+    run_kolla_ansible post-deploy
+}
 
 function deploy_overcloud {
-    configure_overcloud_network
-    configure_overcloud_bios_and_raid
-    deploy_overcloud_servers
-    configure_overcloud_os
-    bootstrap_overcloud_kolla
-    configure_overcloud_docker
-    pull_overcloud_images
-    deploy_overcloud_services
+    configure_os
+    deploy_services
 }
 
 ###########################################################
636
deploy-seed.sh
@ -2,605 +2,58 @@
 
 set -e
 
-###########################################################
-# Seed node
-
-function configure_seed_os {
-    sudo yum -y install epel-release
-    sudo yum -y install ansible
-    sudo yum -y install git vim
-
-    # Generate an SSH key
-    if [[ ! -f ~/.ssh/id_rsa ]]; then
-        ssh-keygen -N '' -f ~/.ssh/id_rsa
-    fi
-    ansible localhost -m authorized_key -a "user=$(whoami) key='$(cat ~/.ssh/id_rsa.pub)'"
-    ssh-keyscan 127.0.0.1 >> ~/.ssh/known_hosts
-    ssh-keyscan localhost >> ~/.ssh/known_hosts
-
-    # Disable SELinux
-    if selinuxenabled && [[ $(getenforce) = 'Enforcing' ]] ; then
-        echo "Disabling SELinux and rebooting. Re-run this script"
-        ansible localhost -b -m selinux -a 'state=disabled'
-        sudo reboot -f
-    fi
-}
-
-function install_kolla {
-    # Install kolla
-    sudo yum -y install gcc python-devel python-pip libffi-devel openssl-devel
-    #sudo yum -y install centos-release-openstack-newton
-    #sudo yum -y install python-openstackclient python-neutronclient
-
-    sudo pip install 'kolla<4.0.0'
-    set +e
-    sudo yum -y install patch
-    sudo patch -u -f /usr/share/kolla/ansible/roles/baremetal/tasks/pre-install.yml << EOF
---- /usr/share/kolla/ansible/roles/baremetal/tasks/pre-install.yml.old 2017-01-06 17:23:12.444746830 +0000
-+++ /usr/share/kolla/ansible/roles/baremetal/tasks/pre-install.yml 2017-01-06 17:22:27.864278879 +0000
-@@ -28,6 +28,7 @@
-{% for host in groups['all'] %}
-{{ hostvars[host]['ansible_' + hostvars[host]['api_interface']]['ipv4']['address'] }} {{ hostvars[host]['ansible_hostname'] }}
-{% endfor %}
-+ become: True
-when: customize_etc_hosts | bool
-
-- name: ensure sudo group is present
-@@ -126,7 +127,7 @@
-recurse: yes
-owner: kolla
-group: kolla
-- mode: 755
-+ mode: 0755
-become: True
-when: create_kolla_user | bool == True
-
-@@ -135,6 +136,6 @@
-path: /etc/kolla
-state: directory
-recurse: yes
-- mode: 666
-+ mode: 0666
-become: True
-when: create_kolla_user | bool == False
-EOF
-    sudo patch -u -f /usr/share/kolla/ansible/roles/ironic/templates/ironic-api.json.j2 << EOF
---- /usr/share/kolla/ansible/roles/ironic/templates/ironic-api.json.j2.old 2017-01-06 13:56:52.881061188 +0000
-+++ /usr/share/kolla/ansible/roles/ironic/templates/ironic-api.json.j2 2017-01-06 14:00:21.757338271 +0000
-@@ -10,7 +10,7 @@
-],
-"permissions": [
-{
-- "path": "/var/log/kolla/ironic"
-+ "path": "/var/log/kolla/ironic",
-"owner": "ironic:ironic",
-"recurse": true
-}
-EOF
-    sudo patch -u -f /usr/share/kolla/ansible/roles/ironic/templates/ironic-conductor.json.j2 << EOF
---- /usr/share/kolla/ansible/roles/ironic/templates/ironic-conductor.json.j2.old 2017-01-06 14:28:35.048365453 +0000
-+++ /usr/share/kolla/ansible/roles/ironic/templates/ironic-conductor.json.j2 2017-01-06 14:28:44.858467071 +0000
-@@ -20,7 +20,7 @@
-"recurse": true
-},
-{
-- "path": "/tftpboot"
-+ "path": "/tftpboot",
-"owner": "ironic:ironic",
-"recurse": true
-}
-EOF
-    set -e
-}
-
-function configure_kolla {
-    # Configure Kolla
-    if [[ -d /etc/kolla ]]; then
-        sudo mv /etc/kolla /etc/kolla.old.$(date +%s)
-    fi
-    sudo mkdir -p /etc/kolla
-    sudo chown $(whoami):$(whoami) /etc/kolla
-    mkdir -p /etc/kolla/config /etc/kolla/inventory
-    cat > /etc/kolla/inventory/seed << EOF
-# Simple inventory for bootstrapping Kolla control host.
-[baremetal]
-seed ansible_host=127.0.0.1 ansible_user=kolla
-EOF
-
-    cat > /etc/kolla/inventory/overcloud << EOF
-[controllers]
-# These hostnames must be resolvable from your deployment host
-control01 ansible_host=${CONTROLLER_IP} ansible_user=kolla ansible_become=true
-
-# These initial groups are the only groups required to be modified. The
-# additional groups are for more control of the environment.
-[control:children]
-controllers
-
-# The network nodes are where your l3-agent and loadbalancers will run
-# This can be the same as a host in the control group
-[network:children]
-controllers
-
-[compute:children]
-controllers
-
-[monitoring:children]
-controllers
-
-[storage:children]
-controllers
-
-[baremetal:children]
-control
-network
-compute
-storage
-monitoring
-
-# You can explicitly specify which hosts run each project by updating the
-# groups in the sections below. Common services are grouped together.
-[collectd:children]
-compute
-
-[grafana:children]
-monitoring
-
-[etcd:children]
-control
-
-[influxdb:children]
-monitoring
-
-[kibana:children]
-control
-
-[telegraf:children]
-monitoring
-
-[elasticsearch:children]
-control
-
-[haproxy:children]
-network
-
-[mariadb:children]
-control
-
-[rabbitmq:children]
-control
-
-[mongodb:children]
-control
-
-[keystone:children]
-control
-
-[glance:children]
-control
-
-[nova:children]
-control
-
-[neutron:children]
-network
-
-[cinder:children]
-control
-
-[cloudkitty:children]
-control
-
-[memcached:children]
-control
-
-[horizon:children]
-control
-
-[swift:children]
-control
-
-[barbican:children]
-control
-
-[heat:children]
-control
-
-[murano:children]
-control
-
-[ironic:children]
-control
-
-[ceph:children]
-control
-
-[magnum:children]
-control
-
-[sahara:children]
-control
-
-[mistral:children]
-control
-
-[manila:children]
-control
-
-[ceilometer:children]
-control
-
-[aodh:children]
-control
-
-[congress:children]
-control
-
-[gnocchi:children]
-control
-
-# Tempest
-[tempest:children]
-control
-
-[senlin:children]
-control
-
-[vmtp:children]
-control
-
-[watcher:children]
-control
-
-[rally:children]
-control
-
-# Additional control implemented here. These groups allow you to control which
-# services run on which hosts at a per-service level.
-#
-# Word of caution: Some services are required to run on the same host to
-# function appropriately. For example, neutron-metadata-agent must run on the
-# same host as the l3-agent and (depending on configuration) the dhcp-agent.
-
-# Glance
-[glance-api:children]
-glance
-
-[glance-registry:children]
-glance
-
-# Nova
-[nova-api:children]
-nova
-
-[nova-conductor:children]
-nova
-
-[nova-consoleauth:children]
-nova
-
-[nova-novncproxy:children]
-nova
-
-[nova-scheduler:children]
-nova
-
-[nova-spicehtml5proxy:children]
-nova
-
-[nova-compute-ironic:children]
-nova
-
-# Neutron
-[neutron-server:children]
-control
-
-[neutron-dhcp-agent:children]
-neutron
-
-[neutron-l3-agent:children]
-neutron
-
-[neutron-lbaas-agent:children]
-neutron
-
-[neutron-metadata-agent:children]
-neutron
-
-[neutron-vpnaas-agent:children]
-neutron
-
-# Ceph
-[ceph-mon:children]
-ceph
-
-[ceph-rgw:children]
-ceph
-
-[ceph-osd:children]
-storage
-
-# Cinder
-[cinder-api:children]
-cinder
-
-[cinder-backup:children]
-storage
-
-[cinder-scheduler:children]
-cinder
-
-[cinder-volume:children]
-storage
-
-# Cloudkitty
-[cloudkitty-api:children]
-cloudkitty
-
-[cloudkitty-processor:children]
-cloudkitty
-
-# iSCSI
-[iscsid:children]
-compute
-storage
-ironic-conductor
-
-[tgtd:children]
-storage
-
-# Manila
-[manila-api:children]
-manila
-
-[manila-scheduler:children]
-manila
-
-[manila-share:children]
-network
-
-# Swift
-[swift-proxy-server:children]
-swift
-
-[swift-account-server:children]
-storage
-
-[swift-container-server:children]
-storage
-
-[swift-object-server:children]
-storage
-
-# Barbican
-[barbican-api:children]
-barbican
-
-[barbican-keystone-listener:children]
-barbican
-
-[barbican-worker:children]
-barbican
-
-# Heat
-[heat-api:children]
-heat
-
-[heat-api-cfn:children]
-heat
-
-[heat-engine:children]
-heat
-
-# Murano
-[murano-api:children]
-murano
-
-[murano-engine:children]
-murano
-
-# Ironic
-[ironic-api:children]
-ironic
-
-[ironic-conductor:children]
-ironic
-
-[ironic-inspector:children]
-ironic
-
-[ironic-pxe:children]
-ironic
-
-# Magnum
-[magnum-api:children]
-magnum
-
-[magnum-conductor:children]
-magnum
-
-# Sahara
-[sahara-api:children]
-sahara
-
-[sahara-engine:children]
-sahara
-
-# Mistral
-[mistral-api:children]
-mistral
-
-[mistral-executor:children]
-mistral
-
-[mistral-engine:children]
-mistral
-
-# Ceilometer
-[ceilometer-api:children]
-ceilometer
-
-[ceilometer-central:children]
-ceilometer
-
-[ceilometer-notification:children]
-ceilometer
-
-[ceilometer-collector:children]
-ceilometer
-
-[ceilometer-compute:children]
-compute
-
-# Aodh
-[aodh-api:children]
-aodh
-
-[aodh-evaluator:children]
-aodh
-
-[aodh-listener:children]
-aodh
-
-[aodh-notifier:children]
-aodh
-
-# Congress
-[congress-api:children]
-congress
-
-[congress-datasource:children]
-congress
-
-[congress-policy-engine:children]
-congress
-
-# Gnocchi
-[gnocchi-api:children]
-gnocchi
-
-[gnocchi-statsd:children]
-gnocchi
-
-[gnocchi-metricd:children]
-gnocchi
-
-# Multipathd
-[multipathd:children]
-compute
-
-# Watcher
-[watcher-api:children]
-watcher
-
-[watcher-engine:children]
-watcher
-
-[watcher-applier:children]
-watcher
-
-# Senlin
-[senlin-api:children]
-senlin
-
-[senlin-engine:children]
-senlin
-EOF
-
-    my_ip=$(ip route get 192.168.0.1 | awk '{ print $5 }')
-    vip=$(python -c "import netaddr; a = netaddr.IPAddress('$my_ip'); print a+1")
-    my_intf=$(ip route get 192.168.0.1 | awk '{ print $3 }')
-
-    cp /usr/share/kolla/etc_examples/kolla/* /etc/kolla
-    cat >> /etc/kolla/globals.yml << EOF
-##################################################
-# Begin overrides
-##################################################
-
-# OpenStack distro
-kolla_base_distro: "centos"
-kolla_install_type: "binary"
-openstack_release: "3.0.1"
-
-# Networking
-kolla_internal_vip_address: "${vip}"
-network_interface: "${my_intf}"
-
-# TLS
-#kolla_enable_tls_external: "no"
-#kolla_external_fqdn_cert: "{{ node_config_directory }}/certificates/haproxy.pem"
-
-# Services
-enable_ironic: "yes"
-EOF
-
-    # Generate passwords
-    kolla-genpwd
-
-    # Configure Kolla build
-    cat > /etc/kolla/template-override.j2 << EOF
-{% extends parent_template %}
-
-# Disable troublesome keys
-{% set base_yum_repo_keys_override=['http://yum.mariadb.org/RPM-GPG-KEY-MariaDB'] %}
-# Disable repos with troublesome keys
-{% set base_yum_repo_files_override=['MariaDB.repo'] %}
-EOF
-    cat > /etc/kolla/kolla-build.conf << EOF
-[DEFAULT]
-template_override=/etc/kolla/template-override.j2
-EOF
-
-    # Configure Bifrost
-    mkdir /etc/kolla/config/bifrost
-    cat > /etc/kolla/config/bifrost/bifrost.yml << EOF
----
-EOF
-    cat > /etc/kolla/config/bifrost/dib.yml << EOF
----
-dib_os_element: "centos7"
-EOF
-    cat > /etc/kolla/config/bifrost/servers.yml << EOF
----
-EOF
-}
-
-function bootstrap_seed_kolla {
-    # Bootstrap seed node
-    kolla-ansible bootstrap-servers -i /etc/kolla/inventory/seed -e ansible_user=$(whoami)
-    ansible seed -i /etc/kolla/inventory/seed -b -m authorized_key -a "user=kolla key='$(cat ~/.ssh/id_rsa.pub)'" -e ansible_user=$(whoami)
-    ansible seed -i /etc/kolla/inventory/seed -b -m user -a "name=$(whoami) groups=kolla,docker append=true"
-    ansible seed -i /etc/kolla/inventory/seed -m command -a 'docker info'
-    # Enable NTPd
-    ansible seed -i /etc/kolla/inventory/seed -b -m service -a 'name=ntpd state=started enabled=yes'
-}
-
-function configure_seed_docker {
-    # TODO
-    echo "TODO: configure docker on seed"
-}
+function run_playbook {
+    KAYOBE_CONFIG_PATH=${KAYOBE_CONFIG_PATH:-/etc/kayobe}
+    # Ansible fails silently if the inventory does not exist.
+    test -e ${KAYOBE_CONFIG_PATH}/inventory
+    ansible-playbook \
+        -i ${KAYOBE_CONFIG_PATH}/inventory \
+        -e @${KAYOBE_CONFIG_PATH}/bifrost.yml \
+        -e @${KAYOBE_CONFIG_PATH}/dns.yml \
+        -e @${KAYOBE_CONFIG_PATH}/globals.yml \
+        -e @${KAYOBE_CONFIG_PATH}/kolla.yml \
+        -e @${KAYOBE_CONFIG_PATH}/networks.yml \
+        -e @${KAYOBE_CONFIG_PATH}/network-allocation.yml \
+        -e @${KAYOBE_CONFIG_PATH}/ntp.yml \
+        -e @${KAYOBE_CONFIG_PATH}/swift.yml \
+        $@
+}
+
+function run_kolla_ansible {
+    export KOLLA_CONFIG_PATH=${KOLLA_CONFIG_PATH:-/etc/kolla}
+    # Ansible fails silently if the inventory does not exist.
+    test -e ${KOLLA_CONFIG_PATH}/inventory/seed
+    KOLLA_VENV=$(pwd)/ansible/kolla-venv
+    source ${KOLLA_VENV}/bin/activate
+    kolla-ansible \
+        --configdir ${KOLLA_CONFIG_PATH} \
+        --passwords ${KOLLA_CONFIG_PATH}/passwords.yml \
+        -i ${KOLLA_CONFIG_PATH}/inventory/seed \
+        $@
+    deactivate
+}
+
+function configure_os {
+    run_playbook ansible/ssh-known-host.yml -l seed
+    run_playbook ansible/disable-selinux.yml -l seed
+    run_playbook ansible/network.yml -l seed
+    run_playbook ansible/ntp.yml -l seed
+    run_kolla_ansible bootstrap-servers -e ansible_user=${USER}
+    run_playbook ansible/kolla-host.yml -l seed
+    run_playbook ansible/docker.yml -l seed
+}
 
 function deploy_bifrost {
-    if true ; then
-        # Build Bifrost image
-        # FIXME: sudo required because we need to log out/in for docker group
-        # membership to take effect.
-        sudo kolla-build -t source bifrost-deploy
-    else
-        # Image on Dockerhub not currently working :(
-        docker pull docker.io/kolla/centos-source-bifrost-deploy:3.0.1
-    fi
-
-    # Deploy Bifrost
-    kolla-ansible deploy-bifrost -i /etc/kolla/inventory/seed -e kolla_install_type=source
+    # Use a pre-built bifrost image in the stackhpc repository.
+    # The image was built via kolla-build -t source bifrost-deploy.
+    run_playbook ansible/kolla-bifrost.yml
+    run_kolla_ansible deploy-bifrost \
+        -e kolla_install_type=source \
+        -e docker_namespace=stackhpc
 }
 
 function deploy_seed_node {
-    configure_seed_os
-    install_kolla
-    configure_kolla
-    bootstrap_seed_kolla
-    configure_seed_docker
+    configure_os
     deploy_bifrost
 }
 
@ -608,11 +61,6 @@ function deploy_seed_node {
 # Main
 
 function main {
-    if [[ $# -ne 1 ]]; then
-        echo "Usage: $0 <controller IP>"
-        exit 1
-    fi
-    CONTROLLER_IP=$1
     deploy_seed_node
 }
 
36
etc/kayobe/bifrost.yml
Normal file
@ -0,0 +1,36 @@
---
# Kayobe configuration for Bifrost.

###############################################################################
# Diskimage-builder configuration.

# DIB base OS element.
#kolla_bifrost_dib_os_element:

# List of DIB elements.
#kolla_bifrost_dib_elements:

# DIB init element.
#kolla_bifrost_dib_init_element:

# DIB environment variables.
#kolla_bifrost_dib_env_vars:

# List of DIB packages to install.
#kolla_bifrost_dib_packages:

###############################################################################
# Ironic configuration.

# Whether to enable ipmitool-based drivers.
#kolla_bifrost_enable_ipmitool_drivers:

###############################################################################
# Inventory configuration.

# Server inventory for Bifrost.
#kolla_bifrost_servers:

###############################################################################
# Dummy variable to allow Ansible to accept this file.
workaround_ansible_issue_8743: yes
23
etc/kayobe/dns.yml
Normal file
@ -0,0 +1,23 @@
---
###############################################################################
# DNS.

# List of DNS nameservers.
#resolv_nameservers:

# DNS domain suffix.
#resolv_domain:

# List of DNS search suffixes.
#resolv_search:

# List of IP address and netmask pairs to sort addresses returned by
# gethostbyname.
#resolv_sortlist:

# List of DNS options.
#resolv_options:

###############################################################################
# Dummy variable to allow Ansible to accept this file.
workaround_ansible_issue_8743: yes
15
etc/kayobe/globals.yml
Normal file
@ -0,0 +1,15 @@
---
# Kayobe global configuration.

###############################################################################
# Miscellaneous configuration.

# Path to Kayobe configuration.
#kayobe_config_path:

# Path in which to cache downloaded images.
#image_cache_path:

###############################################################################
# Dummy variable to allow Ansible to accept this file.
workaround_ansible_issue_8743: yes
31
etc/kayobe/inventory/group_vars/controllers/network-interfaces
Normal file
@ -0,0 +1,31 @@
---
###############################################################################
# Network interface definitions for the controller group.

# Overcloud provisioning network IP information.
# provision_oc_net_interface:
# provision_oc_net_bridge_ports:

# Workload provisioning network IP information.
# provision_wl_net_interface:
# provision_wl_net_bridge_ports:

# Internal network IP information.
# internal_net_interface:
# internal_net_bridge_ports:

# External network IP information.
# external_net_interface:
# external_net_bridge_ports:

# Storage network IP information.
# storage_net_interface:
# storage_net_bridge_ports:

# Storage management network IP information.
# storage_mgmt_net_interface:
# storage_mgmt_net_bridge_ports:

###############################################################################
# Dummy variable to allow Ansible to accept this file.
workaround_ansible_issue_8743: yes
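A filled-in sketch for one of these networks might look like the following; the interface and bridge port names are illustrative only and do not come from this commit:

provision_oc_net_interface: breth1
provision_oc_net_bridge_ports:
  - eth1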
31
etc/kayobe/inventory/group_vars/seed/network-interfaces
Normal file
@ -0,0 +1,31 @@
---
###############################################################################
# Network interface definitions for the seed group.

# Overcloud provisioning network IP information.
# provision_oc_net_interface:
# provision_oc_net_bridge_ports:

# Workload provisioning network IP information.
# provision_wl_net_interface:
# provision_wl_net_bridge_ports:

# Internal network IP information.
# internal_net_interface:
# internal_net_bridge_ports:

# External network IP information.
# external_net_interface:
# external_net_bridge_ports:

# Storage network IP information.
# storage_net_interface:
# storage_net_bridge_ports:

# Storage management network IP information.
# storage_mgmt_net_interface:
# storage_mgmt_net_bridge_ports:

###############################################################################
# Dummy variable to allow Ansible to accept this file.
workaround_ansible_issue_8743: yes
13
etc/kayobe/inventory/groups
Normal file
@ -0,0 +1,13 @@
# Kayobe groups inventory file. This file should generally not be modified.
# It declares the top-level groups and sub-groups.

[seed]
# Empty group to provide declaration of seed group.

[controllers]
# Empty group to provide declaration of controllers group.

[docker:children]
# Hosts in this group will have Docker installed.
seed
controllers
19
etc/kayobe/inventory/hosts.example
Normal file
@ -0,0 +1,19 @@
# Kayobe hosts inventory file. This file should be modified to define the hosts
# and their top-level group membership.

[config-mgmt]
# This host acts as the configuration management control host. This must be
# localhost.
localhost ansible_connection=local

[seed-hypervisor]
# Add a seed hypervisor node here if required. This host will run a seed node
# Virtual Machine.

[seed]
# Add a seed node here if required. This host will provide the Bifrost
# undercloud.

[controllers]
# Add controller nodes here if required. These hosts will provide the
# OpenStack overcloud.
74
etc/kayobe/kolla.yml
Normal file
@ -0,0 +1,74 @@
---
# Kayobe Kolla configuration.

###############################################################################
# Kolla configuration.

# Path to Kolla configuration directory.
#kolla_config_path:

# Kolla base container image distribution.
#kolla_base_distro:

# Kolla installation type: binary or source.
#kolla_install_type:

# Kolla OpenStack release version. This should be a Docker image tag.
#kolla_openstack_release:

# Whether TLS is enabled for the external API endpoints.
#kolla_enable_tls_external:

# Path to external API certificate.
#kolla_external_fqdn_cert:

# Whether debug logging is enabled.
#kolla_openstack_logging_debug:

###############################################################################
# Kolla feature flag configuration.

#kolla_enable_aodh:
#kolla_enable_barbican:
#kolla_enable_cadf_notifications:
#kolla_enable_ceilometer:
#kolla_enable_central_logging:
#kolla_enable_ceph:
#kolla_enable_ceph_rgw:
#kolla_enable_cinder:
#kolla_enable_cinder_backend_iscsi:
#kolla_enable_cinder_backend_lvm:
#kolla_enable_cloudkitty:
#kolla_enable_congress:
#kolla_enable_etcd:
#kolla_enable_gnocchi:
#kolla_enable_grafana:
#kolla_enable_heat:
#kolla_enable_horizon:
#kolla_enable_influxdb:
#kolla_enable_ironic:
#kolla_enable_iscsid:
#kolla_enable_kuryr:
#kolla_enable_magnum:
#kolla_enable_manila:
#kolla_enable_mistral:
#kolla_enable_mongodb:
#kolla_enable_multipathd:
#kolla_enable_murano:
#kolla_enable_neutron_vpnaas:
#kolla_enable_neutron_dvr:
#kolla_enable_neutron_lbaas:
#kolla_enable_neutron_qos:
#kolla_enable_neutron_agent_ha:
#kolla_enable_rally:
#kolla_enable_sahara:
#kolla_enable_senlin:
#kolla_enable_swift:
#kolla_enable_telegraf:
#kolla_enable_tempest:
#kolla_enable_vmtp:
#kolla_enable_watcher:

###############################################################################
# Dummy variable to allow Ansible to accept this file.
workaround_ansible_issue_8743: yes
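As an illustration of how these flags would be used, the services exercised elsewhere in this commit (Ironic and the Swift setup playbook, which checks kolla_enable_swift) could be enabled by uncommenting and setting the corresponding flags; the values shown are an example only:

kolla_enable_ironic: yes
kolla_enable_swift: yes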
Some files were not shown because too many files have changed in this diff.