Merge pull request #265 from rustyrobot/solar-ibp-provision
Provisioning for Solar using IBP (image-based provisioning)
commit 1cb74b8768
11
README.md
@ -296,3 +296,14 @@ Full documentation of individual functions is found in the `solar/template.py` f

# Customizing vagrant-settings.yaml

Solar ships with sane defaults in `vagrant-settings.yaml_defaults`. If you need to adjust them, e.g. to change resource allocation for the VirtualBox machines, copy the file to `vagrant-settings.yaml` and make your modifications there.
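For example, a minimal sketch of that customization step, run from the repository root (this assumes, based on the paragraph above, that the Vagrantfile prefers `vagrant-settings.yaml` when it exists):

```bash
# Copy the shipped defaults and edit the copy, e.g. to change VirtualBox RAM/CPU allocation.
cp vagrant-settings.yaml_defaults vagrant-settings.yaml
```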

# Image based provisioning with Solar

* In `vagrant-settings.yaml_defaults` (or your `vagrant-settings.yaml`), uncomment the `preprovisioned: false` line.
* Run `vagrant up`; it will take some time because it builds the bootstrap image.
* Currently, pre-built images from Fuel can be used for provisioning.
* Download the images [using this link](https://drive.google.com/file/d/0B7I3b5vI7ZYXM0FPTDJEdjg0Qnc/view).
* Log into the VM: `vagrant ssh solar-dev`
* Go to the `/vagrant/tmp/` directory: `cd /vagrant/tmp/`
* Untar the images: `tar vxf targetimages.tar`
* Now you can run the provisioning: `/vagrant/examples/provisioning/provision.sh` (see the consolidated sketch below)
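Put together, a rough sketch of the in-VM part of the workflow (paths and the archive name are taken verbatim from the steps above):

```bash
# Inside the solar-dev VM, after `vagrant up` with `preprovisioned: false`:
cd /vagrant/tmp/
tar vxf targetimages.tar                      # unpack the pre-built Fuel images
/vagrant/examples/provisioning/provision.sh   # stages, processes and runs the provisioning workflow
```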
@ -6,7 +6,7 @@ bind-interfaces
dhcp-range={{dhcp_range_start}},{{dhcp_range_end}},12h

# Net boot file name
dhcp-boot=tag:!nopxe,pxelinux.0
dhcp-boot=net:!nopxe,pxelinux.0

# Configure tftp
enable-tftp
8
bootstrap/playbooks/files/nginx_vagrant_dir.cfg
Normal file
@ -0,0 +1,8 @@
server {
    listen 8001;
    root /vagrant;

    location / {
        autoindex on;
    }
}
@ -47,4 +47,10 @@
# Configure http server to load root
- apt: name=nginx state=present
- template: src=files/nginx.cfg dest=/etc/nginx/conf.d/pxe_image.conf
# Configure the http server to serve files from the '/vagrant' directory
- template: src=files/nginx_vagrant_dir.cfg dest=/etc/nginx/conf.d/vagrant_dir.conf
- service: name=nginx state=restarted

# Install discovery service
- shell: pip install git+https://github.com/rustyrobot/discovery.git
- shell: 'discovery &'
36
examples/provisioning/provision.py
Executable file
@ -0,0 +1,36 @@
#!/usr/bin/env python

import requests

from solar.core import resource
from solar.core import signals
from solar.core import validation
from solar.core.resource import virtual_resource as vr

from solar.events.controls import React
from solar.events.api import add_event


discovery_service = 'http://0.0.0.0:8881'

nodes_list = requests.get(discovery_service).json()

# Create slave node resources
node_resources = vr.create('nodes', 'templates/not_provisioned_nodes.yaml', {'nodes': nodes_list})

# Get master node
master_node = filter(lambda n: n.name == 'node_master', node_resources)[0]

# Dnsmasq resources
for node in nodes_list:
    dnsmasq = vr.create('dnsmasq_{0}'.format(node['mac'].replace(':', '_')), 'resources/dnsmasq', {})[0]
    node = filter(lambda n: n.name.endswith('node_{0}'.format(node['mac']).replace(':', '_')), node_resources)[0]
    master_node.connect(dnsmasq)
    node.connect(dnsmasq, {'admin_mac': 'exclude_mac_pxe'})

    # Reaction chain per node: run -> provision -> exclude_mac_pxe (dnsmasq) -> reboot
    event = React(node.name, 'run', 'success', node.name, 'provision')
    add_event(event)
    event = React(node.name, 'provision', 'success', dnsmasq.name, 'exclude_mac_pxe')
    add_event(event)
    event = React(dnsmasq.name, 'exclude_mac_pxe', 'success', node.name, 'reboot')
    add_event(event)
17
examples/provisioning/provision.sh
Executable file
@ -0,0 +1,17 @@
#!/bin/bash

set -eux

DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"

# Remove generated pxe exclude files
sudo rm -f /etc/dnsmasq.d/no_pxe_*.conf
sudo service dnsmasq restart

solar resource clear_all
python "${DIR}"/provision.py

solar changes stage
solar changes process
solar orch run-once last
watch --color -n1 'solar orch report last'
17
resources/dnsmasq/actions/exclude_mac_pxe.yaml
Normal file
@ -0,0 +1,17 @@
- hosts: [{{host}}]
  sudo: yes

  tasks:
    - lineinfile: create=yes dest=/etc/dnsmasq.d/no_pxe_{{exclude_mac_pxe | replace(':', '_')}}.conf line="dhcp-host={{exclude_mac_pxe}},set:nopxe"
    # FIXME: currently there is no way to specify
    # a policy that prevents several tasks from running in parallel,
    # so when we deploy several nodes in parallel
    # it causes problems when two tasks try
    # to restart supervisor at the same time and
    # fail to do it.
    - command: service dnsmasq status
      register: log
      until: log.stdout.find('running') > -1
      retries: 5
      delay: 2
    - shell: service dnsmasq restart
2
resources/dnsmasq/actions/run.yaml
Normal file
@ -0,0 +1,2 @@
- hosts: [{{host}}]
  sudo: yes
18
resources/dnsmasq/meta.yaml
Normal file
@ -0,0 +1,18 @@
id: dnsmasq
handler: ansible
version: 1.0.0

actions:
  exclude_mac_pxe: exclude_mac_pxe.yaml
  run: run.yaml

input:
  ip:
    schema: str!
    value:

  exclude_mac_pxe:
    schema: str!
    value:

tags: [resources=dnsmasq]
9
resources/not_provisioned_node/actions/provision.sh
Normal file
@ -0,0 +1,9 @@
#!/bin/bash

set -eux
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"

# TODO: there should be a way to render configs; in order to do this
# we should have a scripts dir variable passed from above
sed -i "s|<ROOT>|${DIR}|" "${DIR}"/templates/agent.config
provision --input_data_file "${DIR}"/templates/provisioning.json --config-file "${DIR}"/templates/agent.config
6
resources/not_provisioned_node/actions/reboot.sh
Normal file
@ -0,0 +1,6 @@
#!/bin/bash

set -eux

reboot now
8
resources/not_provisioned_node/actions/run.sh
Normal file
@ -0,0 +1,8 @@
#!/bin/bash

set -eux

# Fake run action, required in order to create
# the `run` -> `provision` dependency

exit 0
25
resources/not_provisioned_node/meta.yaml
Normal file
@ -0,0 +1,25 @@
id: not_provisioned_node
handler: shell
version: 1.0.0

actions:
  provision: provision.sh
  run: run.sh
  reboot: reboot.sh

input:
  ip:
    schema: str!
    value:
  admin_mac:
    schema: str!
    value:
  name:
    schema: str
    value: a node
  location_id:
    schema: str!
    value: $uuid
    reverse: True

tags: [resources=node]
2
resources/not_provisioned_node/templates/agent.config
Normal file
@ -0,0 +1,2 @@
[DEFAULT]
nc_template_path=<ROOT>/templates/cloud-init-templates/
@ -0,0 +1,55 @@
#cloud-boothook
#!/bin/bash

cloud-init-per instance disable_selinux_on_the_fly setenforce 0

cloud-init-per instance disable_selinux sed -i 's/^SELINUX=.*/SELINUX=disabled/g' /etc/sysconfig/selinux

# configure udev rules

# udev persistent net
cloud-init-per instance udev_persistent_net1 service network stop

ADMIN_MAC={{ common.admin_mac }}
ADMIN_IF=$(echo {{ common.udevrules }} | sed 's/[,=]/\n/g' | grep "$ADMIN_MAC" | cut -d_ -f2 | head -1)
cloud-init-per instance configure_admin_interface /bin/sh -c "echo -e \"# FROM COBBLER SNIPPET\nDEVICE=$ADMIN_IF\nIPADDR={{ common.admin_ip }}\nNETMASK={{ common.admin_mask }}\nBOOTPROTO=none\nONBOOT=yes\nUSERCTL=no\n\" | tee /etc/sysconfig/network-scripts/ifcfg-$ADMIN_IF"

cloud-init-per instance set_gateway /bin/sh -c 'echo GATEWAY="{{ common.gw }}" | tee -a /etc/sysconfig/network'

cloud-init-per instance udev_persistent_net5 service network start

# end of udev

#FIXME(agordeev): if operator updates dns settings on masternode after the node had been provisioned,
# cloud-init will start to generate resolv.conf with non-actual data
cloud-init-per instance resolv_conf_remove rm -f /etc/resolv.conf
cloud-init-per instance resolv_conf_header /bin/sh -c 'echo "# re-generated by cloud-init boothook only at the first boot;" | tee /etc/resolv.conf'
cloud-init-per instance resolv_conf_search /bin/sh -c 'echo "search {{ common.search_domain|replace('"','') }}" | tee -a /etc/resolv.conf'
cloud-init-per instance resolv_conf_domain /bin/sh -c 'echo "domain {{ common.search_domain|replace('"','') }}" | tee -a /etc/resolv.conf'
cloud-init-per instance resolv_conf_nameserver /bin/sh -c 'echo nameserver {{ common.master_ip }} | tee -a /etc/resolv.conf'

# configure black module lists
# virt-what should be installed
if [ ! -f /etc/modprobe.d/blacklist-i2c_piix4.conf ]; then
    ([[ $(virt-what) = "virtualbox" ]] && echo "blacklist i2c_piix4" >> /etc/modprobe.d/blacklist-i2c_piix4.conf || :)
    modprobe -r i2c_piix4
fi

cloud-init-per instance conntrack_ipv4 /bin/sh -c 'echo nf_conntrack_ipv4 | tee -a /etc/rc.modules'
cloud-init-per instance conntrack_ipv6 /bin/sh -c 'echo nf_conntrack_ipv6 | tee -a /etc/rc.modules'
cloud-init-per instance conntrack_proto_gre /bin/sh -c 'echo nf_conntrack_proto_gre | tee -a /etc/rc.modules'
cloud-init-per instance chmod_rc_modules chmod +x /etc/rc.modules
cloud-init-per instance conntrack_max /bin/sh -c 'echo "net.nf_conntrack_max=1048576" | tee -a /etc/sysctl.conf'
cloud-init-per instance kernel_panic /bin/sh -c 'echo "kernel.panic=60" | tee -a /etc/sysctl.conf'

cloud-init-per instance conntrack_ipv4_load modprobe nf_conntrack_ipv4
cloud-init-per instance conntrack_ipv6_load modprobe nf_conntrack_ipv6
cloud-init-per instance conntrack_proto_gre_load modprobe nf_conntrack_proto_gre
cloud-init-per instance conntrack_max_set sysctl -w "net.nf_conntrack_max=1048576"
cloud-init-per instance kernel_panic_set sysctl -w "kernel.panic=60"

cloud-init-per instance mkdir_coredump mkdir -p /var/log/coredump
cloud-init-per instance set_coredump /bin/sh -c 'echo -e "kernel.core_pattern=/var/log/coredump/core.%e.%p.%h.%t" | tee -a /etc/sysctl.conf'
cloud-init-per instance set_coredump_sysctl sysctl -w "kernel.core_pattern=/var/log/coredump/core.%e.%p.%h.%t"
cloud-init-per instance set_chmod chmod 777 /var/log/coredump
cloud-init-per instance set_limits /bin/sh -c 'echo -e "* soft core unlimited\n* hard core unlimited" | tee -a /etc/security/limits.conf'
@ -0,0 +1,55 @@
#cloud-boothook
#!/bin/bash

# udev persistent net
cloud-init-per instance udev_persistent_net1 /etc/init.d/networking stop

ADMIN_MAC={{ common.admin_mac }}
ADMIN_IF=$(echo {{ common.udevrules }} | sed 's/[,=]/\n/g' | grep "$ADMIN_MAC" | cut -d_ -f2 | head -1)
# Check if we do not already have static config (or interface seems unconfigured)
if [ ! -d "/etc/network/interfaces.d" ]; then
    mkdir -p /etc/network/interfaces.d
    echo 'source /etc/network/interfaces.d/*' > /etc/network/interfaces
fi
if [ ! -e "/etc/network/interfaces.d/ifcfg-$ADMIN_IF" ]; then
    echo -e "auto $ADMIN_IF\niface $ADMIN_IF inet static\n\taddress {{ common.admin_ip }}\n\tnetmask {{ common.admin_mask }}\n\tgateway {{ common.gw }}" > /etc/network/interfaces.d/ifcfg-"$ADMIN_IF"
fi

cloud-init-per instance udev_persistent_net5 /etc/init.d/networking start

# end of udev

#FIXME(agordeev): if operator updates dns settings on masternode after the node had been provisioned,
# cloud-init will start to generate resolv.conf with non-actual data
cloud-init-per instance resolv_conf_mkdir mkdir -p /etc/resolvconf/resolv.conf.d
cloud-init-per instance resolv_conf_remove rm -f /etc/resolv.conf
cloud-init-per instance resolv_conf_head_remove rm -f /etc/resolvconf/resolv.conf.d/head
cloud-init-per instance resolv_conf_header /bin/sh -c 'echo "# re-generated by cloud-init boothook only at the first boot;" | tee /etc/resolv.conf'
cloud-init-per instance resolv_conf_search /bin/sh -c 'echo "search {{ common.search_domain|replace('"','') }}" | tee -a /etc/resolv.conf'
cloud-init-per instance resolv_conf_domain /bin/sh -c 'echo "domain {{ common.search_domain|replace('"','') }}" | tee -a /etc/resolv.conf'
cloud-init-per instance resolv_conf_head_header /bin/sh -c 'echo "# re-generated by cloud-init boothook only at the first boot;" | tee /etc/resolvconf/resolv.conf.d/head'
cloud-init-per instance resolv_conf_head_search /bin/sh -c 'echo "search {{ common.search_domain|replace('"','') }}" | tee -a /etc/resolvconf/resolv.conf.d/head'
cloud-init-per instance resolv_conf_head_domain /bin/sh -c 'echo "domain {{ common.search_domain|replace('"','') }}" | tee -a /etc/resolvconf/resolv.conf.d/head'
cloud-init-per instance resolv_conf_nameserver /bin/sh -c 'echo nameserver {{ common.master_ip|replace('"','') }} | tee -a /etc/resolv.conf'
cloud-init-per instance resolv_conf_head_nameserver /bin/sh -c 'echo nameserver {{ common.master_ip|replace('"','') }} | tee -a /etc/resolvconf/resolv.conf.d/head'

# configure black module lists
# virt-what should be installed
if [ ! -f /etc/modprobe.d/blacklist-i2c_piix4.conf ]; then
    ([[ $(virt-what) = "virtualbox" ]] && echo "blacklist i2c_piix4" >> /etc/modprobe.d/blacklist-i2c_piix4.conf || :) && update-initramfs -u -k all
    modprobe -r i2c_piix4
fi

cloud-init-per instance conntrack_ipv4 /bin/sh -c 'echo nf_conntrack_ipv4 | tee -a /etc/modules'
cloud-init-per instance conntrack_ipv6 /bin/sh -c 'echo nf_conntrack_ipv6 | tee -a /etc/modules'
cloud-init-per instance conntrack_proto_gre /bin/sh -c 'echo nf_conntrack_proto_gre | tee -a /etc/modules'
cloud-init-per instance conntrack_max /bin/sh -c 'echo "net.nf_conntrack_max=1048576" | tee -a /etc/sysctl.conf'
cloud-init-per instance kernel_panic /bin/sh -c 'echo "kernel.panic=60" | tee -a /etc/sysctl.conf'

cloud-init-per instance conntrack_ipv4_load modprobe nf_conntrack_ipv4
cloud-init-per instance conntrack_ipv6_load modprobe nf_conntrack_ipv6
cloud-init-per instance conntrack_proto_gre_load modprobe nf_conntrack_proto_gre
cloud-init-per instance conntrack_max_set sysctl -w "net.nf_conntrack_max=1048576"
cloud-init-per instance kernel_panic_set sysctl -w "kernel.panic=60"

cloud-init-per instance dhclient /bin/sh -c 'echo "supersede routers 0;" | tee /etc/dhcp/dhclient.conf'
@ -0,0 +1,26 @@
#cloud-config
resize_rootfs: false
growpart:
  mode: false
disable_ec2_metadata: true
disable_root: false

# password: RANDOM
# chpasswd: { expire: True }

ssh_pwauth: false
ssh_authorized_keys:
{% for key in common.ssh_auth_keys %}
  - {{ key }}
{% endfor %}

# set the locale to a given locale
# default: en_US.UTF-8
locale: en_US.UTF-8

timezone: {{ common.timezone }}

hostname: {{ common.hostname }}
fqdn: {{ common.fqdn }}

final_message: "YAY! The system is finally up, after $UPTIME seconds"
@ -0,0 +1,25 @@
#cloud-config
resize_rootfs: false
growpart:
  mode: false
disable_ec2_metadata: true
disable_root: false
user: root
password: r00tme
chpasswd: { expire: false }
ssh_pwauth: false
ssh_authorized_keys:
{% for key in common.ssh_auth_keys %}
  - {{ key }}
{% endfor %}

# set the locale to a given locale
# default: en_US.UTF-8
locale: en_US.UTF-8

timezone: {{ common.timezone }}

hostname: {{ common.hostname }}
fqdn: {{ common.fqdn }}

final_message: "YAY! The system is finally up, after $UPTIME seconds"
@ -0,0 +1,11 @@
# instance-id will be autogenerated
# instance-id: iid-abcdefg
#network-interfaces: |
#  auto {{ common.admin_iface_name|default("eth0") }}
#  iface {{ common.admin_iface_name|default("eth0") }} inet static
#  address {{ common.admin_ip }}
#  # network 192.168.1.0
#  netmask {{ common.admin_mask }}
#  # broadcast 192.168.1.255
#  # gateway 192.168.1.254
hostname: {{ common.hostname }}
@ -0,0 +1,11 @@
# instance-id will be autogenerated
# instance-id: iid-abcdefg
#network-interfaces: |
#  auto {{ common.admin_iface_name|default("eth0") }}
#  iface {{ common.admin_iface_name|default("eth0") }} inet static
#  address {{ common.admin_ip }}
#  # network 192.168.1.0
#  netmask {{ common.admin_mask }}
#  # broadcast 192.168.1.255
#  # gateway 192.168.1.254
hostname: {{ common.hostname }}
220
resources/not_provisioned_node/templates/provisioning.json
Normal file
@ -0,0 +1,220 @@
{
  "profile": "ubuntu_1404_x86_64",
  "name_servers_search": "\"example.com\"",
  "uid": "2",
  "interfaces": {
    "eth1": {
      "static": "0",
      "mac_address": "08:00:27:6e:6d:b4"
    },
    "eth0": {
      "ip_address": "10.0.2.15",
      "dns_name": "node-8.test.domain.local",
      "netmask": "255.255.255.0",
      "static": "0",
      "mac_address": "08:00:27:ea:35:e7"
    }
  },
  "interfaces_extra": {
    "eth1": {
      "onboot": "no",
      "peerdns": "no"
    },
    "eth0": {
      "onboot": "no",
      "peerdns": "no"
    }
  },
  "power_type": "ssh",
  "power_user": "root",
  "kernel_options": {
    "udevrules": "08:00:27:6e:6d:b4_eth1,08:00:27:ea:35:e7_eth0",
    "netcfg/choose_interface": "08:00:27:ea:35:e7"
  },
  "power_address": "10.20.0.1",
  "name_servers": "\"127.0.0.1\"",
  "ks_meta": {
    "gw": "10.20.0.1",
    "mco_enable": 1,
    "mco_vhost": "mcollective",
    "repo_setup": {
      "installer_kernel": {
        "local": "/var/www/nailgun/ubuntu/x86_64/images/linux",
        "remote_relative": "dists/trusty/main/installer-amd64/current/images/netboot/ubuntu-installer/amd64/linux"
      },
      "repos": [
        {
          "name": "ubuntu",
          "section": "main universe multiverse",
          "uri": "http://archive.ubuntu.com/ubuntu/",
          "priority": null,
          "suite": "trusty",
          "type": "deb"
        },
        {
          "name": "ubuntu-updates",
          "section": "main universe multiverse",
          "uri": "http://archive.ubuntu.com/ubuntu/",
          "priority": null,
          "suite": "trusty-updates",
          "type": "deb"
        },
        {
          "name": "ubuntu-security",
          "section": "main universe multiverse",
          "uri": "http://archive.ubuntu.com/ubuntu/",
          "priority": null,
          "suite": "trusty-security",
          "type": "deb"
        },
        {
          "name": "mos",
          "section": "main restricted",
          "uri": "http://127.0.0.1:8080/2015.1.0-7.0/ubuntu/x86_64",
          "priority": 1050,
          "suite": "mos7.0",
          "type": "deb"
        },
        {
          "name": "mos-updates",
          "section": "main restricted",
          "uri": "http://mirror.fuel-infra.org/mos/ubuntu/",
          "priority": 1050,
          "suite": "mos7.0-updates",
          "type": "deb"
        },
        {
          "name": "mos-security",
          "section": "main restricted",
          "uri": "http://mirror.fuel-infra.org/mos/ubuntu/",
          "priority": 1050,
          "suite": "mos7.0-security",
          "type": "deb"
        },
        {
          "name": "mos-holdback",
          "section": "main restricted",
          "uri": "http://mirror.fuel-infra.org/mos/ubuntu/",
          "priority": 1100,
          "suite": "mos7.0-holdback",
          "type": "deb"
        },
        {
          "name": "Auxiliary",
          "section": "main restricted",
          "uri": "http://127.0.0.1:8080/2015.1.0-7.0/ubuntu/auxiliary",
          "priority": 1150,
          "suite": "auxiliary",
          "type": "deb"
        }
      ],
      "metadata": {
        "always_editable": true,
        "weight": 50,
        "label": "Repositories"
      },
      "installer_initrd": {
        "local": "/var/www/nailgun/ubuntu/x86_64/images/initrd.gz",
        "remote_relative": "dists/trusty/main/installer-amd64/current/images/netboot/ubuntu-installer/amd64/initrd.gz"
      }
    },
    "authorized_keys": [],
    "mlnx_iser_enabled": false,
    "mco_pskey": "Gie6iega9ohngaenahthohngu8aebohxah9seidi",
    "mco_user": "guest",
    "puppet_enable": 0,
    "fuel_version": "6.1",
    "install_log_2_syslog": 1,
    "image_data": {
      "/boot": {
        "container": "gzip",
        "uri": "http://10.0.0.2:8001/tmp/targetimages/env_3_ubuntu_1404_amd64-boot.img.gz",
        "format": "ext2"
      },
      "/": {
        "container": "gzip",
        "uri": "http://10.0.0.2:8001/tmp/targetimages/env_3_ubuntu_1404_amd64.img.gz",
        "format": "ext4"
      }
    },
    "timezone": "Etc/UTC",
    "puppet_auto_setup": 1,
    "puppet_master": "localhost",
    "mco_auto_setup": 1,
    "mco_password": "guest",
    "auth_key": "\"\"",
    "pm_data": {
      "kernel_params": "console=ttyS0,9600 console=tty0 net.ifnames=0 biosdevname=0 rootdelay=90 nomodeset",
      "ks_spaces": [
        {
          "name": "sda",
          "extra": [],
          "free_space": 304617,
          "volumes": [
            {
              "type": "boot",
              "size": 300
            },
            {
              "mount": "/boot",
              "type": "raid",
              "file_system": "ext2",
              "name": "Boot",
              "size": 200
            },
            {
              "type": "lvm_meta_pool",
              "size": 0
            },
            {
              "vg": "os",
              "type": "pv",
              "lvm_meta_size": 64,
              "size": 20000,
              "orig_size": 59456
            }
          ],
          "type": "disk",
          "id": "sda",
          "size": 42800,
          "orig_size": 305245
        },
        {
          "_allocate_size": "min",
          "label": "Base System",
          "min_size": 19936,
          "orig_min_size": 59392,
          "volumes": [
            {
              "mount": "/",
              "size": 11744,
              "type": "lv",
              "name": "root",
              "file_system": "ext4"
            },
            {
              "mount": "swap",
              "size": 8192,
              "type": "lv",
              "name": "swap",
              "file_system": "swap"
            }
          ],
          "type": "vg",
          "id": "os"
        }
      ]
    },
    "mlnx_plugin_mode": "disabled",
    "master_ip": "127.0.0.1",
    "mco_connector": "rabbitmq",
    "mlnx_vf_num": "16",
    "admin_net": "10.20.0.0/24",
    "mco_host": "localhost"
  },
  "name": "node-2",
  "hostname": "node-2.example.com",
  "slave_name": "node-2",
  "power_pass": "/root/.ssh/bootstrap.rsa",
  "netboot_enabled": "1"
}
43
templates/not_provisioned_nodes.yaml
Normal file
@ -0,0 +1,43 @@
id: not_provisioned_nodes
resources:
{% for node in nodes %}
{% set mac = node.mac | replace(':', '_') %}
  - id: ssh_transport{{ mac }}
    from: resources/transport_ssh
    values:
      ssh_user: 'root'
      ssh_key: '/vagrant/tmp/keys/ssh_private'
  - id: transports{{mac}}
    from: resources/transports
    values:
      transports:key: ssh_transport{{mac}}::ssh_key
      transports:user: ssh_transport{{mac}}::ssh_user
      transports:port: ssh_transport{{mac}}::ssh_port
      transports:name: ssh_transport{{mac}}::name
  - id: node_{{mac}}
    from: resources/not_provisioned_node
    values:
      ip: {{node.ip}}
      transports_id: transports{{mac}}::transports_id
      name: node_{{mac}}
      admin_mac: {{node.mac}}
{% endfor %}

  - id: ssh_transport_master
    from: resources/transport_ssh
    values:
      ssh_user: 'vagrant'
      ssh_key: '/vagrant/.vagrant/machines/solar-dev/virtualbox/private_key'
  - id: transports_master
    from: resources/transports
    values:
      transports:key: ssh_transport_master::ssh_key
      transports:user: ssh_transport_master::ssh_user
      transports:port: ssh_transport_master::ssh_port
      transports:name: ssh_transport_master::name
  - id: node_master
    from: resources/ro_node
    values:
      name: node_master
      ip: '10.0.2.15'
      transports_id: transports_master::transports_id