Add packer build for docker container
* Add docker Ubuntu Trusty packer template.
* Rework cmtools to install ansible and puppet
* Add support for Vagrant provider docker.
* Containers will share the host system docker daemon and images in the storage, so may be run nested.
* Ensure the rsyslogd and sshd is running in containers.
* Also ensure the /var/log/solar dir, which is required for docker.
* W/a Solar-hardcoded and vbox specific ssh key paths for docker case.
* Update FAQ and docs

Note, vagrant does not support vm.network and exec based provisioning for docker, so there are workarounds.

Depends-on: I881d362968a10d816cbd368cb185900dfaa0b3bc
Partial-bug: #1547587
Change-Id: Idf91bd6ed2fb90601f517064705df7721aeae3fb
Signed-off-by: Bogdan Dobrelya <bdobrelia@mirantis.com>
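In short, the intended workflow is the one described in the README and FAQ hunks below; a rough sketch, assuming docker >= 1.10.0 and the vagrant-triggers plugin are installed and that packer is run from the bootstrap/ directory (that directory is an assumption based on the relative "playbooks/..." paths in the template):

    docker pull ubuntu:trusty
    packer build -only=docker solar-master-docker.json
    cd ..
    vagrant up --provider docker
    # "vagrant ssh" does not work with the docker provider; enter a node directly instead:
    docker exec -it solar-dev bash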
parent 58683af802
commit e1e304c4f9

Vagrantfile (vendored, 136 lines changed)
@@ -16,12 +16,22 @@
 Vagrant.require_version ">= 1.7.4"
+require 'etc'
+require 'log4r'
 require 'yaml'
 
 # Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
 VAGRANTFILE_API_VERSION = "2"
+
+HOME=Etc.getpwuid.dir
+# Solar specific key paths mappings
+INSECURE_KEY="#{HOME}/.vagrant.d/insecure_private_key"
+KEY_PATH1="/vagrant/tmp/keys/ssh_private"
+def get_machine_key (index = '')
+  "/vagrant/.vagrant/machines/solar-dev#{index}/virtualbox/private_key"
+end
+
 # configs, custom updates _defaults
+@logger = Log4r::Logger.new("vagrant::docker::driver")
 defaults_cfg = YAML.load_file('vagrant-settings.yaml_defaults')
 if File.exist?('vagrant-settings.yaml')
   custom_cfg = YAML.load_file('vagrant-settings.yaml')

@@ -44,30 +54,114 @@ MASTER_CPUS = cfg["master_cpus"]
 SLAVES_CPUS = cfg["slaves_cpus"]
 PARAVIRT_PROVIDER = cfg.fetch('paravirtprovider', false)
 PREPROVISIONED = cfg.fetch('preprovisioned', true)
+DOCKER_MASTER_IMAGE=cfg['docker_master_image']
+DOCKER_SLAVES_IMAGE=cfg['docker_slaves_image']
+DOCKER_CMD=cfg['docker_cmd']
 SOLAR_DB_BACKEND = cfg.fetch('solar_db_backend', 'riak')
 
 # Initialize noop plugins only in case of PXE boot
 require_relative 'bootstrap/vagrant_plugins/noop' unless PREPROVISIONED
 
+# FIXME(bogdando) more natively to distinguish a provider specific logic
+provider = (ARGV[2] || ENV['VAGRANT_DEFAULT_PROVIDER'] || :docker).to_sym
+
 def ansible_playbook_command(filename, args=[])
-  "ansible-playbook -v -i \"localhost,\" -c local /vagrant/bootstrap/playbooks/#{filename} #{args.join ' '}"
+  ansible_script_crafted = "ansible-playbook -v -i \"localhost,\" -c local /vagrant/bootstrap/playbooks/#{filename} #{args.join ' '}"
+  @logger.info("Crafted ansible-script: #{ansible_script_crafted})")
+  ansible_script_crafted
+end
+
+def shell_script(filename, args=[])
+  shell_script_crafted = "/bin/bash #{filename} #{args.join ' '} 2>/dev/null"
+  @logger.info("Crafted shell-script: #{shell_script_crafted})")
+  shell_script_crafted
+end
+
+# W/a unimplemented docker-exec, see https://github.com/mitchellh/vagrant/issues/4179
+# Use docker exec instead of the SSH provisioners
+# TODO(bogdando) lxc-docker support (there is no exec)
+def docker_exec (name, script)
+  @logger.info("Executing docker-exec at #{name}: #{script}")
+  system "docker exec -it #{name} #{script}"
 end
 
 solar_script = ansible_playbook_command("solar.yaml")
 solar_agent_script = ansible_playbook_command("solar-agent.yaml")
 
 master_pxe = ansible_playbook_command("pxe.yaml")
 
+if provider == :docker
+  # TODO(bogdando) use https://github.com/jpetazzo/pipework for multi net.
+  # Hereafter, we will use only the 1st IP address and a single interface.
+  # Also prepare docker volumes and workaround missing machines' ssh_keys
+  # and virtualbox hardcoded paths in Solar
+  key=get_machine_key
+  docker_volumes = ["-v", "#{INSECURE_KEY}:#{KEY_PATH1}:ro"]
+  docker_volumes << ["-v", "#{INSECURE_KEY}:#{key}:ro",
+    "-v", "/sys/fs/cgroup:/sys/fs/cgroup",
+    "-v", "/var/run/docker.sock:/var/run/docker.sock" ]
+  SLAVES_COUNT.times do |i|
+    index = i + 1
+    key = get_machine_key index.to_s
+    docker_volumes << ["-v", "#{INSECURE_KEY}:#{key}:ro"]
+  end
+  docker_volumes.flatten
+  @logger.info("Crafted docker volumes: #{docker_volumes}")
+end
+
 Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
+  if provider == :docker
+    # W/a unimplemented docker networking, see
+    # https://github.com/mitchellh/vagrant/issues/6667.
+    # Create or delete the solar net (depends on the vagrant action)
+    config.trigger.before :up do
+      system <<-SCRIPT
+      if ! docker network inspect solar >/dev/null 2>&1 ; then
+        docker network create -d bridge \
+          -o "com.docker.network.bridge.enable_icc"="true" \
+          -o "com.docker.network.bridge.enable_ip_masquerade"="true" \
+          -o "com.docker.network.driver.mtu"="1500" \
+          --gateway=#{SLAVES_IPS[0]}1 \
+          --ip-range=#{SLAVES_IPS[0]}0/24 \
+          --subnet=#{SLAVES_IPS[0]}0/24 \
+          solar >/dev/null 2>&1
+      fi
+      SCRIPT
+    end
+    config.trigger.after :destroy do
+      system <<-SCRIPT
+      docker network rm solar >/dev/null 2>&1
+      SCRIPT
+    end
+    config.vm.provider :docker do |d, override|
+      d.image = DOCKER_MASTER_IMAGE
+      d.remains_running = false
+      d.has_ssh = false
+      d.cmd = DOCKER_CMD.split(' ')
+    end
+  else
     config.vm.box = MASTER_IMAGE
     config.vm.box_version = MASTER_IMAGE_VERSION
+  end
 
   config.vm.define "solar-dev", primary: true do |config|
+    config.vm.host_name = "solar-dev"
+    if provider == :docker
+      config.vm.provider :docker do |d, override|
+        d.name = "solar-dev"
+        d.create_args = ["-i", "-t", "--privileged", "--ip=#{MASTER_IPS[0]}", "--net=solar",
+          docker_volumes].flatten
+      end
+      config.trigger.after :up, :option => { :vm => 'solar-dev'} do
+        docker_exec("solar-dev","/usr/sbin/rsyslogd >/dev/null 2>&1")
+        docker_exec("solar-dev","/usr/sbin/sshd >/dev/null 2>&1")
+        docker_exec("solar-dev","#{solar_script} >/dev/null 2>&1")
+        docker_exec("solar-dev","SOLAR_DB_BACKEND=#{SOLAR_DB_BACKEND} #{master_pxe} >/dev/null 2>&1") unless PREPROVISIONED
+      end
+    else
+      # not the docker provider
       config.vm.provision "shell", inline: solar_script, privileged: true, env: {"SOLAR_DB_BACKEND": SOLAR_DB_BACKEND}
       config.vm.provision "shell", inline: master_pxe, privileged: true unless PREPROVISIONED
-      config.vm.provision "file", source: "~/.vagrant.d/insecure_private_key", destination: "/vagrant/tmp/keys/ssh_private"
+      config.vm.provision "file", source: INSECURE_KEY, destination: KEY_PATH1
-      config.vm.host_name = "solar-dev"
 
       config.vm.provider :virtualbox do |v|
         v.memory = MASTER_RAM

@@ -94,6 +188,12 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
         libvirt.disk_bus = "virtio"
       end
 
+      ind = 0
+      MASTER_IPS.each do |ip|
+        config.vm.network :private_network, ip: "#{ip}", :dev => "solbr#{ind}", :mode => 'nat'
+        ind = ind + 1
+      end
+
       if SYNC_TYPE == 'nfs'
         config.vm.synced_folder ".", "/vagrant", type: "nfs"
       end

@@ -101,11 +201,6 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
         config.vm.synced_folder ".", "/vagrant", type: "rsync",
           rsync__args: ["--verbose", "--archive", "--delete", "-z"]
       end
 
-      ind = 0
-      MASTER_IPS.each do |ip|
-        config.vm.network :private_network, ip: "#{ip}", :dev => "solbr#{ind}", :mode => 'nat'
-        ind = ind + 1
-      end
     end
   end
 
@@ -113,11 +208,24 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
     index = i + 1
     ip_index = i + 3
     config.vm.define "solar-dev#{index}" do |config|
+      config.vm.host_name = "solar-dev#{index}"
+      if provider == :docker
+        config.vm.provider :docker do |d, override|
+          d.name = "solar-dev#{index}"
+          d.image = DOCKER_SLAVES_IMAGE
+          d.create_args = ["-i", "-t", "--privileged", "--ip=#{SLAVES_IPS[0]}#{ip_index}", "--net=solar",
+            docker_volumes].flatten
+        end
+        config.trigger.after :up, :option => { :vm => "solar-dev#{index}" } do
+          docker_exec("solar-dev#{index}","/usr/sbin/rsyslogd >/dev/null 2>&1")
+          docker_exec("solar-dev#{index}","/usr/sbin/sshd >/dev/null 2>&1")
+          docker_exec("solar-dev#{index}","#{solar_agent_script} >/dev/null 2>&1") if PREPROVISIONED
+        end
+      else
+        # not the docker provider
         # Standard box with all stuff preinstalled
         config.vm.box = SLAVES_IMAGE
         config.vm.box_version = SLAVES_IMAGE_VERSION
-        config.vm.host_name = "solar-dev#{index}"
 
         if PREPROVISIONED
           config.vm.provision "shell", inline: solar_agent_script, privileged: true

@@ -175,7 +283,7 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
         end
       end
     end
+      end
   end
 end
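Since the docker provider cannot use Vagrant's exec/SSH provisioners, the Vagrantfile above provisions through its docker_exec helper from an after-:up trigger. Roughly, what that amounts to on the host for the master node (command strings taken from the diff above; the /dev/null redirects are dropped here for readability):

    docker exec -it solar-dev /usr/sbin/rsyslogd
    docker exec -it solar-dev /usr/sbin/sshd
    docker exec -it solar-dev ansible-playbook -v -i "localhost," -c local /vagrant/bootstrap/playbooks/solar.yaml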
@@ -1,5 +1,8 @@
 # Solar image building
 
+[Atlas Vagrant Boxes (Ubuntu 14.04)](https://atlas.hashicorp.com/solar-project/boxes)
+| [Docker Image (Ubuntu 14.04)](https://hub.docker.com/r/bogdando/solar-master)
+
 Building on the host OS
 -----------------------

@@ -27,6 +30,16 @@ $ vagrant up --provider libvirt
 ```
 Note, this requires a vagrant-libvirt plugin.
 
+To build for a docker, use:
+```
+# docker pull ubuntu:trusty
+$ packer build -only=docker solar-master-docker.json
+$ cd ..
+$ vagrant up --provider docker
+```
+Note, this requires a vagrant-triggers plugin.
+The minimal docker 1.10.0 version is required.
+
 Building in the docker container
 --------------------------------
@@ -48,21 +48,23 @@ echo "==> Clearing last login information"
 >/var/log/wtmp
 >/var/log/btmp
 
+if [ "${cleanup}" = "true" ] ; then
 # Whiteout root
 count=$(df --sync -kP / | tail -n1 | awk -F ' ' '{print $4}')
 let count--
 dd if=/dev/zero of=/tmp/whitespace bs=1024 count=$count
 rm /tmp/whitespace
 
 # Whiteout /boot
 count=$(df --sync -kP /boot | tail -n1 | awk -F ' ' '{print $4}')
 let count--
 dd if=/dev/zero of=/boot/whitespace bs=1024 count=$count
 rm /boot/whitespace
 
 # Zero out the free space to save space in the final image
 dd if=/dev/zero of=/EMPTY bs=1M
 rm -f /EMPTY
+fi
 
 # Make sure we wait until all the data is written to disk, otherwise
 # Packer might quite too early before the large files are deleted
bootstrap/playbooks/files/cmtool.sh (new file, 126 lines)

@@ -0,0 +1,126 @@
#!/bin/bash -eux

# CM and CM_VERSION variables should be set inside of the Packer template:
#
# Values for CM can be:
# 'nocm' -- build a box without a configuration management tool
# 'chef' -- build a box with Chef
# 'chefdk' -- build a box with Chef Development Kit
# 'salt' -- build a box with Salt
# 'puppet' -- build a box with Puppet
#
# Values for CM_VERSION can be (when CM is chef|chefdk|salt|puppet):
# 'x.y.z' -- build a box with version x.y.z of Chef
# 'x.y' -- build a box with version x.y of Salt
# 'x.y.z-apuppetlabsb' -- build a box with package version of Puppet
# 'latest' -- build a box with the latest version
#
# Set CM_VERSION to 'latest' if unset because it can be problematic
# to set variables in pairs with Packer (and Packer does not support
# multi-value variables).
CM_VERSION=${CM_VERSION:-latest}

#
# Provisioner installs.
#

install_chef()
{
    echo "==> Installing Chef"
    if [[ ${CM_VERSION} == 'latest' ]]; then
        echo "Installing latest Chef version"
        curl -Lk https://www.getchef.com/chef/install.sh | bash
    else
        echo "Installing Chef version ${CM_VERSION}"
        curl -Lk https://www.getchef.com/chef/install.sh | bash -s -- -v $CM_VERSION
    fi
}

install_chef_dk()
{
    echo "==> Installing Chef Development Kit"
    if [[ ${CM_VERSION:-} == 'latest' ]]; then
        echo "==> Installing latest Chef Development Kit version"
        curl -Lk https://www.getchef.com/chef/install.sh | sh -s -- -P chefdk
    else
        echo "==> Installing Chef Development Kit ${CM_VERSION}"
        curl -Lk https://www.getchef.com/chef/install.sh | sh -s -- -P chefdk -v ${CM_VERSION}
    fi

    echo "==> Adding Chef Development Kit and Ruby to PATH"
    echo 'eval "$(chef shell-init bash)"' >> /home/vagrant/.bash_profile
    chown vagrant /home/vagrant/.bash_profile
}

install_salt()
{
    echo "==> Installing Salt"
    if [[ ${CM_VERSION:-} == 'latest' ]]; then
        echo "Installing latest Salt version"
        wget -O - http://bootstrap.saltstack.org | sudo sh
    else
        echo "Installing Salt version $CM_VERSION"
        curl -L http://bootstrap.saltstack.org | sudo sh -s -- git $CM_VERSION
    fi
}

install_puppet()
{
    echo "==> Installing Puppet"
    . /etc/lsb-release

    DEB_NAME=puppetlabs-release-${DISTRIB_CODENAME}.deb
    wget http://apt.puppetlabs.com/${DEB_NAME}
    dpkg -i ${DEB_NAME}
    apt-get update
    if [[ ${CM_VERSION:-} == 'latest' ]]; then
        echo "Installing latest Puppet version"
        apt-get install -y puppet
    else
        echo "Installing Puppet version $CM_VERSION"
        apt-get install -y puppet-common=$CM_VERSION puppet=$CM_VERSION
    fi
    rm -f ${DEB_NAME}
}

install_ansible()
{
    echo "==> Installing Ansible python egg"
    # TODO(bogdando): maybe this is better:
    # http://docs.ansible.com/ansible/intro_installation.html#latest-releases-via-apt-ubuntu
    apt-get remove -f python-pip
    sudo apt-get install -y python-setuptools
    sudo easy_install pip
    sudo pip install -U pip
    sudo pip install ansible
}

#
# Main script
#

case "${CM}" in
  'chef')
    install_chef
    ;;

  'chefdk')
    install_chef_dk
    ;;

  'salt')
    install_salt
    ;;

  'puppet')
    install_puppet
    ;;

  'ansible')
    install_ansible
    ;;

  *)
    echo "==> Building box without baking in a configuration management tool"
    ;;
esac
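cmtool.sh is driven entirely by the CM and CM_VERSION environment variables documented in its header; the Packer templates below pass them through the shell provisioner's environment_vars. A rough manual equivalent, mirroring the template defaults (illustrative invocation only, run from the bootstrap/ directory):

    # what the Packer shell provisioners below effectively run inside the image
    sudo CM=puppet CM_VERSION=latest bash playbooks/files/cmtool.sh
    sudo CM=ansible CM_VERSION=latest bash playbooks/files/cmtool.sh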
@@ -1,3 +1,3 @@
 #!/bin/bash -eux
+mkdir -p /var/run/sshd
 echo "UseDNS no" >> /etc/ssh/sshd_config
bootstrap/playbooks/files/sudo.sh (new file, 3 lines)

@@ -0,0 +1,3 @@
#!/bin/sh -eux
apt-get -y install sudo
exit 0
bootstrap/playbooks/files/tools.sh (new file, 27 lines)

@@ -0,0 +1,27 @@
#!/bin/bash -eux
# Install basic packages and build requirements for ansible/librarian-puppet

PACKAGES="
git
make
ruby-dev
python-dev
autoconf
g++
openssh-server
iputils-ping
rsyslog
psmisc
iputils-ping
iptables
less
curl
wget
rsync
elvis-tiny
screen
tcpdump
strace
"
apt-get -y install $PACKAGES
exit 0
@@ -1,11 +0,0 @@
-#!/bin/sh
-
-# TODO: maybe this is better:
-# http://docs.ansible.com/ansible/intro_installation.html#latest-releases-via-apt-ubuntu
-
-sudo apt-get remove -f python-pip
-sudo apt-get update
-sudo apt-get install -y python-setuptools python-dev autoconf g++
-sudo easy_install pip
-sudo pip install -U pip
-sudo pip install "ansible"
@@ -57,6 +57,7 @@
     line: eval "$(_SOLAR_COMPLETE=source solar)"
     state: present
 
+- file: path=/var/log/solar state=directory owner=vagrant
 - file: path=/var/lib/solar/repositories state=directory owner=vagrant
 - file: src=/vagrant/solar-resources/resources dest=/var/lib/solar/repositories/resources state=link owner=vagrant
 - file: src=/vagrant/solar-resources/templates dest=/var/lib/solar/repositories/templates state=link owner=vagrant
@@ -1,11 +1,6 @@
 ---
 
-# Puppet
-- shell: sudo wget https://apt.puppetlabs.com/puppetlabs-release-trusty.deb -O /root/puppetlabs-release-trusty.deb
-- shell: sudo dpkg -i /root/puppetlabs-release-trusty.deb
-- shell: sudo apt-get update
-
-- apt: name=puppet state=present
+# Puppet related
 - template: src=files/hiera.yaml dest=/etc/puppet/hiera.yaml
 - file: path=/etc/puppet/hieradata state=directory
 # Make paths puppet 4 compatible
bootstrap/solar-master-docker.json (new file, 118 lines)

@@ -0,0 +1,118 @@
{
  "variables": {
    "cm": "puppet",
    "cm_version": "latest",
    "cleanup_pause": "",
    "headless": "{{env `headless`}}",
    "update": "true",
    "ssh_username": "vagrant",
    "ssh_password": "vagrant",
    "install_vagrant_key": "true",
    "http_proxy": "{{env `http_proxy`}}",
    "https_proxy": "{{env `https_proxy`}}",
    "ftp_proxy": "{{env `ftp_proxy`}}",
    "rsync_proxy": "{{env `rsync_proxy`}}",
    "no_proxy": "{{env `no_proxy`}}",
    "cleanup": "false",
    "ansible_config_path": "/etc/ansible",
    "ansible_config_file": "ansible.cfg",
    "ansible_log_file": "/var/tmp/ansible.log"
  },
  "builders": [{
    "type": "docker",
    "image": "ubuntu:trusty",
    "commit": true,
    "run_command": [
      "-ti",
      "--privileged",
      "-v",
      "/sys/fs/cgroup:/sys/fs/cgroup",
      "-v",
      "/var/run/docker.sock:/var/run/docker.sock",
      "-d",
      "{{.Image}}",
      "/bin/bash"
    ]
  }],
  "provisioners": [{
    "type": "shell",
    "execute_command": "echo 'vagrant' | sh '{{.Path}}'",
    "scripts": [
      "playbooks/files/sudo.sh"
    ]}, {
    "type": "shell",
    "execute_command": "echo '{{user `ssh_password`}}' | {{ .Vars }} sudo -E -S sh '{{ .Path }}'",
    "inline": [
      "echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers"
    ]}, {
    "type": "shell",
    "inline": ["sudo mkdir -p {{ user `ansible_config_path` }}"]
  }, {
    "type": "shell",
    "inline": [
      "touch /tmp/ansible.cfg",
      "sudo mv /tmp/ansible.cfg {{ user `ansible_config_path` }}/{{ user `ansible_config_file` }}",
      "sudo echo '[defaults]' >> {{ user `ansible_config_path` }}/{{ user `ansible_config_file` }}",
      "sudo echo 'log_path = {{ user `ansible_log_file` }}' >> {{ user `ansible_config_path` }}/{{ user `ansible_config_file` }}",
      "sudo touch {{ user `ansible_log_file` }}",
      "sudo chmod 666 {{ user `ansible_log_file` }}"
    ]}, {
    "type": "shell",
    "environment_vars": [
      "CM={{user `cm`}}",
      "CM_VERSION={{user `cm_version`}}",
      "CLEANUP_PAUSE={{user `cleanup_pause`}}",
      "UPDATE={{user `update`}}",
      "INSTALL_VAGRANT_KEY={{user `install_vagrant_key`}}",
      "SSH_USERNAME={{user `ssh_username`}}",
      "SSH_PASSWORD={{user `ssh_password`}}",
      "http_proxy={{user `http_proxy`}}",
      "https_proxy={{user `https_proxy`}}",
      "ftp_proxy={{user `ftp_proxy`}}",
      "rsync_proxy={{user `rsync_proxy`}}",
      "no_proxy={{user `no_proxy`}}"
    ],
    "execute_command": "echo 'vagrant' | {{.Vars}} sudo -E -S bash '{{.Path}}'",
    "scripts": [
      "playbooks/files/update.sh",
      "playbooks/files/tools.sh",
      "playbooks/files/cmtool.sh",
      "playbooks/files/vagrant.sh",
      "playbooks/files/sshd.sh"
    ]
  }, {
    "type": "shell",
    "environment_vars": [
      "CM=ansible",
      "CM_VERSION=latest"
    ],
    "script": "playbooks/files/cmtool.sh",
    "execute_command": "echo 'vagrant' | {{.Vars}} sudo -E -S bash '{{.Path}}'"
  }, {
    "type": "ansible-local",
    "playbook_dir": "playbooks",
    "playbook_file": "playbooks/build-main.yaml",
    "extra_arguments": ["--verbose"]
  }, {
    "type": "shell",
    "environment_vars": [
      "cleanup={{user `cleanup`}}"
    ],
    "execute_command": "echo 'vagrant' | {{.Vars}} sudo -E -S bash '{{.Path}}'",
    "scripts": [
      "playbooks/files/vmtool.sh",
      "playbooks/files/minimize.sh",
      "playbooks/files/cleanup.sh"
    ]
  }],
  "post-processors": [
    [
      {
        "type": "docker-tag",
        "repository": "bogdando/solar-master",
        "tag": "latest",
        "keep_input_artifact": false
      }
    ]
  ]
}
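If the build succeeds, the docker-tag post-processor above should leave a locally tagged image behind; a quick sanity check before running vagrant up (repository name taken from the post-processor):

    docker images bogdando/solar-master
    # expect a "latest" tag produced by the docker-tag post-processor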
@@ -1,5 +1,7 @@
 {
   "variables": {
+    "cm": "puppet",
+    "cm_version": "latest",
     "cleanup_pause": "",
     "headless": "{{env `headless`}}",
     "update": "true",

@@ -17,6 +19,7 @@
     "disk_size": "4500",
     "mem_size": "512",
     "vcpu_num": "1",
+    "cleanup": "true",
     "ansible_config_path": "/etc/ansible",
     "ansible_config_file": "ansible.cfg",
     "ansible_log_file": "/var/tmp/ansible.log"

@@ -124,6 +127,8 @@
     ]}, {
     "type": "shell",
     "environment_vars": [
+      "CM={{user `cm`}}",
+      "CM_VERSION={{user `cm_version`}}",
       "CLEANUP_PAUSE={{user `cleanup_pause`}}",
       "UPDATE={{user `update`}}",
       "INSTALL_VAGRANT_KEY={{user `install_vagrant_key`}}",

@@ -138,12 +143,18 @@
     "execute_command": "echo 'vagrant' | {{.Vars}} sudo -E -S bash '{{.Path}}'",
     "scripts": [
       "playbooks/files/update.sh",
+      "playbooks/files/tools.sh",
+      "playbooks/files/cmtool.sh",
       "playbooks/files/vagrant.sh",
       "playbooks/files/sshd.sh"
     ]
   }, {
     "type": "shell",
-    "script": "playbooks/files/ubuntu-ansible.sh",
+    "environment_vars": [
+      "CM=ansible",
+      "CM_VERSION=latest"
+    ],
+    "script": "playbooks/files/cmtool.sh",
     "execute_command": "echo 'vagrant' | {{.Vars}} sudo -E -S bash '{{.Path}}'"
   }, {
     "type": "ansible-local",

@@ -152,6 +163,9 @@
     "extra_arguments": ["--verbose"]
   }, {
     "type": "shell",
+    "environment_vars": [
+      "cleanup={{user `cleanup`}}"
+    ],
     "execute_command": "echo 'vagrant' | {{.Vars}} sudo -E -S bash '{{.Path}}'",
     "scripts": [
       "playbooks/files/vmtool.sh",
@@ -60,9 +60,38 @@ By default for simplicity we use `sqlite`. On our vagrant environment we use
 single node `riak`.
 You can also use multiple nodes `riak`, with some strong consistent buckets.
 
+.. _faq_solar_examples:
+
 Where can I find solar examples ?
 ---------------------------------
 
 Example resources, composer templates and examples itself are located:
 https://github.com/openstack/solar-resources
+
+.. _faq_solar_docker:
+
+Can I run solar nodes with docker ?
+-----------------------------------
+
+Yes, although that is an experimental feature and currently supports only
+a single network interface per a container. Note, that before to run the
+``vagrant up --provider docker`` command, the following preparations must be
+done at the host system:
+
+.. code-block:: bash
+
+    # docker pull solarproject/riak
+    # git clone https://github.com/kiasaki/docker-alpine-postgres.git
+    # cd docker-alpine-postgres
+    # make build && cd -
+
+This will allow the solar nodes to run required nested docker containers.
+
+.. note ::
+    The command ``vagrant ssh`` will not be working for the docker case.
+    Instead, use any of the following commands (with a correct name/IP):
+
+.. code-block:: bash
+
+    # ssh vagrant@10.0.0.2
+    # docker exec -it solar-dev bash
|
|||||||
|
|
||||||
tasks_driver
|
tasks_driver
|
||||||
^^^^^^^^^^^^
|
^^^^^^^^^^^^
|
||||||
Driver name should be registered in appropriate entrypoints (see :ref:`namespace_workers`)
|
Driver name should be registered in appropriate entrypoints
|
||||||
|
(see :ref:`namespace_workers`)
|
||||||
|
|
||||||
scheduler_driver
|
scheduler_driver
|
||||||
^^^^^^^^^^^^^^^^
|
^^^^^^^^^^^^^^^^
|
||||||
Driver name should be registered in appropriate entrypoints (see :ref:`namespace_workers`)
|
Driver name should be registered in appropriate entrypoints
|
||||||
|
(see :ref:`namespace_workers`)
|
||||||
|
|
||||||
system_log_driver
|
system_log_driver
|
||||||
^^^^^^^^^^^^^^^^^
|
^^^^^^^^^^^^^^^^^
|
||||||
Driver name should be registered in appropriate entrypoints (see :ref:`namespace_workers`)
|
Driver name should be registered in appropriate entrypoints
|
||||||
|
(see :ref:`namespace_workers`)
|
||||||
|
|
||||||
runner
|
runner
|
||||||
^^^^^^
|
^^^^^^
|
||||||
|
@@ -7,6 +7,9 @@ master_image: solar-project/solar-master
 master_image_version: ">= 0.3.0.pre0"
 slaves_image: solar-project/solar-master
 slaves_image_version: ">= 0.3.0.pre0"
+docker_master_image: solar-project/solar-master
+docker_slaves_image: solar-project/solar-master
+docker_cmd: "/sbin/init"
 master_ram: 1024
 master_cpus: 1
 master_ips: