Merge "Separate vagrantfile for the docker provider"
Commit 7952a8c577

Vagrantfile (vendored, 299 lines changed)
@@ -16,22 +16,12 @@

 Vagrant.require_version ">= 1.7.4"

-require 'etc'
-require 'log4r'
 require 'yaml'

 # Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
 VAGRANTFILE_API_VERSION = "2"
-HOME=Etc.getpwuid.dir
-# Solar specific key paths mappings
-INSECURE_KEY="#{HOME}/.vagrant.d/insecure_private_key"
-KEY_PATH1="/vagrant/tmp/keys/ssh_private"
-def get_machine_key (index = '')
-"/vagrant/.vagrant/machines/solar-dev#{index}/virtualbox/private_key"
-end

 # configs, custom updates _defaults
-@logger = Log4r::Logger.new("vagrant::docker::driver")
 defaults_cfg = YAML.load_file('vagrant-settings.yaml_defaults')
 if File.exist?('vagrant-settings.yaml')
 custom_cfg = YAML.load_file('vagrant-settings.yaml')
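For context on the settings merge shown above: overrides live in a plain vagrant-settings.yaml next to the Vagrantfile and are merged over vagrant-settings.yaml_defaults. A minimal sketch of what such an override file might look like, using only key names that appear in this change (the values are illustrative, not project defaults):

    $ cat vagrant-settings.yaml
    # illustrative override values, not project defaults
    master_cpus: 2
    slaves_cpus: 2
    slaves_count: 2
    preprovisioned: true
    solar_db_backend: riak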
@@ -54,157 +44,68 @@ MASTER_CPUS = cfg["master_cpus"]
 SLAVES_CPUS = cfg["slaves_cpus"]
 PARAVIRT_PROVIDER = cfg.fetch('paravirtprovider', false)
 PREPROVISIONED = cfg.fetch('preprovisioned', true)
-DOCKER_MASTER_IMAGE=cfg['docker_master_image']
-DOCKER_SLAVES_IMAGE=cfg['docker_slaves_image']
-DOCKER_CMD=cfg['docker_cmd']
 SOLAR_DB_BACKEND = cfg.fetch('solar_db_backend', 'riak')

 # Initialize noop plugins only in case of PXE boot
 require_relative 'bootstrap/vagrant_plugins/noop' unless PREPROVISIONED

-# FIXME(bogdando) more natively to distinguish a provider specific logic
-provider = (ARGV[2] || ENV['VAGRANT_DEFAULT_PROVIDER'] || :docker).to_sym

 def ansible_playbook_command(filename, args=[])
-ansible_script_crafted = "ansible-playbook -v -i \"localhost,\" -c local /vagrant/bootstrap/playbooks/#{filename} #{args.join ' '}"
-@logger.info("Crafted ansible-script: #{ansible_script_crafted})")
-ansible_script_crafted
-end
-
-def shell_script(filename, env=[], args=[])
-shell_script_crafted = "/bin/bash -c \"#{env.join ' '} #{filename} #{args.join ' '} 2>/dev/null\""
-@logger.info("Crafted shell-script: #{shell_script_crafted})")
-shell_script_crafted
-end
-
-# W/a unimplemented docker-exec, see https://github.com/mitchellh/vagrant/issues/4179
-# Use docker exec instead of the SSH provisioners
-# TODO(bogdando) lxc-docker support (there is no exec)
-def docker_exec (name, script)
-@logger.info("Executing docker-exec at #{name}: #{script}")
-system "docker exec -it #{name} #{script}"
+"ansible-playbook -v -i \"localhost,\" -c local /vagrant/bootstrap/playbooks/#{filename} #{args.join ' '}"
 end

 solar_script = ansible_playbook_command("solar.yaml")
 solar_agent_script = ansible_playbook_command("solar-agent.yaml")
-master_pxe = ansible_playbook_command("pxe.yaml")
-fix_six = shell_script("/vagrant/bootstrap/playbooks/fix_centos7_six.sh")
-solar_exec = shell_script("#{solar_script}", ["SOLAR_DB_BACKEND=#{SOLAR_DB_BACKEND}"])

-if provider == :docker
-# Prepare docker volumes and workaround missing machines' ssh_keys
-# and virtualbox hardcoded paths in Solar
-key=get_machine_key
-docker_volumes = ["-v", "#{INSECURE_KEY}:#{KEY_PATH1}:ro"]
-docker_volumes << ["-v", "#{INSECURE_KEY}:#{key}:ro",
-"-v", "/var/tmp/vagrant:/var/tmp/vagrant",
-"-v", "/sys/fs/cgroup:/sys/fs/cgroup",
-"-v", "/var/run/docker.sock:/var/run/docker.sock" ]
-SLAVES_COUNT.times do |i|
-index = i + 1
-key = get_machine_key index.to_s
-docker_volumes << ["-v", "#{INSECURE_KEY}:#{key}:ro"]
-end
-docker_volumes.flatten!
-@logger.info("Crafted docker volumes: #{docker_volumes}")
-end
+master_pxe = ansible_playbook_command("pxe.yaml")

 Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
-if provider == :docker
-# W/a unimplemented docker networking, see
-# https://github.com/mitchellh/vagrant/issues/6667.
-# Create or delete the solar net (depends on the vagrant action)
-# TODO(bogdando) use https://github.com/jpetazzo/pipework for multi net.
-# Hereafter, we will use only the 1st IP address and a single interface.
-config.trigger.before :up do
-system <<-SCRIPT
-if ! docker network inspect solar >/dev/null 2>&1 ; then
-docker network create -d bridge \
--o "com.docker.network.bridge.enable_icc"="true" \
--o "com.docker.network.bridge.enable_ip_masquerade"="true" \
--o "com.docker.network.driver.mtu"="1500" \
---gateway=#{SLAVES_IPS[0]}1 \
---ip-range=#{SLAVES_IPS[0]}0/24 \
---subnet=#{SLAVES_IPS[0]}0/24 \
-solar >/dev/null 2>&1
-fi
-SCRIPT
-end
-config.trigger.after :destroy do
-system <<-SCRIPT
-docker network rm solar >/dev/null 2>&1
-SCRIPT
-end
-config.vm.provider :docker do |d, override|
-d.image = DOCKER_MASTER_IMAGE
-d.remains_running = false
-d.has_ssh = false
-d.cmd = DOCKER_CMD.split(' ')
-end
-else
-config.vm.box = MASTER_IMAGE
-config.vm.box_version = MASTER_IMAGE_VERSION
-end

 config.vm.define "solar-dev", primary: true do |config|
+config.vm.box = MASTER_IMAGE
+config.vm.box_version = MASTER_IMAGE_VERSION

+config.vm.provision "shell", inline: solar_script, privileged: true, env: {"SOLAR_DB_BACKEND": SOLAR_DB_BACKEND}
+config.vm.provision "shell", inline: master_pxe, privileged: true unless PREPROVISIONED
+config.vm.provision "file", source: "~/.vagrant.d/insecure_private_key", destination: "/vagrant/tmp/keys/ssh_private"
 config.vm.host_name = "solar-dev"
-if provider == :docker
-config.vm.provider :docker do |d, override|
-d.name = "solar-dev"
-d.create_args = ["-i", "-t", "--privileged", "--ip=#{MASTER_IPS[0]}", "--net=solar",
-docker_volumes].flatten
-end
-config.trigger.after :up, :option => { :vm => 'solar-dev'} do
-docker_exec("solar-dev","/usr/sbin/rsyslogd >/dev/null 2>&1")
-docker_exec("solar-dev","/usr/sbin/sshd >/dev/null 2>&1")
-docker_exec("solar-dev","#{fix_six} >/dev/null 2>&1")
-docker_exec("solar-dev","#{solar_exec}")
-docker_exec("solar-dev","#{master_pxe} >/dev/null 2>&1") unless PREPROVISIONED
-end
-else
-# not the docker provider
-config.vm.provision "shell", inline: solar_script, privileged: true, env: {"SOLAR_DB_BACKEND": SOLAR_DB_BACKEND}
-config.vm.provision "shell", inline: master_pxe, privileged: true unless PREPROVISIONED
-config.vm.provision "file", source: INSECURE_KEY, destination: KEY_PATH1

 config.vm.provider :virtualbox do |v|
 v.memory = MASTER_RAM
 v.cpus = MASTER_CPUS
 v.customize [
 "modifyvm", :id,
 "--memory", MASTER_RAM,
 "--cpus", MASTER_CPUS,
 "--ioapic", "on",
 ]
 if PARAVIRT_PROVIDER
 v.customize ['modifyvm', :id, "--paravirtprovider", PARAVIRT_PROVIDER] # for linux guest
-end
-v.name = "solar-dev"
 end
+v.name = "solar-dev"
+end

 config.vm.provider :libvirt do |libvirt|
 libvirt.driver = 'kvm'
 libvirt.memory = MASTER_RAM
 libvirt.cpus = MASTER_CPUS
 libvirt.nested = true
 libvirt.cpu_mode = 'host-passthrough'
 libvirt.volume_cache = 'unsafe'
 libvirt.disk_bus = "virtio"
 end

-ind = 0
-MASTER_IPS.each do |ip|
-config.vm.network :private_network, ip: "#{ip}", :dev => "solbr#{ind}", :mode => 'nat'
-ind = ind + 1
-end
+if SYNC_TYPE == 'nfs'
+config.vm.synced_folder ".", "/vagrant", type: "nfs"
+end
+if SYNC_TYPE == 'rsync'
+config.vm.synced_folder ".", "/vagrant", type: "rsync",
+rsync__args: ["--verbose", "--archive", "--delete", "-z"]
+end

-if SYNC_TYPE == 'nfs'
-config.vm.synced_folder ".", "/vagrant", type: "nfs"
-end
-if SYNC_TYPE == 'rsync'
-config.vm.synced_folder ".", "/vagrant", type: "rsync",
-rsync__args: ["--verbose", "--archive", "--delete", "-z"]
-end
+ind = 0
+MASTER_IPS.each do |ip|
+config.vm.network :private_network, ip: "#{ip}", :dev => "solbr#{ind}", :mode => 'nat'
+ind = ind + 1
 end
 end

@@ -212,83 +113,69 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
 index = i + 1
 ip_index = i + 3
 config.vm.define "solar-dev#{index}" do |config|

-config.vm.host_name = "solar-dev#{index}"
-if provider == :docker
-config.vm.provider :docker do |d, override|
-d.name = "solar-dev#{index}"
-d.image = DOCKER_SLAVES_IMAGE
-d.create_args = ["-i", "-t", "--privileged", "--ip=#{SLAVES_IPS[0]}#{ip_index}", "--net=solar",
-docker_volumes].flatten
-end
-config.trigger.after :up, :option => { :vm => "solar-dev#{index}" } do
-docker_exec("solar-dev#{index}","/usr/sbin/rsyslogd >/dev/null 2>&1")
-docker_exec("solar-dev#{index}","/usr/sbin/sshd >/dev/null 2>&1")
-docker_exec("solar-dev#{index}","#{fix_six} >/dev/null 2>&1")
-docker_exec("solar-dev#{index}","#{solar_agent_script}") if PREPROVISIONED
-end
-else
-# not the docker provider
-# Standard box with all stuff preinstalled
-config.vm.box = SLAVES_IMAGE
-config.vm.box_version = SLAVES_IMAGE_VERSION
-
-if PREPROVISIONED
-config.vm.provision "shell", inline: solar_agent_script, privileged: true
-#TODO(bogdando) figure out how to configure multiple interfaces when was not PREPROVISIONED
-ind = 0
-SLAVES_IPS.each do |ip|
-config.vm.network :private_network, ip: "#{ip}#{ip_index}", :dev => "solbr#{ind}", :mode => 'nat'
-ind = ind + 1
-end
-else
-# Disable attempts to install guest os and check that node is booted using ssh,
-# because nodes will have ip addresses from dhcp, and vagrant doesn't know
-# which ip to use to perform connection
-config.vm.communicator = :noop
-config.vm.guest = :noop_guest
-# Configure network to boot vm using pxe
-config.vm.network "private_network", adapter: 1, ip: "10.0.0.#{ip_index}"
-config.vbguest.no_install = true
-config.vbguest.auto_update = false
-end
-
-config.vm.provider :virtualbox do |v|
-boot_order(v, ['net', 'disk'])
-v.customize [
-"modifyvm", :id,
-"--memory", SLAVES_RAM,
-"--cpus", SLAVES_CPUS,
-"--ioapic", "on",
-"--macaddress1", "auto",
-]
-if PARAVIRT_PROVIDER
-v.customize ['modifyvm', :id, "--paravirtprovider", PARAVIRT_PROVIDER] # for linux guest
-end
-v.name = "solar-dev#{index}"
-end
-
-config.vm.provider :libvirt do |libvirt|
-libvirt.driver = 'kvm'
-libvirt.memory = SLAVES_RAM
-libvirt.cpus = SLAVES_CPUS
-libvirt.nested = true
-libvirt.cpu_mode = 'host-passthrough'
-libvirt.volume_cache = 'unsafe'
-libvirt.disk_bus = "virtio"
-end
-
-if PREPROVISIONED
-if SYNC_TYPE == 'nfs'
-config.vm.synced_folder ".", "/vagrant", type: "nfs"
-end
-if SYNC_TYPE == 'rsync'
-config.vm.synced_folder ".", "/vagrant", type: "rsync",
-rsync__args: ["--verbose", "--archive", "--delete", "-z"]
-end
-end
-end
+# Standard box with all stuff preinstalled
+config.vm.box = SLAVES_IMAGE
+config.vm.box_version = SLAVES_IMAGE_VERSION
+config.vm.host_name = "solar-dev#{index}"
+if PREPROVISIONED
+config.vm.provision "shell", inline: solar_agent_script, privileged: true
+#TODO(bogdando) figure out how to configure multiple interfaces when was not PREPROVISIONED
+ind = 0
+SLAVES_IPS.each do |ip|
+config.vm.network :private_network, ip: "#{ip}#{ip_index}", :dev => "solbr#{ind}", :mode => 'nat'
+ind = ind + 1
+end
+else
+# Disable attempts to install guest os and check that node is booted using ssh,
+# because nodes will have ip addresses from dhcp, and vagrant doesn't know
+# which ip to use to perform connection
+config.vm.communicator = :noop
+config.vm.guest = :noop_guest
+# Configure network to boot vm using pxe
+config.vm.network "private_network", adapter: 1, ip: "10.0.0.#{ip_index}"
+config.vbguest.no_install = true
+config.vbguest.auto_update = false
+end
+
+config.vm.provider :virtualbox do |v|
+boot_order(v, ['net', 'disk'])
+v.customize [
+"modifyvm", :id,
+"--memory", SLAVES_RAM,
+"--cpus", SLAVES_CPUS,
+"--ioapic", "on",
+"--macaddress1", "auto",
+]
+if PARAVIRT_PROVIDER
+v.customize ['modifyvm', :id, "--paravirtprovider", PARAVIRT_PROVIDER] # for linux guest
+end
+v.name = "solar-dev#{index}"
+end
+
+config.vm.provider :libvirt do |libvirt|
+libvirt.driver = 'kvm'
+libvirt.memory = SLAVES_RAM
+libvirt.cpus = SLAVES_CPUS
+libvirt.nested = true
+libvirt.cpu_mode = 'host-passthrough'
+libvirt.volume_cache = 'unsafe'
+libvirt.disk_bus = "virtio"
+end
+
+if PREPROVISIONED
+if SYNC_TYPE == 'nfs'
+config.vm.synced_folder ".", "/vagrant", type: "nfs"
+end
+if SYNC_TYPE == 'rsync'
+config.vm.synced_folder ".", "/vagrant", type: "rsync",
+rsync__args: ["--verbose", "--archive", "--delete", "-z"]
+end
+end
 end
 end

 end
Vagrantfile_docker (new file, 160 lines)
# -*- mode: ruby -*-
# vi: set ft=ruby :
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

Vagrant.require_version ">= 1.7.4"

require 'etc'
require 'log4r'
require 'yaml'

# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
VAGRANTFILE_API_VERSION = "2"
ENV['VAGRANT_DEFAULT_PROVIDER'] = "docker"
HOME=Etc.getpwuid.dir
# Solar examples specific key paths mappings
INSECURE_KEY="#{HOME}/.vagrant.d/insecure_private_key"
KEY_PATH1="/vagrant/tmp/keys/ssh_private"
def get_machine_key (index = '')
  "/vagrant/.vagrant/machines/solar-dev#{index}/virtualbox/private_key"
end

# configs, custom updates _defaults
@logger = Log4r::Logger.new("vagrant::docker::driver")
defaults_cfg = YAML.load_file('vagrant-settings.yaml_defaults')
if File.exist?('vagrant-settings.yaml')
  custom_cfg = YAML.load_file('vagrant-settings.yaml')
  cfg = defaults_cfg.merge(custom_cfg)
else
  cfg = defaults_cfg
end

SLAVES_COUNT = cfg["slaves_count"]
SLAVES_IPS = cfg["slaves_ips"]
MASTER_IPS = cfg["master_ips"]
DOCKER_MASTER_IMAGE=cfg['docker_master_image']
DOCKER_SLAVES_IMAGE=cfg['docker_slaves_image']
DOCKER_CMD=cfg['docker_cmd']
SOLAR_DB_BACKEND = cfg.fetch('solar_db_backend', 'riak')

def ansible_playbook_command(filename, args=[])
  ansible_script_crafted = "ansible-playbook -v -i \"localhost,\" -c local /vagrant/bootstrap/playbooks/#{filename} #{args.join ' '}"
  @logger.info("Crafted ansible-script: #{ansible_script_crafted})")
  ansible_script_crafted
end

def shell_script(filename, env=[], args=[])
  shell_script_crafted = "/bin/bash -c \"#{env.join ' '} #{filename} #{args.join ' '} 2>/dev/null\""
  @logger.info("Crafted shell-script: #{shell_script_crafted})")
  shell_script_crafted
end

# FIXME(bogdando) w/a unimplemented docker-exec
# see https://github.com/mitchellh/vagrant/issues/4179
# Use docker exec instead of the SSH provisioners
def docker_exec (name, script)
  @logger.info("Executing docker-exec at #{name}: #{script}")
  system "docker exec -it #{name} #{script}"
end

solar_script = ansible_playbook_command("solar.yaml")
solar_agent_script = ansible_playbook_command("solar-agent.yaml")
solar_exec = shell_script("#{solar_script}", ["SOLAR_DB_BACKEND=#{SOLAR_DB_BACKEND}"])
# NOTE(bogdando) w/a for a centos7 issue
fix_six = shell_script("/vagrant/bootstrap/playbooks/fix_centos7_six.sh")

# TODO(bogdando) use https://github.com/jpetazzo/pipework for multi net.
# Hereafter, we will use only the 1st IP address and a single interface.
# Define the solar net with the given gateway, ip-range, subnet.
docker_pre = shell_script("bootstrap/playbooks/docker_pre.sh", [],
  [ "#{SLAVES_IPS[0]}1", "#{SLAVES_IPS[0]}0/24", "#{SLAVES_IPS[0]}0/24" ])
# Destroy the solar net and do additional teardown steps
docker_post = shell_script("bootstrap/playbooks/docker_post.sh")

# Prepare docker volumes and workaround missing machines' ssh_keys
# and virtualbox hardcoded paths in Solar
key=get_machine_key
docker_volumes = ["-v", "#{INSECURE_KEY}:#{KEY_PATH1}:ro"]
docker_volumes << ["-v", "#{INSECURE_KEY}:#{key}:ro",
  "-v", "/var/tmp/vagrant:/var/tmp/vagrant",
  "-v", "/sys/fs/cgroup:/sys/fs/cgroup",
  "-v", "/var/run/docker.sock:/var/run/docker.sock" ]
SLAVES_COUNT.times do |i|
  index = i + 1
  key = get_machine_key index.to_s
  docker_volumes << ["-v", "#{INSECURE_KEY}:#{key}:ro"]
end
docker_volumes.flatten!
@logger.info("Crafted docker volumes: #{docker_volumes}")

Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
  config.trigger.before :up do
    @logger.info("Executing docker network create steps")
    system "#{docker_pre}"
  end
  config.trigger.after :destroy do
    @logger.info("Executing docker teardown steps")
    system "#{docker_post}"
  end

  config.vm.provider :docker do |d, override|
    d.image = DOCKER_MASTER_IMAGE
    d.remains_running = false
    d.has_ssh = false
    d.cmd = DOCKER_CMD.split(' ')
  end

  # disable nfs, we use volume mounts instead
  config.nfs.functional = false
  config.vm.synced_folder ".", "/vagrant", :nfs => false

  # Configure the solar-dev node
  config.vm.define "solar-dev", primary: true do |config|
    config.vm.host_name = "solar-dev"
    config.vm.provider :docker do |d, override|
      d.name = "solar-dev"
      d.create_args = ["-i", "-t", "--privileged", "--ip=#{MASTER_IPS[0]}", "--net=solar",
        docker_volumes].flatten
    end
    # NOTE(bogdando) ssh provisioners are not wellcome here
    config.trigger.after :up, :option => { :vm => 'solar-dev'} do
      docker_exec("solar-dev","/usr/sbin/rsyslogd >/dev/null 2>&1")
      docker_exec("solar-dev","/usr/sbin/sshd >/dev/null 2>&1")
      docker_exec("solar-dev","#{fix_six} >/dev/null 2>&1")
      docker_exec("solar-dev","#{solar_exec}")
    end
  end

  # Configure the rest of the solar nodes
  SLAVES_COUNT.times do |i|
    index = i + 1
    ip_index = i + 3
    config.vm.define "solar-dev#{index}" do |config|
      config.vm.host_name = "solar-dev#{index}"
      config.vm.provider :docker do |d, override|
        d.name = "solar-dev#{index}"
        d.image = DOCKER_SLAVES_IMAGE
        d.create_args = ["-i", "-t", "--privileged", "--ip=#{SLAVES_IPS[0]}#{ip_index}", "--net=solar",
          docker_volumes].flatten
      end
      config.trigger.after :up, :option => { :vm => "solar-dev#{index}" } do
        docker_exec("solar-dev#{index}","/usr/sbin/rsyslogd >/dev/null 2>&1")
        docker_exec("solar-dev#{index}","/usr/sbin/sshd >/dev/null 2>&1")
        docker_exec("solar-dev#{index}","#{fix_six} >/dev/null 2>&1")
        docker_exec("solar-dev#{index}","#{solar_agent_script}")
      end
    end
  end
end
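To make the exec flow in this file concrete: with the default riak backend, the string built by ansible_playbook_command and shell_script and handed to docker_exec for the master node expands to roughly the following command (quoting simplified; a sketch derived from the helpers above, not verbatim output):

    $ docker exec -it solar-dev /bin/bash -c "SOLAR_DB_BACKEND=riak ansible-playbook -v -i 'localhost,' -c local /vagrant/bootstrap/playbooks/solar.yaml 2>/dev/null"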
bootstrap/playbooks/docker_post.sh (new executable file, 11 lines)

#!/bin/sh
# FIXME(bogdando) additional teardown steps after docker
# asks for sudo password
docker network rm solar
docker stop vagrant_pg_1
docker stop vagrant_riak_1
docker rm vagrant_pg_1
docker rm vagrant_riak_1
sudo rm -rf /tmp/solar*
sudo rm -rf tmp
sudo rm -f .vagrant/machines/solar-dev*/virtualbox/private_key
bootstrap/playbooks/docker_pre.sh (new executable file, 12 lines)

#!/bin/sh
# FIXME(bogdando) w/a unimplemented docker networking, see
# https://github.com/mitchellh/vagrant/issues/6667.
# Create the docker solar net
if ! docker network inspect solar >/dev/null 2>&1 ; then
  docker network create -d bridge \
    -o "com.docker.network.bridge.enable_icc"="true" \
    -o "com.docker.network.bridge.enable_ip_masquerade"="true" \
    -o "com.docker.network.driver.mtu"="1500" \
    --gateway=$1 --ip-range=$2 --subnet=$3 \
    solar >/dev/null 2>&1
fi
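Vagrantfile_docker passes the gateway, ip-range and subnet of the solar net as the three positional arguments, all derived from the first entry of slaves_ips. Invoked by hand, the call looks roughly like this (the 10.0.0.x values are only an example of that pattern, not a required setting):

    $ ./bootstrap/playbooks/docker_pre.sh 10.0.0.1 10.0.0.0/24 10.0.0.0/24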
@@ -73,24 +73,27 @@ https://github.com/openstack/solar-resources
 Can I run solar nodes with docker ?
 -----------------------------------

-Yes, although that is an experimental feature and currently supports only
-a single network interface per a container. Note, that before to run the
-``vagrant up --provider docker`` command, the following preparations must be
-done at the host system:
+Yes, the docker >=1.10.0 and the vagrant-triggers plugin are required.
+Note that the vagrant docker provider is *experimental* and supports
+only a single network interface per container. There is a separate
+``Vagrantfile_docker`` file. Before using the
+``vagrant up --provider docker`` command, rename it to the ``Vagrantfile``
+and do the following preparations at the host system as well:

 .. code-block:: bash

-  # docker pull solarproject/riak
+  $ docker pull solarproject/riak

-or, depending on the configured DB backend:
+or, depending on the configured DB backend (this also requires the
+packages make, autoconf, gcc-c++ or g++):

 .. code-block:: bash

-  # git clone https://github.com/kiasaki/docker-alpine-postgres.git
-  # cd docker-alpine-postgres
-  # make build && cd -
+  $ git clone https://github.com/kiasaki/docker-alpine-postgres.git
+  $ cd docker-alpine-postgres
+  $ make build && cd -

-This will allow the solar nodes to run required nested DB containers.
+Those will allow the solar nodes to run required nested DB containers.

 .. note ::
     The command ``vagrant ssh`` will not be working for the docker case.
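A sketch of the host-side preparation described above as shell commands; the vagrant-triggers plugin name comes from the new text, while the backup name for the stock Vagrantfile is arbitrary:

    $ vagrant plugin install vagrant-triggers
    $ mv Vagrantfile Vagrantfile.virtualbox
    $ cp Vagrantfile_docker Vagrantfile
    $ vagrant up --provider docker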
@@ -98,11 +101,21 @@ This will allow the solar nodes to run required nested DB containers.

 .. code-block:: bash

-  # ssh vagrant@10.0.0.2
-  # docker exec -it solar-dev bash
+  $ ssh vagrant@10.0.0.2
+  $ docker exec -it solar-dev bash

 .. note ::
     The command ``vagrant destroy`` only cleans up containers for solar nodes
     and does not clean up other containers launched, like riak, postgres,
     kolla or the like. You should stop and remove them from the host system
-    manually!
+    manually! Also make sure there are no shared artifacts left in the `tmp`,
+    `.vagrant` and `solar` directories, otherwise other vagrant providers
+    may fail to provision nodes or the Solar CLI may behave in an unexpected way:
+
+    .. code-block:: bash
+
+      # rm -f /tmp/solar-*
+      # rm -rf /tmp/solar_local
+      # rm -rf tmp
+      # rm -rf .vagrant/machines
+      # find solar -name "*.pyc" -delete