Merge "Separate vagrantfile for the docker provider"

Jenkins 2016-03-08 08:20:23 +00:00 committed by Gerrit Code Review
commit 7952a8c577
5 changed files with 302 additions and 219 deletions

299
Vagrantfile vendored

@@ -16,22 +16,12 @@
Vagrant.require_version ">= 1.7.4"
require 'etc'
require 'log4r'
require 'yaml'
# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
VAGRANTFILE_API_VERSION = "2"
HOME=Etc.getpwuid.dir
# Solar specific key paths mappings
INSECURE_KEY="#{HOME}/.vagrant.d/insecure_private_key"
KEY_PATH1="/vagrant/tmp/keys/ssh_private"
def get_machine_key (index = '')
"/vagrant/.vagrant/machines/solar-dev#{index}/virtualbox/private_key"
end
# configs, custom updates _defaults
@logger = Log4r::Logger.new("vagrant::docker::driver")
defaults_cfg = YAML.load_file('vagrant-settings.yaml_defaults')
if File.exist?('vagrant-settings.yaml')
custom_cfg = YAML.load_file('vagrant-settings.yaml')
@@ -54,157 +44,68 @@ MASTER_CPUS = cfg["master_cpus"]
SLAVES_CPUS = cfg["slaves_cpus"]
PARAVIRT_PROVIDER = cfg.fetch('paravirtprovider', false)
PREPROVISIONED = cfg.fetch('preprovisioned', true)
DOCKER_MASTER_IMAGE=cfg['docker_master_image']
DOCKER_SLAVES_IMAGE=cfg['docker_slaves_image']
DOCKER_CMD=cfg['docker_cmd']
SOLAR_DB_BACKEND = cfg.fetch('solar_db_backend', 'riak')
# Initialize noop plugins only in case of PXE boot
require_relative 'bootstrap/vagrant_plugins/noop' unless PREPROVISIONED
# FIXME(bogdando) find a more native way to distinguish provider specific logic
provider = (ARGV[2] || ENV['VAGRANT_DEFAULT_PROVIDER'] || :docker).to_sym
def ansible_playbook_command(filename, args=[])
ansible_script_crafted = "ansible-playbook -v -i \"localhost,\" -c local /vagrant/bootstrap/playbooks/#{filename} #{args.join ' '}"
@logger.info("Crafted ansible-script: #{ansible_script_crafted})")
ansible_script_crafted
end
def shell_script(filename, env=[], args=[])
shell_script_crafted = "/bin/bash -c \"#{env.join ' '} #{filename} #{args.join ' '} 2>/dev/null\""
@logger.info("Crafted shell-script: #{shell_script_crafted})")
shell_script_crafted
end
# Workaround for unimplemented docker-exec, see https://github.com/mitchellh/vagrant/issues/4179
# Use docker exec instead of the SSH provisioners
# TODO(bogdando) lxc-docker support (there is no exec)
def docker_exec (name, script)
@logger.info("Executing docker-exec at #{name}: #{script}")
system "docker exec -it #{name} #{script}"
"ansible-playbook -v -i \"localhost,\" -c local /vagrant/bootstrap/playbooks/#{filename} #{args.join ' '}"
end
solar_script = ansible_playbook_command("solar.yaml")
solar_agent_script = ansible_playbook_command("solar-agent.yaml")
master_pxe = ansible_playbook_command("pxe.yaml")
fix_six = shell_script("/vagrant/bootstrap/playbooks/fix_centos7_six.sh")
solar_exec = shell_script("#{solar_script}", ["SOLAR_DB_BACKEND=#{SOLAR_DB_BACKEND}"])
if provider == :docker
# Prepare docker volumes and workaround missing machines' ssh_keys
# and virtualbox hardcoded paths in Solar
key=get_machine_key
docker_volumes = ["-v", "#{INSECURE_KEY}:#{KEY_PATH1}:ro"]
docker_volumes << ["-v", "#{INSECURE_KEY}:#{key}:ro",
"-v", "/var/tmp/vagrant:/var/tmp/vagrant",
"-v", "/sys/fs/cgroup:/sys/fs/cgroup",
"-v", "/var/run/docker.sock:/var/run/docker.sock" ]
SLAVES_COUNT.times do |i|
index = i + 1
key = get_machine_key index.to_s
docker_volumes << ["-v", "#{INSECURE_KEY}:#{key}:ro"]
end
docker_volumes.flatten!
@logger.info("Crafted docker volumes: #{docker_volumes}")
end
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
if provider == :docker
# W/a unimplemented docker networking, see
# https://github.com/mitchellh/vagrant/issues/6667.
# Create or delete the solar net (depends on the vagrant action)
# TODO(bogdando) use https://github.com/jpetazzo/pipework for multi net.
# Hereafter, we will use only the 1st IP address and a single interface.
config.trigger.before :up do
system <<-SCRIPT
if ! docker network inspect solar >/dev/null 2>&1 ; then
docker network create -d bridge \
-o "com.docker.network.bridge.enable_icc"="true" \
-o "com.docker.network.bridge.enable_ip_masquerade"="true" \
-o "com.docker.network.driver.mtu"="1500" \
--gateway=#{SLAVES_IPS[0]}1 \
--ip-range=#{SLAVES_IPS[0]}0/24 \
--subnet=#{SLAVES_IPS[0]}0/24 \
solar >/dev/null 2>&1
fi
SCRIPT
end
config.trigger.after :destroy do
system <<-SCRIPT
docker network rm solar >/dev/null 2>&1
SCRIPT
end
config.vm.provider :docker do |d, override|
d.image = DOCKER_MASTER_IMAGE
d.remains_running = false
d.has_ssh = false
d.cmd = DOCKER_CMD.split(' ')
end
else
config.vm.box = MASTER_IMAGE
config.vm.box_version = MASTER_IMAGE_VERSION
end
config.vm.define "solar-dev", primary: true do |config|
config.vm.host_name = "solar-dev"
if provider == :docker
config.vm.provider :docker do |d, override|
d.name = "solar-dev"
d.create_args = ["-i", "-t", "--privileged", "--ip=#{MASTER_IPS[0]}", "--net=solar",
docker_volumes].flatten
end
config.trigger.after :up, :option => { :vm => 'solar-dev'} do
docker_exec("solar-dev","/usr/sbin/rsyslogd >/dev/null 2>&1")
docker_exec("solar-dev","/usr/sbin/sshd >/dev/null 2>&1")
docker_exec("solar-dev","#{fix_six} >/dev/null 2>&1")
docker_exec("solar-dev","#{solar_exec}")
docker_exec("solar-dev","#{master_pxe} >/dev/null 2>&1") unless PREPROVISIONED
end
else
# not the docker provider
config.vm.provision "shell", inline: solar_script, privileged: true, env: {"SOLAR_DB_BACKEND": SOLAR_DB_BACKEND}
config.vm.provision "shell", inline: master_pxe, privileged: true unless PREPROVISIONED
config.vm.provision "file", source: INSECURE_KEY, destination: KEY_PATH1
config.vm.provider :virtualbox do |v|
v.memory = MASTER_RAM
v.cpus = MASTER_CPUS
v.customize [
"modifyvm", :id,
"--memory", MASTER_RAM,
"--cpus", MASTER_CPUS,
"--ioapic", "on",
]
if PARAVIRT_PROVIDER
v.customize ['modifyvm', :id, "--paravirtprovider", PARAVIRT_PROVIDER] # for linux guest
end
v.name = "solar-dev"
end
config.vm.provider :libvirt do |libvirt|
libvirt.driver = 'kvm'
libvirt.memory = MASTER_RAM
libvirt.cpus = MASTER_CPUS
libvirt.nested = true
libvirt.cpu_mode = 'host-passthrough'
libvirt.volume_cache = 'unsafe'
libvirt.disk_bus = "virtio"
end
ind = 0
MASTER_IPS.each do |ip|
config.vm.network :private_network, ip: "#{ip}", :dev => "solbr#{ind}", :mode => 'nat'
ind = ind + 1
end
if SYNC_TYPE == 'nfs'
config.vm.synced_folder ".", "/vagrant", type: "nfs"
end
if SYNC_TYPE == 'rsync'
config.vm.synced_folder ".", "/vagrant", type: "rsync",
rsync__args: ["--verbose", "--archive", "--delete", "-z"]
end
end
end
@@ -212,83 +113,69 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
index = i + 1
ip_index = i + 3
config.vm.define "solar-dev#{index}" do |config|
config.vm.host_name = "solar-dev#{index}"
if provider == :docker
config.vm.provider :docker do |d, override|
d.name = "solar-dev#{index}"
d.image = DOCKER_SLAVES_IMAGE
d.create_args = ["-i", "-t", "--privileged", "--ip=#{SLAVES_IPS[0]}#{ip_index}", "--net=solar",
docker_volumes].flatten
end
config.trigger.after :up, :option => { :vm => "solar-dev#{index}" } do
docker_exec("solar-dev#{index}","/usr/sbin/rsyslogd >/dev/null 2>&1")
docker_exec("solar-dev#{index}","/usr/sbin/sshd >/dev/null 2>&1")
docker_exec("solar-dev#{index}","#{fix_six} >/dev/null 2>&1")
docker_exec("solar-dev#{index}","#{solar_agent_script}") if PREPROVISIONED
end
else
# not the docker provider
# Standard box with all stuff preinstalled
config.vm.box = SLAVES_IMAGE
config.vm.box_version = SLAVES_IMAGE_VERSION
if PREPROVISIONED
config.vm.provision "shell", inline: solar_agent_script, privileged: true
#TODO(bogdando) figure out how to configure multiple interfaces when was not PREPROVISIONED
ind = 0
SLAVES_IPS.each do |ip|
config.vm.network :private_network, ip: "#{ip}#{ip_index}", :dev => "solbr#{ind}", :mode => 'nat'
ind = ind + 1
end
else
# Disable attempts to install guest os and check that node is booted using ssh,
# because nodes will have ip addresses from dhcp, and vagrant doesn't know
# which ip to use to perform connection
config.vm.communicator = :noop
config.vm.guest = :noop_guest
# Configure network to boot vm using pxe
config.vm.network "private_network", adapter: 1, ip: "10.0.0.#{ip_index}"
config.vbguest.no_install = true
config.vbguest.auto_update = false
end
config.vm.provider :virtualbox do |v|
boot_order(v, ['net', 'disk'])
v.customize [
"modifyvm", :id,
"--memory", SLAVES_RAM,
"--cpus", SLAVES_CPUS,
"--ioapic", "on",
"--macaddress1", "auto",
]
if PARAVIRT_PROVIDER
v.customize ['modifyvm', :id, "--paravirtprovider", PARAVIRT_PROVIDER] # for linux guest
end
v.name = "solar-dev#{index}"
end
config.vm.provider :libvirt do |libvirt|
libvirt.driver = 'kvm'
libvirt.memory = SLAVES_RAM
libvirt.cpus = SLAVES_CPUS
libvirt.nested = true
libvirt.cpu_mode = 'host-passthrough'
libvirt.volume_cache = 'unsafe'
libvirt.disk_bus = "virtio"
end
if PREPROVISIONED
if SYNC_TYPE == 'nfs'
config.vm.synced_folder ".", "/vagrant", type: "nfs"
end
if SYNC_TYPE == 'rsync'
config.vm.synced_folder ".", "/vagrant", type: "rsync",
rsync__args: ["--verbose", "--archive", "--delete", "-z"]
end
end
end
end
end
end
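For orientation, the provider branching above hinges on the detection near the top of the file (``ARGV[2] || ENV['VAGRANT_DEFAULT_PROVIDER'] || :docker``). A minimal usage sketch; the space-separated ``--provider`` form is what ends up in ``ARGV[2]``:

.. code-block:: bash

$ vagrant up --provider virtualbox
$ vagrant up --provider libvirt
# or rely on the environment variable / the :docker fallback
$ VAGRANT_DEFAULT_PROVIDER=docker vagrant up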

160
Vagrantfile_docker Normal file

@@ -0,0 +1,160 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
Vagrant.require_version ">= 1.7.4"
require 'etc'
require 'log4r'
require 'yaml'
# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
VAGRANTFILE_API_VERSION = "2"
ENV['VAGRANT_DEFAULT_PROVIDER'] = "docker"
HOME=Etc.getpwuid.dir
# Solar examples specific key paths mappings
INSECURE_KEY="#{HOME}/.vagrant.d/insecure_private_key"
KEY_PATH1="/vagrant/tmp/keys/ssh_private"
def get_machine_key (index = '')
"/vagrant/.vagrant/machines/solar-dev#{index}/virtualbox/private_key"
end
# configs, custom updates _defaults
@logger = Log4r::Logger.new("vagrant::docker::driver")
defaults_cfg = YAML.load_file('vagrant-settings.yaml_defaults')
if File.exist?('vagrant-settings.yaml')
custom_cfg = YAML.load_file('vagrant-settings.yaml')
cfg = defaults_cfg.merge(custom_cfg)
else
cfg = defaults_cfg
end
SLAVES_COUNT = cfg["slaves_count"]
SLAVES_IPS = cfg["slaves_ips"]
MASTER_IPS = cfg["master_ips"]
DOCKER_MASTER_IMAGE=cfg['docker_master_image']
DOCKER_SLAVES_IMAGE=cfg['docker_slaves_image']
DOCKER_CMD=cfg['docker_cmd']
SOLAR_DB_BACKEND = cfg.fetch('solar_db_backend', 'riak')
def ansible_playbook_command(filename, args=[])
ansible_script_crafted = "ansible-playbook -v -i \"localhost,\" -c local /vagrant/bootstrap/playbooks/#{filename} #{args.join ' '}"
@logger.info("Crafted ansible-script: #{ansible_script_crafted})")
ansible_script_crafted
end
def shell_script(filename, env=[], args=[])
shell_script_crafted = "/bin/bash -c \"#{env.join ' '} #{filename} #{args.join ' '} 2>/dev/null\""
@logger.info("Crafted shell-script: #{shell_script_crafted})")
shell_script_crafted
end
# FIXME(bogdando) workaround for unimplemented docker-exec,
# see https://github.com/mitchellh/vagrant/issues/4179
# Use docker exec instead of the SSH provisioners
def docker_exec (name, script)
@logger.info("Executing docker-exec at #{name}: #{script}")
system "docker exec -it #{name} #{script}"
end
solar_script = ansible_playbook_command("solar.yaml")
solar_agent_script = ansible_playbook_command("solar-agent.yaml")
solar_exec = shell_script("#{solar_script}", ["SOLAR_DB_BACKEND=#{SOLAR_DB_BACKEND}"])
# NOTE(bogdando) workaround for a centos7 issue
fix_six = shell_script("/vagrant/bootstrap/playbooks/fix_centos7_six.sh")
# TODO(bogdando) use https://github.com/jpetazzo/pipework for multi net.
# Hereafter, we will use only the 1st IP address and a single interface.
# Define the solar net with the given gateway, ip-range, subnet.
docker_pre = shell_script("bootstrap/playbooks/docker_pre.sh", [],
[ "#{SLAVES_IPS[0]}1", "#{SLAVES_IPS[0]}0/24", "#{SLAVES_IPS[0]}0/24" ])
# Destroy the solar net and do additional teardown steps
docker_post = shell_script("bootstrap/playbooks/docker_post.sh")
# Prepare docker volumes and workaround missing machines' ssh_keys
# and virtualbox hardcoded paths in Solar
key=get_machine_key
docker_volumes = ["-v", "#{INSECURE_KEY}:#{KEY_PATH1}:ro"]
docker_volumes << ["-v", "#{INSECURE_KEY}:#{key}:ro",
"-v", "/var/tmp/vagrant:/var/tmp/vagrant",
"-v", "/sys/fs/cgroup:/sys/fs/cgroup",
"-v", "/var/run/docker.sock:/var/run/docker.sock" ]
SLAVES_COUNT.times do |i|
index = i + 1
key = get_machine_key index.to_s
docker_volumes << ["-v", "#{INSECURE_KEY}:#{key}:ro"]
end
docker_volumes.flatten!
@logger.info("Crafted docker volumes: #{docker_volumes}")
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
config.trigger.before :up do
@logger.info("Executing docker network create steps")
system "#{docker_pre}"
end
config.trigger.after :destroy do
@logger.info("Executing docker teardown steps")
system "#{docker_post}"
end
config.vm.provider :docker do |d, override|
d.image = DOCKER_MASTER_IMAGE
d.remains_running = false
d.has_ssh = false
d.cmd = DOCKER_CMD.split(' ')
end
# disable nfs, we use volume mounts instead
config.nfs.functional = false
config.vm.synced_folder ".", "/vagrant", :nfs => false
# Configure the solar-dev node
config.vm.define "solar-dev", primary: true do |config|
config.vm.host_name = "solar-dev"
config.vm.provider :docker do |d, override|
d.name = "solar-dev"
d.create_args = ["-i", "-t", "--privileged", "--ip=#{MASTER_IPS[0]}", "--net=solar",
docker_volumes].flatten
end
# NOTE(bogdando) ssh provisioners are not welcome here
config.trigger.after :up, :option => { :vm => 'solar-dev'} do
docker_exec("solar-dev","/usr/sbin/rsyslogd >/dev/null 2>&1")
docker_exec("solar-dev","/usr/sbin/sshd >/dev/null 2>&1")
docker_exec("solar-dev","#{fix_six} >/dev/null 2>&1")
docker_exec("solar-dev","#{solar_exec}")
end
end
# Configure the rest of the solar nodes
SLAVES_COUNT.times do |i|
index = i + 1
ip_index = i + 3
config.vm.define "solar-dev#{index}" do |config|
config.vm.host_name = "solar-dev#{index}"
config.vm.provider :docker do |d, override|
d.name = "solar-dev#{index}"
d.image = DOCKER_SLAVES_IMAGE
d.create_args = ["-i", "-t", "--privileged", "--ip=#{SLAVES_IPS[0]}#{ip_index}", "--net=solar",
docker_volumes].flatten
end
config.trigger.after :up, :option => { :vm => "solar-dev#{index}" } do
docker_exec("solar-dev#{index}","/usr/sbin/rsyslogd >/dev/null 2>&1")
docker_exec("solar-dev#{index}","/usr/sbin/sshd >/dev/null 2>&1")
docker_exec("solar-dev#{index}","#{fix_six} >/dev/null 2>&1")
docker_exec("solar-dev#{index}","#{solar_agent_script}")
end
end
end
end
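Because the new file is not named ``Vagrantfile``, vagrant will not load it by default. Besides the rename described in the docs below, a hedged alternative is Vagrant's standard ``VAGRANT_VAGRANTFILE`` environment variable, which names the Vagrantfile to read:

.. code-block:: bash

$ vagrant plugin install vagrant-triggers
$ VAGRANT_VAGRANTFILE=Vagrantfile_docker vagrant up --provider docker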

11
bootstrap/playbooks/docker_post.sh Normal file

@@ -0,0 +1,11 @@
#!/bin/sh
# FIXME(bogdando) additional teardown steps after docker;
# asks for the sudo password
docker network rm solar
docker stop vagrant_pg_1
docker stop vagrant_riak_1
docker rm vagrant_pg_1
docker rm vagrant_riak_1
sudo rm -rf /tmp/solar*
sudo rm -rf tmp
sudo rm -f .vagrant/machines/solar-dev*/virtualbox/private_key
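An optional sanity check after teardown, using only the standard docker CLI (the names match the containers and the network handled above):

.. code-block:: bash

$ docker ps -a --filter name=solar-dev # no solar node containers should remain
$ docker network ls | grep solar # the solar bridge network should be gone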

12
bootstrap/playbooks/docker_pre.sh Normal file

@@ -0,0 +1,12 @@
#!/bin/sh
# FIXME(bogdando) workaround for unimplemented docker networking, see
# https://github.com/mitchellh/vagrant/issues/6667.
# Create the docker solar net
if ! docker network inspect solar >/dev/null 2>&1 ; then
docker network create -d bridge \
-o "com.docker.network.bridge.enable_icc"="true" \
-o "com.docker.network.bridge.enable_ip_masquerade"="true" \
-o "com.docker.network.driver.mtu"="1500" \
--gateway=$1 --ip-range=$2 --subnet=$3 \
solar >/dev/null 2>&1
fi
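The script takes the gateway, the ip-range and the subnet as positional arguments, as wired up in ``Vagrantfile_docker`` above. An illustrative invocation, assuming ``slaves_ips`` yields a ``10.0.0.`` prefix (the addresses are examples only):

.. code-block:: bash

$ sh bootstrap/playbooks/docker_pre.sh 10.0.0.1 10.0.0.0/24 10.0.0.0/24
$ docker network inspect solar # verify the gateway and subnet took effect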


@@ -73,24 +73,27 @@ https://github.com/openstack/solar-resources
Can I run solar nodes with docker?
-----------------------------------
Yes. Docker >= 1.10.0 and the vagrant-triggers plugin are required.
Note that the vagrant docker provider is *experimental* and supports
only a single network interface per container. There is a separate
``Vagrantfile_docker`` file. Before running the
``vagrant up --provider docker`` command, rename it to ``Vagrantfile``
and complete the following preparations at the host system as well:
.. code-block:: bash
$ docker pull solarproject/riak
or, depending on the configured DB backend (this also requires the
packages make, autoconf, gcc-c++ or g++):
.. code-block:: bash
$ git clone https://github.com/kiasaki/docker-alpine-postgres.git
$ cd docker-alpine-postgres
$ make build && cd -
Those will allow the solar nodes to run the required nested DB containers.
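For reference, the whole preparation flow in one place; a minimal sketch assuming the repository root as the working directory (``cp`` instead of a plain rename keeps the original file around):

.. code-block:: bash

$ vagrant plugin install vagrant-triggers
$ cp Vagrantfile_docker Vagrantfile
$ vagrant up --provider docker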
.. note::
The command ``vagrant ssh`` will not work for the docker case; use the following instead:
@@ -98,11 +101,21 @@ This will allow the solar nodes to run required nested DB containers.
.. code-block:: bash
$ ssh vagrant@10.0.0.2
$ docker exec -it solar-dev bash
.. note::
The command ``vagrant destroy`` only cleans up containers for solar nodes
and does not clean up other containers launched, like riak, postgres,
kolla or the like. You should stop and remove them from the host system
manually! Also make sure there are no shared artifacts left in the `tmp`,
`.vagrant` and `solar` directories, otherwise other vagrant providers
may fail to provision nodes, or the Solar CLI may behave in unexpected ways:
.. code-block:: bash
# rm -f /tmp/solar-*
# rm -rf /tmp/solar_local
# rm -rf tmp
# rm -rf .vagrant/machines
# find solar -name "*.pyc" -delete
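Note that these manual steps largely overlap with ``bootstrap/playbooks/docker_post.sh`` added by this commit, so most of the cleanup can be scripted (the script will ask for the sudo password, as its FIXME notes):

.. code-block:: bash

$ sh bootstrap/playbooks/docker_post.sh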