Running examples using fuel-devops
- create libvirt env using fuel-devops
- add script to test examples

Change-Id: I37c4d6d25d7f2527419bb18649704af45c3f9ae3
parent 6210412931
commit 88eebfdc85
examples/hosts_file/hosts.py (1 line changed, Normal file → Executable file)
@@ -1,3 +1,4 @@
#!/usr/bin/python
import click
import sys
import time
utils/jenkins/README.md (16 lines, new file)
@@ -0,0 +1,16 @@
Examples testing
================

To automatically test the examples, first install the fuel-devops framework; the installation process is described at https://github.com/openstack/fuel-devops. After installation, run the migration scripts:

```bash
export DJANGO_SETTINGS_MODULE=devops.settings
django-admin.py syncdb
django-admin.py migrate
```

To test the examples, run one of the available test scripts from the Solar main directory, for example:

```bash
./utils/jenkins/run_hosts_example.sh
```
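The example wrapper scripts added further down in this commit simply export a few environment variables and then call `utils/jenkins/run.sh`, which also honours `CONF_PATH` and `IMAGE_PATH`. As a minimal sketch (variable names and defaults taken from `run.sh` in this commit; the values shown are only illustrative), the runner can be invoked directly with overrides:

```bash
# Illustrative direct invocation of utils/jenkins/run.sh; values are examples only.
export ENV_NAME="solar-example"           # libvirt environment name (erased via dos.py before each run)
export SLAVES_COUNT=2                     # slave nodes created in addition to the master
export DEPLOY_TIMEOUT=180                 # seconds to wait for `solar orch` to finish
export IMAGE_PATH="bootstrap/output-qemu/ubuntu1404"   # base qcow2 image used for all nodes
export TEST_SCRIPT="/usr/bin/python /vagrant/examples/hosts_file/hosts.py"

./utils/jenkins/run.sh
```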
utils/jenkins/default.yaml (92 lines, new file)
@@ -0,0 +1,92 @@
---
rack-01-node-params:
  vcpu: 2
  memory: 1024
  boot:
    - hd
  volumes:
    - name: base
      source_image:
      format: qcow2
  interfaces:
    - label: eth0
      l2_network_device: public
    - label: eth1
      l2_network_device: private
    - label: eth2
      l2_network_device: storage
    - label: eth3
      l2_network_device: management
  network_config:
    eth0:
      networks:
        - public
    eth1:
      networks:
        - private
    eth2:
      networks:
        - storage
    eth3:
      networks:
        - management


env_name:

address_pools:
  # Network pools used by the environment
  public-pool01:
    net: 10.0.0.0/16:24
    params:
      tag: 0
  private-pool01:
    net: 10.1.0.0/16:24
    params:
      tag: 101
  storage-pool01:
    net: 10.2.0.0/16:24
    params:
      tag: 102
  management-pool01:
    net: 10.3.0.0/16:24
    params:
      tag: 103

groups:
  - name: rack-01
    driver:
      name: devops.driver.libvirt.libvirt_driver
      params:
        connection_string: qemu:///system
        storage_pool_name: default
        stp: True
        hpet: False
        use_host_cpu: true

    network_pools:  # Address pools for OpenStack networks.
                    # Actual names should be used for keys
                    # (the same as in Nailgun, for example)
      public: public-pool01
      private: private-pool01
      storage: storage-pool01
      management: management-pool01

    l2_network_devices:  # Libvirt bridges. These are *NOT* Nailgun networks.
      public:
        address_pool: public-pool01
        dhcp: "true"
        forward:
          mode: nat

      private:
        address_pool: private-pool01
      storage:
        address_pool: storage-pool01
      management:
        address_pool: management-pool01

    nodes:
      - name: solar
        role: master
utils/jenkins/env.py (44 lines, new file)
@@ -0,0 +1,44 @@
# -*- coding: utf-8 -*-
import os

import yaml

from devops.models import Environment


def create_config():
    # Build a fuel-devops settings dict from the YAML config pointed to by
    # CONF_PATH, parameterised by ENV_NAME, IMAGE_PATH and SLAVES_COUNT.
    env = os.environ

    conf_path = env['CONF_PATH']
    with open(conf_path) as c:
        conf = yaml.load(c.read())

    env_name = env['ENV_NAME']
    image_path = env['IMAGE_PATH']
    slaves_count = int(env['SLAVES_COUNT'])

    conf['env_name'] = env_name
    node_params = conf['rack-01-node-params']
    node_params['volumes'][0]['source_image'] = image_path

    # Add the requested number of slave nodes and apply the shared node params.
    group = conf['groups'][0]
    for i in range(slaves_count):
        group['nodes'].append({'name': 'slave-{}'.format(i),
                               'role': 'slave'})
    for node in group['nodes']:
        node['params'] = node_params
    return {'template': {'devops_settings': conf}}


def get_ips(env):
    admin = env.get_node(role='master')
    return admin.get_ip_address_by_network_name('public')


def define_from_config(conf):
    env = Environment.create_environment(conf)
    env.define()
    env.start()
    print get_ips(env)


if __name__ == '__main__':
    config = create_config()
    define_from_config(config)
utils/jenkins/repository/nodes/1.0.0/nodes.yaml (37 lines, new file)
@@ -0,0 +1,37 @@
id: simple_riak_with_transports
resources:
#% for i in range(count|int) %#
  #% set j = i + 1 %#
  - id: ssh_transport#{j}#
    from: resources/transport_ssh
    input:
      ssh_user: 'vagrant'
      ssh_password: 'vagrant'
  - id: rsync#{j}#
    from: resources/transport_rsync
    input:
      user: vagrant
      password: vagrant
  - id: transports#{j}#
    from: resources/transports
    input:
      transports:
        - password: ssh_transport#{j}#::ssh_password
          user: ssh_transport#{j}#::ssh_user
          port: ssh_transport#{j}#::ssh_port
          name: ssh_transport#{j}#::name
        - password: rsync#{j}#::password
          name: rsync#{j}#::name
          user: rsync#{j}#::user
          port: rsync#{j}#::port
  - id: node#{j}#
    from: resources/ro_node
    input:
      name: node#{j}#
      ip: '10.0.0.#{i + 3}#'
      transports_id: transports#{j}#::transports_id
  - id: hosts_file#{j}#
    from: resources/hosts_file
    location: node#{j}#
    tags: ['location=node#{j}#']
#% endfor %#
utils/jenkins/run.sh (83 lines, new executable file)
@@ -0,0 +1,83 @@
#!/bin/bash
set -xe

# For now we assume that the master IP is 10.0.0.2 and the slave IPs are 10.0.0.{3,4,5,...}
ADMIN_IP=10.0.0.2
ADMIN_PASSWORD=vagrant
ADMIN_USER=vagrant
INSTALL_DIR=/vagrant

ENV_NAME=${ENV_NAME:-solar-test}
SLAVES_COUNT=${SLAVES_COUNT:-0}
CONF_PATH=${CONF_PATH:-utils/jenkins/default.yaml}

IMAGE_PATH=${IMAGE_PATH:-bootstrap/output-qemu/ubuntu1404}
TEST_SCRIPT=${TEST_SCRIPT:-/vagrant/examples/hosts_file/hosts.py}
DEPLOY_TIMEOUT=${DEPLOY_TIMEOUT:-60}

dos.py erase ${ENV_NAME} || true
ENV_NAME=${ENV_NAME} SLAVES_COUNT=${SLAVES_COUNT} IMAGE_PATH=${IMAGE_PATH} CONF_PATH=${CONF_PATH} python utils/jenkins/env.py

# Wait for the master node to boot
sleep 30

sshpass -p ${ADMIN_PASSWORD} rsync -az . -e ssh ${ADMIN_USER}@${ADMIN_IP}:/home/vagrant/solar --include bootstrap/playbooks --exclude "bootstrap/*" --exclude .tox --exclude tmp --exclude x-venv

sshpass -p ${ADMIN_PASSWORD} ssh ${ADMIN_USER}@${ADMIN_IP} bash -s <<EOF
set -x
export PYTHONWARNINGS="ignore"

sudo rm -rf /vagrant
sudo mv /home/vagrant/solar /vagrant

sudo chown -R ${ADMIN_USER} ${INSTALL_DIR}
sudo ansible-playbook -v -i \"localhost,\" -c local ${INSTALL_DIR}/bootstrap/playbooks/solar.yaml

set -e

# Wait for Riak to come up
sudo docker exec vagrant_riak_1 riak-admin wait_for_service riak_kv

export SOLAR_CONFIG_OVERRIDE="/.solar_config_override"

solar repo update templates ${INSTALL_DIR}/utils/jenkins/repository

bash -c "${TEST_SCRIPT}"

solar changes stage
solar changes process
solar orch run-once

elapsed_time=0
while true
do
    report=\$(solar o report)

    errors=\$(echo "\${report}" | grep -e ERROR | wc -l)
    if [ "\${errors}" != "0" ]; then
        solar orch report
        echo FAILURE
        exit 1
    fi

    running=\$(echo "\${report}" | grep -e PENDING -e INPROGRESS | wc -l)
    if [ "\${running}" == "0" ]; then
        solar orch report
        echo SUCCESS
        exit 0
    fi

    if [ "\${elapsed_time}" -gt "${DEPLOY_TIMEOUT}" ]; then
        solar orch report
        echo TIMEOUT
        exit 2
    fi

    sleep 5
    let elapsed_time+=5
done
EOF

if [ "$?" -eq "0" ]; then
    dos.py erase ${ENV_NAME} || true
fi
utils/jenkins/run_hosts_example.sh (9 lines, new executable file)
@@ -0,0 +1,9 @@
#!/bin/bash
set -xe

export ENV_NAME="solar-example"
export SLAVES_COUNT=2
export DEPLOY_TIMEOUT=180
export TEST_SCRIPT="/usr/bin/python /vagrant/examples/hosts_file/hosts.py"

./utils/jenkins/run.sh
utils/jenkins/run_openstack_example.sh (9 lines, new executable file)
@@ -0,0 +1,9 @@
#!/bin/bash
set -xe

export ENV_NAME="solar-example"
export SLAVES_COUNT=2
export DEPLOY_TIMEOUT=2400
export TEST_SCRIPT="/usr/bin/python /vagrant/examples/openstack/openstack.py create_all"

./utils/jenkins/run.sh
utils/jenkins/run_riak_example.sh (9 lines, new executable file)
@@ -0,0 +1,9 @@
#!/bin/bash
set -xe

export ENV_NAME="solar-example"
export SLAVES_COUNT=3
export DEPLOY_TIMEOUT=300
export TEST_SCRIPT="/usr/bin/python /vagrant/examples/riak/riaks.py create_all"

./utils/jenkins/run.sh