Remove outdated examples directory and create orch_fixtures with README

This commit is contained in:
Dmitry Shulyak 2015-09-04 12:42:06 +03:00
parent 26396282a6
commit 7d94b0dacf
32 changed files with 18 additions and 312 deletions

View File

@ -1,4 +0,0 @@
#!/bin/bash
docker exec -it solar.keystone-test keystone --debug --os-username admin --os-password password --os-tenant-name admin --os-auth-url http://10.0.0.3:8080/v2.0 role-list

View File

@ -1,9 +0,0 @@
- id: node_1
ip: 10.0.0.3
ssh_user: vagrant
ssh_key: /vagrant/.vagrant/machines/solar-dev1/virtualbox/private_key
- id: node_2
ip: 10.0.0.4
ssh_user: vagrant
ssh_key: /vagrant/.vagrant/machines/solar-dev2/virtualbox/private_key

View File

@ -1,13 +0,0 @@
id: containers
class: containers
type: resource
handler: ansible
version: v1
actions:
ensure: simple/containers/ensure.yml
clean_list: simple/containers/clean_list.yml
input: {}
tags: [service/containers]

View File

@ -1,14 +0,0 @@
id: docker
class: docker
type: resource
handler: ansible
version: v1
actions:
run: simple/docker/run.yml
remove: simple/docker/remove.yml
input:
base_image: ubuntu
tags: [service/docker]

View File

@ -1,14 +0,0 @@
id: docker_2
class: docker
type: resource
handler: ansible
version: v1
actions:
run: simple/docker/run.yml
remove: simple/docker/remove.yml
input:
base_image: ubuntu
node:
link: node_2
tags:
- service/docker2

View File

@ -1,32 +0,0 @@
id: haproxy
class: haproxy
type: resource
handler: ansible
version: v1
actions:
run: simple/haproxy/run.yml
remove: simple/haproxy/remove.yml
input:
name: solar.haproxy-test
image: 'haproxy:1.5'
services:
- service_name: keystone-admin
bind: '*:8080'
backends:
'with_tags': ['service/keystone']
'item':
'remote_name': '{{ item.name }}'
'remote_addr': '{{ item.node.ip }}:{{ item.admin_port }}'
- service_name: keystone-pub
bind: '*:8081'
backends:
with_tags: ["service/keystone"]
item:
remote_name: '{{ item.name }}'
remote_addr: '{{ item.node.ip }}:{{ item.public_port }}'
tags: [service/haproxy]

View File

@ -1,23 +0,0 @@
id: keystone
class: keystone
type: resource
handler: ansible
version: v1
actions:
run: simple/keystone/run.yml
remove: simple/keystone/remove.yml
input:
db_root_password:
first_with_tags: ["entrypoint/mariadb"]
item: '{{ item.root_password }}'
db_host:
first_with_tags: ["entrypoint/mariadb"]
item: '{{ item.node.ip }}'
admin_port: '{{ this.node.keystone_admin_port|d(35357) }}'
public_port: 5000
name: solar.keystone-test
image: kollaglue/centos-rdo-keystone
tags: [service/keystone]

View File

@ -1,23 +0,0 @@
id: mariadb
class: mariadb
type: resource
handler: ansible
version: v1
actions:
run: simple/mariadb/run.yml
remove: simple/mariadb/remove.yml
wait: simple/mariadb/wait.yml
users: simple/mariadb/users.yml
input:
bind_ip: "{{ this.node.ip }}"
name: solar.mariadb-test
image: kollaglue/fedora-rdo-mariadb-app
root_password: test1
users:
- name: test1
password: test1
tags:
- service/mariadb
- entrypoint/mariadb

View File

@ -1,6 +0,0 @@
- hosts: [service/containers]
sudo: yes
tasks:
- file: state=directory path=/var/lib/solar/
- shell: echo -n "" > /var/lib/solar/containers_list

View File

@ -1,9 +0,0 @@
- hosts: [service/containers]
sudo: yes
tasks:
- shell: docker ps -a | egrep -v "CONTAINER ID|$(cat /var/lib/solar/containers_list | tr '\n' '|' | sed 's/|$//')" | egrep 'solar\.' | awk '{print $1}'
register: containers
- shell: docker rm -f {{item}}
with_items: containers.stdout_lines

View File

@ -1,4 +0,0 @@
- hosts: [service/docker]
sudo: yes
tasks:
- shell: apt-get remove -y lxc-docker

View File

@ -1,9 +0,0 @@
- hosts: [service/docker]
sudo: yes
tasks:
- shell: docker --version
ignore_errors: true
register: docker_version
- shell: curl -sSL https://get.docker.com/ | sudo sh
when: docker_version|failed

View File

@ -1,5 +0,0 @@
- hosts: [service/haproxy]
sudo: yes
tasks:
- shell: docker rm -f {{ name }} || true

View File

@ -1,25 +0,0 @@
- hosts: [service/haproxy]
sudo: yes
tasks:
- shell: echo {{name}} >> /var/lib/solar/containers_list
- shell: docker ps | grep -q {{name}}
ignore_errors: true
register: is_running
- file: state=directory path=/etc/solar/{{name}}
# TODO Remove hardcoded path
- template: src=/vagrant/examples/resources/templates/haproxy.cfg.j2 dest=/etc/solar/{{name}}/haproxy.cfg backup=yes
notify:
- haproxy_reload_config
- shell: docker run -d \
--net="host" \
--privileged \
-v /etc/solar/{{name}}/haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg:ro \
--name {{name}} {{image}}
when: is_running|failed
handlers:
- name: haproxy_reload_config
shell: docker restart {{name}}

View File

@ -1,5 +0,0 @@
- hosts: [service/keystone]
sudo: yes
tasks:
- shell: docker rm -f {{ name }} || true

View File

@ -1,22 +0,0 @@
- hosts: [service/keystone]
sudo: yes
tasks:
- shell: echo {{name}} >> /var/lib/solar/containers_list
- shell: docker ps | grep -q {{name}}
ignore_errors: true
register: is_running
# NOTE(eli): specify a restart policy (--restart parameter) for the
# keystone container: if more than one keystone container is deployed,
# they will all perform the db migration and fail; the first container
# should create the database, and the rest of the containers should
# just be restarted
- shell: docker run -d --net="host" --privileged \
-e "DB_ROOT_PASSWORD={{db_root_password}}" \
-e "KEYSTONE_PUBLIC_SERVICE_PORT={{public_port}}" \
-e "KEYSTONE_ADMIN_SERVICE_PORT={{admin_port}}" \
-e "MARIADB_SERVICE_HOST={{db_host}}" \
--restart="on-failure:10" \
--name {{name}} {{image}}
when: is_running|failed

View File

@ -1,5 +0,0 @@
- hosts: [service/mariadb]
sudo: yes
tasks:
- shell: docker rm -f {{ name }} || true

View File

@ -1,17 +0,0 @@
- hosts: [service/mariadb]
sudo: yes
tasks:
- shell: echo {{name}} >> /var/lib/solar/containers_list
- shell: docker ps | grep -q {{name}}
ignore_errors: true
register: is_running
- shell: docker run \
-d \
--net="host" \
--privileged \
--name {{name}} \
-e "MARIADB_ROOT_PASSWORD={{root_password}}" \
-e "BIND_ADDRESS={{bind_ip}}" \
{{image}}
when: is_running|failed

View File

@ -1,8 +0,0 @@
- hosts: [service/mariadb]
sudo: yes
tasks:
# NOTE(eli): it will automatically create the user if it does not exist
- command: docker exec -t {{name}} \
mysql -uroot -p{{root_password}} -e "GRANT ALL PRIVILEGES ON *.* TO '{{item.name}}'@'%' WITH GRANT OPTION"
with_items: users

View File

@ -1,9 +0,0 @@
- hosts: [service/mariadb]
sudo: yes
tasks:
- shell: docker exec -t {{name}} mysql -p{{root_password}} -uroot -e "select 1"
register: result
until: result.rc == 0
retries: 10
delay: 0.5

View File

@ -1,6 +0,0 @@
- hosts: [rabbitmq]
sudo: yes
tasks:
- shell: docker run --net="host" --privileged \
--name {{ rabbitmq.name }} -d {{ rabbitmq.image }}

View File

@ -1,6 +0,0 @@
- hosts: [rabbitmq]
sudo: yes
tasks:
- shell: docker run --net="host" --privileged \
--name {{ rabbitmq.name }} -d {{ rabbitmq.image }}

View File

@ -1,12 +0,0 @@
- hosts: [rabbitmq]
sudo: yes
tasks:
- shell: docker exec -i {{rabbitmq.name}} /usr/sbin/rabbitmqctl delete_user {{user.name}}
run_once: true
- hosts: [mariadb]
sudo: yes
tasks:
- command: docker exec -t {{mariadb.name}} \
mysql -uroot -e "DROP USER '{{user.name}}'"

View File

@ -1,6 +0,0 @@
- hosts: [rabbitmq]
sudo: yes
tasks:
- command: docker exec -t {{rabbitmq.name}} /usr/sbin/rabbitmqctl add_user {{user.name}} {{user.password}}
run_once: true

View File

@ -1,26 +0,0 @@
global
maxconn 4096
pidfile /var/run/haproxy.pid
defaults
mode tcp
timeout connect 5s
timeout client 1m
timeout server 1m
option redispatch
balance roundrobin
listen stats :1936
mode http
stats enable
stats hide-version
#stats realm Haproxy\ Statistics
stats uri /
#stats auth Username:Password
{% for service in services %}
listen {{ service.service_name }}
bind {{service.bind}}
{% for backend in service.backends %}
server {{ backend.remote_name }} {{ backend.remote_addr }} check inter 2s rise 3 fall 2
{% endfor %}
{% endfor %}

View File

@ -0,0 +1,18 @@
# Orchestration fixtures
These fixtures will later be used for functional tests
* Create plan from fixture
```
solar o create solar/solar/test/orch_fixtures/simple.yaml
simple:ebd342cb-b770-4795-9f4c-04cb41c81169
```
* Run this plan
```
solar o run-once simple:ebd342cb-b770-4795-9f4c-04cb41c81169
```
* Report progress
```
solar o report simple:ebd342cb-b770-4795-9f4c-04cb41c81169
```