commit cb2095f39c
example-puppet.py (Normal file → Executable file, 1 change)
@@ -364,6 +364,7 @@ def setup_resources():
     signals.connect(node1, cinder_volume_puppet)
     signals.connect(cinder_puppet, cinder_volume_puppet)
+    evapi.add_react(cinder_puppet.name, cinder_volume_puppet.name, actions=('update',))

     # NOVA
     nova_puppet = vr.create('nova_puppet', 'resources/nova_puppet', {})[0]
     nova_db = vr.create('nova_db', 'resources/mariadb_db/', {

example-riaks.py (new file, 243 lines)

@@ -0,0 +1,243 @@
# To run:
# python example-riaks.py deploy
# python example-riaks.py add_haproxies
# solar changes stage
# solar changes process
# solar orch run-once last


import click
import sys

from solar.core import resource
from solar.core import signals
from solar.core import validation
from solar.core.resource import virtual_resource as vr
from solar import errors

from solar.interfaces.db import get_db

from solar.events.controls import React, Dep
from solar.events.api import add_event


db = get_db()


def setup_riak():
    db.clear()
    signals.Connections.clear()

    nodes = vr.create('nodes', 'templates/riak_nodes.yml', {})
    node1, node2, node3 = nodes

    riak_services = []
    ips = '10.0.0.%d'
    for i in xrange(3):
        num = i + 1
        r = vr.create('riak_service%d' % num,
                      'resources/riak_node',
                      {'riak_self_name': 'riak%d' % num,
                       'riak_hostname': 'riak_server%d.solar' % num,
                       'riak_name': 'riak%d@riak_server%d.solar' % (num, num)})[0]
        riak_services.append(r)

    for i, riak in enumerate(riak_services):
        signals.connect(nodes[i], riak)

    for i, riak in enumerate(riak_services[1:]):
        signals.connect(riak_services[0], riak, {'riak_name': 'join_to'}, events=None)

    hosts_services = []
    for i, riak in enumerate(riak_services):
        num = i + 1
        hosts_file = vr.create('hosts_file%d' % num,
                               'resources/hosts_file', {})[0]
        hosts_services.append(hosts_file)
        signals.connect(nodes[i], hosts_file)

    for riak in riak_services:
        for hosts_file in hosts_services:
            signals.connect(riak, hosts_file,
                            {'riak_hostname': 'hosts_names', 'ip': 'hosts_ips'},
                            events=False)

    has_errors = False
    for r in locals().values():

        # TODO: handle list
        if not isinstance(r, resource.Resource):
            continue

        # print 'Validating {}'.format(r.name)
        local_errors = validation.validate_resource(r)
        if local_errors:
            has_errors = True
            print 'ERROR: %s: %s' % (r.name, local_errors)

    if has_errors:
        print "ERRORS"
        sys.exit(1)

    events = [
        Dep('hosts_file1', 'run', 'success', 'riak_service1', 'run'),
        Dep('hosts_file2', 'run', 'success', 'riak_service2', 'run'),
        Dep('hosts_file3', 'run', 'success', 'riak_service3', 'run'),

        React('riak_service2', 'run', 'success', 'riak_service2', 'join'),
        React('riak_service3', 'run', 'success', 'riak_service3', 'join'),

        # Dep('riak_service1', 'run', 'success', 'riak_service2', 'join'),
        # Dep('riak_service1', 'run', 'success', 'riak_service3', 'join'),

        # React('riak_service2', 'join', 'error', 'riak_service2', 'leave'),
        # React('riak_service3', 'join', 'error', 'riak_service3', 'leave'),

        React('riak_service2', 'leave', 'success', 'riak_service2', 'join'),
        React('riak_service3', 'leave', 'success', 'riak_service3', 'join'),

        # React('riak_service2', 'leave', 'success', 'riak_service1', 'commit_leave'),
        # React('riak_service3', 'leave', 'success', 'riak_service1', 'commit_leave'),

        # Dep('riak_service1', 'commit_leave', 'success', 'riak_service2', 'join'),
        # Dep('riak_service1', 'commit_leave', 'success', 'riak_service3', 'join'),

        React('riak_service3', 'join', 'success', 'riak_service1', 'commit'),
        React('riak_service2', 'join', 'success', 'riak_service1', 'commit')
    ]

    for event in events:
        add_event(event)

    print 'Use solar changes process & orch'
    sys.exit(0)


def setup_haproxies():
    hps = []
    hpc = []
    hpsc_http = []
    hpsc_pb = []
    for i in xrange(3):
        num = i + 1
        hps.append(vr.create('haproxy_service%d' % num,
                             'resources/haproxy_service',
                             {})[0])
        hpc.append(vr.create('haproxy_config%d' % num,
                             'resources/haproxy_config',
                             {})[0])
        hpsc_http.append(vr.create('haproxy_service_config_http%d' % num,
                                   'resources/haproxy_service_config',
                                   {'listen_port': 8098,
                                    'protocol': 'http',
                                    'name': 'riak_haproxy_http%d' % num})[0])
        hpsc_pb.append(vr.create('haproxy_service_config_pb%d' % num,
                                 'resources/haproxy_service_config',
                                 {'listen_port': 8087,
                                  'protocol': 'tcp',
                                  'name': 'riak_haproxy_pb%d' % num})[0])

    riak1 = resource.load('riak_service1')
    riak2 = resource.load('riak_service2')
    riak3 = resource.load('riak_service3')
    riaks = [riak1, riak2, riak3]

    for single_hpsc in hpsc_http:
        for riak in riaks:
            signals.connect(riak, single_hpsc, {'riak_hostname': 'servers',
                                                'riak_port_http': 'ports'})

    for single_hpsc in hpsc_pb:
        for riak in riaks:
            signals.connect(riak, single_hpsc, {'riak_hostname': 'servers',
                                                'riak_port_pb': 'ports'})

    # haproxy config to haproxy service

    for single_hpc, single_hpsc in zip(hpc, hpsc_http):
        signals.connect(single_hpsc, single_hpc, {'protocol': 'configs_protocols',
                                                  'listen_port': 'listen_ports',
                                                  'name': 'configs_names',
                                                  'servers': 'configs',
                                                  'ports': 'configs_ports'})

    for single_hpc, single_hpsc in zip(hpc, hpsc_pb):
        signals.connect(single_hpsc, single_hpc, {'protocol': 'configs_protocols',
                                                  'listen_port': 'listen_ports',
                                                  'name': 'configs_names',
                                                  'servers': 'configs',
                                                  'ports': 'configs_ports'})

    for single_hps, single_hpc in zip(hps, hpc):
        signals.connect(single_hpc, single_hps, {'listen_ports': 'ports'},
                        events=False)

    # assign haproxy services to each node

    node1 = resource.load('node1')
    node2 = resource.load('node2')
    node3 = resource.load('node3')
    nodes = [node1, node2, node3]

    for single_node, single_hps in zip(nodes, hps):
        signals.connect(single_node, single_hps)

    for single_node, single_hpc in zip(nodes, hpc):
        signals.connect(single_node, single_hpc)

    has_errors = False
    for r in locals().values():

        # TODO: handle list
        if not isinstance(r, resource.Resource):
            continue

        # print 'Validating {}'.format(r.name)
        local_errors = validation.validate_resource(r)
        if local_errors:
            has_errors = True
            print 'ERROR: %s: %s' % (r.name, local_errors)

    if has_errors:
        print "ERRORS"
        sys.exit(1)

    events = []
    for node, single_hps, single_hpc in zip(nodes, hps, hpc):
        # r = React(node.name, 'run', 'success', single_hps.name, 'install')
        d = Dep(single_hps.name, 'run', 'success', single_hpc.name, 'run')
        e1 = React(single_hpc.name, 'run', 'success', single_hps.name, 'apply_config')
        e2 = React(single_hpc.name, 'update', 'success', single_hps.name, 'apply_config')
        # events.extend([r, d, e1, e2])
        events.extend([d, e1, e2])

    for event in events:
        add_event(event)


@click.group()
def main():
    pass


@click.command()
def deploy():
    setup_riak()


@click.command()
def add_haproxies():
    setup_haproxies()


@click.command()
def undeploy():
    raise NotImplementedError("Not yet")


main.add_command(deploy)
main.add_command(undeploy)
main.add_command(add_haproxies)


if __name__ == '__main__':
    main()
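The Dep/React wiring above is what sequences the cluster bootstrap: each hosts_file must run before its riak_service, services 2 and 3 join only after their own run succeeds, and each successful join triggers the plan/commit actions on riak_service1. A hand-built sketch of that chain (illustrative only; the real dependency graph is produced by solar.events.api.build_edges, as exercised by the test added later in this commit):

    import networkx as nx

    # Subset of the events wired in setup_riak(), drawn as plain edges.
    g = nx.DiGraph()
    g.add_edges_from([
        ('hosts_file2.run', 'riak_service2.run'),        # Dep: hosts file before riak
        ('hosts_file3.run', 'riak_service3.run'),
        ('riak_service2.run', 'riak_service2.join'),     # React: run -> own join
        ('riak_service3.run', 'riak_service3.join'),
        ('riak_service2.join', 'riak_service1.commit'),  # React: join -> commit
        ('riak_service3.join', 'riak_service1.commit'),
    ])
    # Any topological order runs joins after their runs and commit last.
    print(list(nx.topological_sort(g)))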

@@ -5,9 +5,10 @@
     config_dir: {src: {{ config_dir.value['src'] }}, dst: {{ config_dir.value['dst'] }}}
     haproxy_ip: {{ ip }}
     haproxy_services:
-      {% for service, ports, listen_port in zip(configs.value, configs_ports.value, listen_ports.value) %}
+      {% for service, ports, listen_port, protocol in zip(configs.value, configs_ports.value, listen_ports.value, configs_protocols.value) %}
       - name: {{ service['emitter_attached_to'] }}
         listen_port: {{ listen_port['value'] }}
+        protocol: {{ protocol['value'] }}
         servers:
         {% for server_ip, server_port in zip(service['value'], ports['value']) %}
         - name: {{ server_ip['emitter_attached_to'] }}
@@ -18,4 +19,4 @@
   tasks:
     - file: path={{ config_dir.value['src'] }}/ state=directory
     - file: path={{ config_dir.value['src'] }}/haproxy.cfg state=touch
-    - template: src={{ resource_dir }}/templates/haproxy.cfg dest={{ config_dir.value['src'] }}/haproxy.cfg
+    - template: src={{ resource_dir }}/templates/haproxy.cfg dest=/etc/haproxy/haproxy.cfg

resources/haproxy_config/actions/update.yml (new file, 22 lines)

@@ -0,0 +1,22 @@
# TODO
- hosts: [{{ip}}]
  sudo: yes
  vars:
    config_dir: {src: {{ config_dir.value['src'] }}, dst: {{ config_dir.value['dst'] }}}
    haproxy_ip: {{ ip }}
    haproxy_services:
      {% for service, ports, listen_port, protocol in zip(configs.value, configs_ports.value, listen_ports.value, configs_protocols.value) %}
      - name: {{ service['emitter_attached_to'] }}
        listen_port: {{ listen_port['value'] }}
        protocol: {{ protocol['value'] }}
        servers:
        {% for server_ip, server_port in zip(service['value'], ports['value']) %}
        - name: {{ server_ip['emitter_attached_to'] }}
          ip: {{ server_ip['value'] }}
          port: {{ server_port['value'] }}
        {% endfor %}
      {% endfor %}
  tasks:
    - file: path={{ config_dir.value['src'] }}/ state=directory
    - file: path={{ config_dir.value['src'] }}/haproxy.cfg state=touch
    - template: src={{ resource_dir }}/templates/haproxy.cfg dest=/etc/haproxy/haproxy.cfg

@@ -20,6 +20,9 @@ input:
   configs_ports:
     schema: [{value: [{value: int}]}]
     value: []
+  configs_protocols:
+    schema: [{value: str}]
+    value: []
   ssh_user:
     schema: str!
     value:

@@ -2,13 +2,13 @@ global
   log 127.0.0.1 local0
   log 127.0.0.1 local1 notice
   maxconn 4096
-  tune.ssl.default-dh-param 2048
+  # tune.ssl.default-dh-param 2048
   pidfile /var/run/haproxy.pid
   user haproxy
   group haproxy
   daemon
   stats socket /var/run/haproxy.stats level admin
-  ssl-default-bind-options no-sslv3
+  # ssl-default-bind-options no-sslv3 # ubuntu 14.04 has too old haproxy

 defaults
   log global
@@ -30,6 +30,7 @@ defaults

 {% for service in haproxy_services %}
 listen {{ service['name'] }} 0.0.0.0:{{ service['listen_port'] }}
+  {% if service['protocol'] == 'http' %}
   mode http
   stats enable
   stats uri /haproxy?stats
@@ -42,5 +43,17 @@ listen {{ service['name'] }} 0.0.0.0:{{ service['listen_port'] }}
   {% for server in service['servers'] %}
   server {{ server['name'] }} {{ server['ip'] }}:{{ server['port'] }} check
   {% endfor %}
+  {% elif service['protocol'] == 'tcp' %}
+  # tcp there
+  mode tcp
+  mode tcp
+  option tcpka
+  option srvtcpka
+  balance leastconn
+  {% for server in service['servers'] %}
+  server {{ server['name'] }} {{ server['ip'] }}:{{ server['port'] }} check
+  {% endfor %}
+  {% endif %}


 {% endfor %}

resources/haproxy_service/actions/apply_config.yml (new file, 7 lines)

@@ -0,0 +1,7 @@

- hosts: [{{ ip }}]
  sudo: yes
  tasks:
    - service:
        name: haproxy
        state: reloaded

@@ -1,27 +1,10 @@
-
 - hosts: [{{ ip }}]
   sudo: yes
   tasks:
-    - docker:
-        name: {{ resource_name }}
-        image: {{ image }}
-        state: running
-        net: host
-        {% if ports.value %}
-        ports:
-        {% for port in ports.value %}
-        {% for p in port['value'] %}
-        - {{ p['value'] }}:{{ p['value'] }}
-        {% endfor %}
-        {% endfor %}
-        {% endif %}
-        {% if host_binds.value %}
-        volumes:
-        # TODO: host_binds might need more work
-        # Currently it's not that trivial to pass custom src: dst here
-        # (when a config variable is passed here from other resource)
-        # so we mount it to the same directory as on host
-        {% for bind in host_binds.value %}
-        - {{ bind['value']['src'] }}:{{ bind['value']['dst'] }}:{{ bind['value'].get('mode', 'ro') }}
-        {% endfor %}
-        {% endif %}
+    - apt:
+        name: haproxy
+        state: present
+    - replace:
+        dest: '/etc/default/haproxy'
+        regexp: ENABLED=0
+        replace: ENABLED=1

resources/haproxy_service/actions/update.yml (new file, 7 lines)

@@ -0,0 +1,7 @@

- hosts: [{{ ip }}]
  sudo: yes
  tasks:
    - service:
        name: haproxy
        state: reloaded

@@ -5,18 +5,9 @@ input:
   ip:
     schema: str!
     value:
-  image:
-    schema: str!
-    value: tutum/haproxy
   ports:
     schema: [{value: [{value: int}]}]
     value: []
-  host_binds:
-    schema: [{value: {src: str, dst: str}}]
-    value: []
-  volume_binds:
-    schema: [{src: str, dst: str}]
-    value: []
   ssh_user:
     schema: str!
     value:

@@ -1,13 +1,16 @@
-id: haproxy_keystone_config
+id: haproxy_general_config
 handler: none
 version: 1.0.0
 input:
   name:
     schema: str!
-    value: keystone-admin
+    value: general_haproxy
   listen_port:
     schema: int!
     value: 9999
+  protocol:
+    schema: str!
+    value: http
   ports:
     schema: [{value: int}]
     value: []
@@ -15,4 +18,4 @@ input:
     schema: [{value: str}]
     value: []

-tags: [resources/haproxy, resource/haproxy_keystone_config]
+tags: [resources/haproxy, resource/haproxy_general_config]

resources/hosts_file/actions/run.yml (new file, 11 lines)

@@ -0,0 +1,11 @@
- hosts: {{ip}}
  sudo: yes
  tasks:
    {% for ip, host in zip(hosts_ips.value, hosts_names.value) %}
    - name: Create hosts entries for {{host['value']}} => {{ip['value']}}
      lineinfile:
        dest: /etc/hosts
        regexp: '.*{{host["value"]}}$'
        line: '{{ip["value"]}} {{host["value"]}}'
        state: present
    {% endfor %}

resources/hosts_file/meta.yaml (new file, 20 lines)

@@ -0,0 +1,20 @@
id: hosts_file
handler: ansible
version: 1.0.0

input:
  ip:
    schema: str!
    value:
  ssh_key:
    schema: str!
    value:
  ssh_user:
    schema: str!
    value:
  hosts_names:
    schema: [{value: str!}]
    value: []
  hosts_ips:
    schema: [{value: str!}]
    value: []

resources/riak_join_single/actions/join.yml (new file, 4 lines)

@@ -0,0 +1,4 @@
- hosts: [{{ip}}]
  sudo: yes
  tasks:
    - shell: riak-admin cluster join {{join_to}}

resources/riak_join_single/meta.yaml (new file, 18 lines)

@@ -0,0 +1,18 @@
id: riak_join_single
handler: ansible
version: 1.0.0
actions:
  join: actions/join.yml
input:
  join_to:
    schema: str!
    value:
  ip:
    schema: str!
    value:
  ssh_key:
    schema: str!
    value:
  ssh_user:
    schema: str!
    value:

resources/riak_node/actions/commit.yml (new file, 6 lines)

@@ -0,0 +1,6 @@
- hosts: [{{ip}}]
  sudo: yes
  tasks:
    # - shell: sleep 30
    - shell: riak-admin cluster plan
    - shell: riak-admin cluster commit

resources/riak_node/actions/join.yml (new file, 15 lines)

@@ -0,0 +1,15 @@
- hosts: [{{ip}}]
  sudo: yes
  tasks:
    - shell: riak-admin cluster join {{join_to}}
      ignore_errors: true
      register: join_output
    # the tasks below are a hacky workaround for "this node is already a member of a cluster";
    # solar for now lacks the logic that would allow us to avoid it
    - shell: /bin/true
      when: join_output|failed and join_output.stdout.find("This node is already a member of a cluster") != -1
    - shell: /bin/false
      when: join_output|failed and join_output.stdout.find("This node is already a member of a cluster") == -1
    - shell: /bin/true
      when: join_output|success

resources/riak_node/actions/remove.yml (new file, 6 lines)

@@ -0,0 +1,6 @@
- hosts: [{{ip}}]
  sudo: yes
  tasks:
    - apt:
        name: riak
        state: absent

resources/riak_node/actions/run.yml (new file, 28 lines)

@@ -0,0 +1,28 @@
- hosts: [{{ip}}]
  sudo: yes
  tasks:
    # the tasks below are mostly for tests
    - shell: killall -u riak
      ignore_errors: yes
    # remove the above when not testing

    # we install the ubuntu repo here,
    # NOT recommended in production
    - shell: curl -s https://packagecloud.io/install/repositories/basho/riak/script.deb.sh | sudo bash

    - apt:
        name: riak
        state: present
    - service:
        name: riak
        state: stopped
    - file: path=/etc/riak/riak.conf state=touch
    - template:
        src: {{ resource_dir }}/templates/riak.conf
        dest: /etc/riak/riak.conf
    - shell: rm -fr /var/lib/riak/kv_vnode/*
    - shell: rm -fr /var/lib/riak/ring/*

    - service:
        name: riak
        state: reloaded

resources/riak_node/actions/update.yml (new file, 12 lines)

@@ -0,0 +1,12 @@
- hosts: [{{host}}]
  sudo: yes
  tasks:
    - service:
        name: riak
        state: stopped
    - template:
        src: {{ resource_dir }}/templates/riak.conf
        dest: /etc/riak/riak.conf
    - service:
        name: riak
        state: reloaded

resources/riak_node/meta.yaml (new file, 39 lines)

@@ -0,0 +1,39 @@
id: riak_node
handler: ansible
version: 1.0.0
actions:
  commit: actions/commit.yml
  run: actions/run.yml
  join: actions/join.yml
input:
  ip:
    schema: str!
    value:
  ssh_key:
    schema: str!
    value:
  ssh_user:
    schema: str!
    value:
  riak_self_name:
    schema: str!
    value:
  riak_hostname:
    schema: str!
    value:
  riak_name:
    schema: str!
    # value: "{{riak_self_name}}@{{riak_hostname}}"
    value: "{{riak_self_name}}@{{ip}}"
  riak_port_http:
    schema: int!
    value: 18098
  riak_port_pb:
    schema: int!
    value: 18087
  riak_port_solr:
    schema: int!
    value: 8985
  join_to:
    schema: str
    value:
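The inputs above use Solar's schema shorthand: a trailing `!` (as in `str!`, `int!`) marks a required value, and list schemas like `[{value: str!}]` describe values collected over connections. As a rough illustration only (the real checks live in solar.core.validation, whose internals are not part of this diff, so names and error strings here are assumptions), required-field checking amounts to something like:

    # Hedged sketch of checking one scalar input against a `str!`-style schema;
    # Python 2, matching the rest of the codebase.
    def check_scalar(schema, value):
        required = schema.endswith('!')          # '!' marks a required input
        base = schema.rstrip('!')
        if value is None:
            return ['value is required'] if required else []
        types = {'str': basestring, 'int': int}
        if not isinstance(value, types[base]):
            return ['expected %s, got %r' % (base, value)]
        return []

    assert check_scalar('str!', None) == ['value is required']
    assert check_scalar('int!', 18098) == []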

resources/riak_node/templates/riak.conf (new file, 494 lines)

@@ -0,0 +1,494 @@
## Where to emit the default log messages (typically at 'info'
## severity):
## off: disabled
## file: the file specified by log.console.file
## console: to standard output (seen when using `riak attach-direct`)
## both: log.console.file and standard out.
##
## Default: file
##
## Acceptable values:
## - one of: off, file, console, both
log.console = file

## The severity level of the console log, default is 'info'.
##
## Default: info
##
## Acceptable values:
## - one of: debug, info, notice, warning, error, critical, alert, emergency, none
log.console.level = info

## When 'log.console' is set to 'file' or 'both', the file where
## console messages will be logged.
##
## Default: $(platform_log_dir)/console.log
##
## Acceptable values:
## - the path to a file
log.console.file = $(platform_log_dir)/console.log

## The file where error messages will be logged.
##
## Default: $(platform_log_dir)/error.log
##
## Acceptable values:
## - the path to a file
log.error.file = $(platform_log_dir)/error.log

## When set to 'on', enables log output to syslog.
##
## Default: off
##
## Acceptable values:
## - on or off
log.syslog = off

## Whether to enable the crash log.
##
## Default: on
##
## Acceptable values:
## - on or off
log.crash = on

## If the crash log is enabled, the file where its messages will
## be written.
##
## Default: $(platform_log_dir)/crash.log
##
## Acceptable values:
## - the path to a file
log.crash.file = $(platform_log_dir)/crash.log

## Maximum size in bytes of individual messages in the crash log
##
## Default: 64KB
##
## Acceptable values:
## - a byte size with units, e.g. 10GB
log.crash.maximum_message_size = 64KB

## Maximum size of the crash log in bytes, before it is rotated
##
## Default: 10MB
##
## Acceptable values:
## - a byte size with units, e.g. 10GB
log.crash.size = 10MB

## The schedule on which to rotate the crash log. For more
## information see:
## https://github.com/basho/lager/blob/master/README.md#internal-log-rotation
##
## Default: $D0
##
## Acceptable values:
## - text
log.crash.rotation = $D0

## The number of rotated crash logs to keep. When set to
## 'current', only the current open log file is kept.
##
## Default: 5
##
## Acceptable values:
## - an integer
## - the text "current"
log.crash.rotation.keep = 5

## Name of the Erlang node
##
## Default: riak@127.0.0.1
##
## Acceptable values:
## - text
nodename = {{riak_name}}

## Cookie for distributed node communication. All nodes in the
## same cluster should use the same cookie or they will not be able to
## communicate.
##
## Default: riak
##
## Acceptable values:
## - text
distributed_cookie = riak

## Sets the number of threads in async thread pool, valid range
## is 0-1024. If thread support is available, the default is 64.
## More information at: http://erlang.org/doc/man/erl.html
##
## Default: 64
##
## Acceptable values:
## - an integer
erlang.async_threads = 64

## The number of concurrent ports/sockets
## Valid range is 1024-134217727
##
## Default: 65536
##
## Acceptable values:
## - an integer
erlang.max_ports = 65536

## Set scheduler forced wakeup interval. All run queues will be
## scanned each Interval milliseconds. While there are sleeping
## schedulers in the system, one scheduler will be woken for each
## non-empty run queue found. An Interval of zero disables this
## feature, which also is the default.
## This feature is a workaround for lengthy executing native code, and
## native code that do not bump reductions properly.
## More information: http://www.erlang.org/doc/man/erl.html#+sfwi
##
## Default: 500
##
## Acceptable values:
## - an integer
## erlang.schedulers.force_wakeup_interval = 500

## Enable or disable scheduler compaction of load. By default
## scheduler compaction of load is enabled. When enabled, load
## balancing will strive for a load distribution which causes as many
## scheduler threads as possible to be fully loaded (i.e., not run out
## of work). This is accomplished by migrating load (e.g. runnable
## processes) into a smaller set of schedulers when schedulers
## frequently run out of work. When disabled, the frequency with which
## schedulers run out of work will not be taken into account by the
## load balancing logic.
## More information: http://www.erlang.org/doc/man/erl.html#+scl
##
## Default: false
##
## Acceptable values:
## - one of: true, false
## erlang.schedulers.compaction_of_load = false

## Enable or disable scheduler utilization balancing of load. By
## default scheduler utilization balancing is disabled and instead
## scheduler compaction of load is enabled which will strive for a
## load distribution which causes as many scheduler threads as
## possible to be fully loaded (i.e., not run out of work). When
## scheduler utilization balancing is enabled the system will instead
## try to balance scheduler utilization between schedulers. That is,
## strive for equal scheduler utilization on all schedulers.
## More information: http://www.erlang.org/doc/man/erl.html#+sub
##
## Acceptable values:
## - one of: true, false
## erlang.schedulers.utilization_balancing = true

## Number of partitions in the cluster (only valid when first
## creating the cluster). Must be a power of 2, minimum 8 and maximum
## 1024.
##
## Default: 64
##
## Acceptable values:
## - an integer
ring_size = 8

## Number of concurrent node-to-node transfers allowed.
##
## Default: 2
##
## Acceptable values:
## - an integer
## transfer_limit = 2

## Default cert location for https can be overridden
## with the ssl config variable, for example:
##
## Acceptable values:
## - the path to a file
## ssl.certfile = $(platform_etc_dir)/cert.pem

## Default key location for https can be overridden with the ssl
## config variable, for example:
##
## Acceptable values:
## - the path to a file
## ssl.keyfile = $(platform_etc_dir)/key.pem

## Default signing authority location for https can be overridden
## with the ssl config variable, for example:
##
## Acceptable values:
## - the path to a file
## ssl.cacertfile = $(platform_etc_dir)/cacertfile.pem

## DTrace support Do not enable 'dtrace' unless your Erlang/OTP
## runtime is compiled to support DTrace. DTrace is available in
## R15B01 (supported by the Erlang/OTP official source package) and in
## R14B04 via a custom source repository & branch.
##
## Default: off
##
## Acceptable values:
## - on or off
dtrace = off

## Platform-specific installation paths (substituted by rebar)
##
## Default: ./bin
##
## Acceptable values:
## - the path to a directory
platform_bin_dir = ./bin

##
## Default: ./data
##
## Acceptable values:
## - the path to a directory
platform_data_dir = ./data

##
## Default: ./etc
##
## Acceptable values:
## - the path to a directory
platform_etc_dir = ./etc

##
## Default: ./lib
##
## Acceptable values:
## - the path to a directory
platform_lib_dir = ./lib

##
## Default: ./log
##
## Acceptable values:
## - the path to a directory
platform_log_dir = ./log

## Enable consensus subsystem. Set to 'on' to enable the
## consensus subsystem used for strongly consistent Riak operations.
##
## Default: off
##
## Acceptable values:
## - on or off
## strong_consistency = on

## listener.http.<name> is an IP address and TCP port that the Riak
## HTTP interface will bind.
##
## Default: 127.0.0.1:8098
##
## Acceptable values:
## - an IP/port pair, e.g. 127.0.0.1:10011
listener.http.internal = 0.0.0.0:{{riak_port_http}}

## listener.protobuf.<name> is an IP address and TCP port that the Riak
## Protocol Buffers interface will bind.
##
## Default: 127.0.0.1:8087
##
## Acceptable values:
## - an IP/port pair, e.g. 127.0.0.1:10011
listener.protobuf.internal = 0.0.0.0:{{riak_port_pb}}

## The maximum length to which the queue of pending connections
## may grow. If set, it must be an integer > 0. If you anticipate a
## huge number of connections being initialized *simultaneously*, set
## this number higher.
##
## Default: 128
##
## Acceptable values:
## - an integer
## protobuf.backlog = 128

## listener.https.<name> is an IP address and TCP port that the Riak
## HTTPS interface will bind.
##
## Acceptable values:
## - an IP/port pair, e.g. 127.0.0.1:10011
## listener.https.internal = 127.0.0.1:8098

## How Riak will repair out-of-sync keys. Some features require
## this to be set to 'active', including search.
## * active: out-of-sync keys will be repaired in the background
## * passive: out-of-sync keys are only repaired on read
## * active-debug: like active, but outputs verbose debugging
## information
##
## Default: active
##
## Acceptable values:
## - one of: active, passive, active-debug
anti_entropy = active

## Specifies the storage engine used for Riak's key-value data
## and secondary indexes (if supported).
##
## Default: bitcask
##
## Acceptable values:
## - one of: bitcask, leveldb, memory, multi
storage_backend = bitcask

## Controls which binary representation of a riak value is stored
## on disk.
## * 0: Original erlang:term_to_binary format. Higher space overhead.
## * 1: New format for more compact storage of small values.
##
## Default: 1
##
## Acceptable values:
## - the integer 1
## - the integer 0
object.format = 1

## Reading or writing objects bigger than this size will write a
## warning in the logs.
##
## Default: 5MB
##
## Acceptable values:
## - a byte size with units, e.g. 10GB
object.size.warning_threshold = 5MB

## Writing an object bigger than this will send a failure to the
## client.
##
## Default: 50MB
##
## Acceptable values:
## - a byte size with units, e.g. 10GB
object.size.maximum = 50MB

## Writing an object with more than this number of siblings will
## generate a warning in the logs.
##
## Default: 25
##
## Acceptable values:
## - an integer
object.siblings.warning_threshold = 25

## Writing an object with more than this number of siblings will
## send a failure to the client.
##
## Default: 100
##
## Acceptable values:
## - an integer
object.siblings.maximum = 100

## A path under which bitcask data files will be stored.
##
## Default: $(platform_data_dir)/bitcask
##
## Acceptable values:
## - the path to a directory
bitcask.data_root = $(platform_data_dir)/bitcask

## Configure how Bitcask writes data to disk.
## erlang: Erlang's built-in file API
## nif: Direct calls to the POSIX C API
## The NIF mode provides higher throughput for certain
## workloads, but has the potential to negatively impact
## the Erlang VM, leading to higher worst-case latencies
## and possible throughput collapse.
##
## Default: erlang
##
## Acceptable values:
## - one of: erlang, nif
bitcask.io_mode = erlang

## Set to 'off' to disable the admin panel.
##
## Default: off
##
## Acceptable values:
## - on or off
riak_control = on

## Authentication mode used for access to the admin panel.
##
## Default: off
##
## Acceptable values:
## - one of: off, userlist
riak_control.auth.mode = off

## If riak control's authentication mode (riak_control.auth.mode)
## is set to 'userlist' then this is the list of usernames and
## passwords for access to the admin panel.
## To create users with given names, add entries of the format:
## riak_control.auth.user.USERNAME.password = PASSWORD
## replacing USERNAME with the desired username and PASSWORD with the
## desired password for that user.
##
## Acceptable values:
## - text
## riak_control.auth.user.admin.password = pass

## This parameter defines the percentage of total server memory
## to assign to LevelDB. LevelDB will dynamically adjust its internal
## cache sizes to stay within this size. The memory size can
## alternately be assigned as a byte count via leveldb.maximum_memory
## instead.
##
## Default: 70
##
## Acceptable values:
## - an integer
leveldb.maximum_memory.percent = 70

## To enable Search set this 'on'.
##
## Default: off
##
## Acceptable values:
## - on or off
search = off

## How long Riak will wait for Solr to start. The start sequence
## will be tried twice. If both attempts timeout, then the Riak node
## will be shutdown. This may need to be increased as more data is
## indexed and Solr takes longer to start. Values lower than 1s will
## be rounded up to the minimum 1s.
##
## Default: 30s
##
## Acceptable values:
## - a time duration with units, e.g. '10s' for 10 seconds
search.solr.start_timeout = 30s

## The port number which Solr binds to.
## NOTE: Binds on every interface.
##
## Default: 8093
##
## Acceptable values:
## - an integer
search.solr.port = 8093

## The port number which Solr JMX binds to.
## NOTE: Binds on every interface.
##
## Default: 8985
##
## Acceptable values:
## - an integer
search.solr.jmx_port = 8985

## The options to pass to the Solr JVM. Non-standard options,
## i.e. -XX, may not be portable across JVM implementations.
## E.g. -XX:+UseCompressedStrings
##
## Default: -d64 -Xms1g -Xmx1g -XX:+UseStringCache -XX:+UseCompressedOops
##
## Acceptable values:
## - text
search.solr.jvm_options = -d64 -Xms1g -Xmx1g -XX:+UseStringCache -XX:+UseCompressedOops

# new

@@ -26,13 +26,28 @@ def validate():


 @changes.command()
-def stage():
+@click.option('-d', default=False, is_flag=True)
+def stage(d):
     log = list(change.stage_changes().reverse())
     for item in log:
         click.echo(item)
+        if d:
+            for line in item.details:
+                click.echo(' '*4+line)
     if not log:
         click.echo('No changes')


+@changes.command(name='staged-item')
+@click.argument('log_action')
+@click.option('-d', default=True, is_flag=True)
+def staged_item(log_action, d):
+    item = data.SL().get(log_action)
+    if not item:
+        click.echo('No staged changes for {}'.format(log_action))
+    else:
+        click.echo(item)
+        for line in item.details:
+            click.echo(' '*4+line)
+
 @changes.command()
 def process():
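With the new -d flag, each staged LogItem is printed followed by its details lines, indented four spaces (the details property is added to LogItem later in this commit). An illustrative transcript, with made-up task names and values:

    # $ solar changes stage -d
    # log task=riak_service1.run uid=...
    #     ++ ip: 10.0.0.3
    #     -+ riak_name: riak1@old >> riak1@riak_server1.solar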

solar/solar/core/handlers/python.py (new file, 10 lines)

@@ -0,0 +1,10 @@
# -*- coding: utf-8 -*-
from fabric import api as fabric_api

from solar.core.handlers.base import TempFileHandler


class Python(TempFileHandler):
    def action(self, resource, action_name):
        action_file = self._compile_action_file(resource, action_name)
        fabric_api.local('python {}'.format(action_file))

@@ -135,7 +135,14 @@ def connect_single(emitter, src, receiver, dst):
     emitter.args[src].subscribe(receiver.args[dst])


-def connect(emitter, receiver, mapping=None):
+def connect(emitter, receiver, mapping=None, events=None):
+    # convert if needed
+    # TODO: handle invalid resource
+    # if isinstance(emitter, basestring):
+    #     emitter = resource.load(emitter)
+    # if isinstance(receiver, basestring):
+    #     receiver = resource.load(receiver)
+
     mapping = mapping or guess_mapping(emitter, receiver)

     if isinstance(mapping, set):
@@ -151,15 +158,31 @@ def connect(emitter, receiver, mapping=None, events=None):

         connect_single(emitter, src, receiver, dst)

-    events = [
+    # possibility to set events; when False it will NOT add events at all.
+    # Setting events to a dict with `action_name`: False will not add the
+    # `action_name` event
+    events_to_add = [
         Dependency(emitter.name, 'run', 'success', receiver.name, 'run'),
         Dependency(emitter.name, 'update', 'success', receiver.name, 'update')
     ]
-    add_events(emitter.name, events)
-    #receiver.save()
+    if isinstance(events, dict):
+        for k, v in events.items():
+            if v is not False:
+                events_to_add = filter(lambda x: x.parent_action == k, events_to_add)
+        add_events(emitter.name, events_to_add)
+    elif events is not False:
+        add_events(emitter.name, events_to_add)
+
+    # receiver.save()


 def disconnect(emitter, receiver):
     # convert if needed
     # TODO: handle invalid resource
     # if isinstance(emitter, basestring):
     #     emitter = resource.load(emitter)
     # if isinstance(receiver, basestring):
     #     receiver = resource.load(receiver)

     clients = Connections.read_clients()

     for src, destinations in clients[emitter.name].items():
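The new events argument controls what gets auto-wired when two resources are connected; example-riaks.py above relies on all three forms. A hedged usage summary, as implemented here (the resource handles are placeholders; in the example they come from vr.create and resource.load):

    from solar.core import signals

    def wire(emitter, receiver):
        # default (events=None): besides the data connection, adds
        # Dependency(run -> run) and Dependency(update -> update)
        signals.connect(emitter, receiver)

        # events=False: data-flow connection only, no scheduling events at all
        # (used above to fan riak hostnames/ips into every hosts_file)
        signals.connect(emitter, receiver,
                        {'riak_hostname': 'hosts_names'}, events=False)

        # events as a dict: per the comment in connect(), a dict such as
        # {'update': False} is intended to suppress the update -> update
        # event while keeping run -> run (the filter over parent_action
        # above is what implements this)
        signals.connect(emitter, receiver,
                        {'listen_ports': 'ports'}, events={'update': False})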

@@ -114,6 +114,7 @@ def build_edges(changed_resources, changes_graph, events):
     :param events: {res: [controls.Event objects]}
     """
     stack = changed_resources[:]
+    visited = []
     while stack:
         node = stack.pop()

@@ -122,7 +123,9 @@ def build_edges(changed_resources, changes_graph, events):
         else:
             log.debug('No dependencies based on %s', node)

-        for ev in events.get(node, ()):
-            ev.insert(stack, changes_graph)
+        if node not in visited:
+            for ev in events.get(node, ()):
+                ev.insert(stack, changes_graph)

+        visited.append(node)
     return changes_graph
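The visited list matters because reactions can form cycles: a node pushed onto the stack twice would fire its events twice, and mutually-triggering nodes would never let the stack drain. A standalone sketch of the traversal (stub events standing in for controls.Event.insert, not the solar module itself):

    # `events` maps a node to the follow-up nodes it triggers.
    def walk(changed, events):
        stack = list(changed)
        visited = []
        order = []
        while stack:
            node = stack.pop()
            order.append(node)
            if node not in visited:          # the guard added in this commit
                for nxt in events.get(node, ()):
                    stack.append(nxt)
            visited.append(node)
        return order

    # A reactive cycle: a triggers b, b triggers a. With the guard the walk
    # terminates; without it, the stack never drains.
    print(walk(['a'], {'a': ['b'], 'b': ['a']}))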

@@ -63,6 +63,20 @@ class LogItem(object):
     def compact(self):
         return 'log task={} uid={}'.format(self.log_action, self.uid)

+    @property
+    def details(self):
+        rst = []
+        for type_, val, change in self.diff:
+            if type_ == 'add':
+                for it in change:
+                    if isinstance(it, dict):
+                        rst.append('++ {}: {}'.format(it[0], it[1]['value']))
+                    else:
+                        rst.append('++ {}: {}'.format(it[0], str(it[1])))
+            elif type_ == 'change':
+                rst.append('-+ {}: {} >> {}'.format(val, change[0], change[1]))
+        return rst
+

 class Log(object):
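Roughly, details() turns the stored diff tuples into the '++' (added) and '-+' (changed) lines the CLI prints. Illustrative data only; the real tuples come from whatever differ populates LogItem.diff:

    # A fake diff in the shape details() expects:
    diff = [
        ('add', '', [('ip', '10.0.0.3')]),
        ('change', 'riak_name', ('riak1@old', 'riak1@riak_server1.solar')),
    ]
    # Applying the same formatting rules as LogItem.details would yield:
    #   ++ ip: 10.0.0.3
    #   -+ riak_name: riak1@old >> riak1@riak_server1.solar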

@@ -66,3 +66,21 @@ def test_rmq(rmq_deps):

     assert set(changes_graph.successors('rmq_cluster.1.create')) == {
         'rmq_cluster.2.join', 'rmq_cluster.3.join'}
+
+
+def test_riak():
+
+    events = {
+        'riak_service1': [evapi.React('riak_service1', 'run', 'success', 'riak_service2', 'join'),
+                          evapi.React('riak_service1', 'run', 'success', 'riak_service3', 'join')],
+        'riak_service3': [evapi.React('riak_service3', 'join', 'success', 'riak_service1', 'commit')],
+        'riak_service2': [evapi.React('riak_service2', 'join', 'success', 'riak_service1', 'commit')],
+    }
+    changed = ['riak_service1']
+    changes_graph = nx.DiGraph()
+    changes_graph.add_node('riak_service1.run')
+    evapi.build_edges(changed, changes_graph, events)
+    assert nx.topological_sort(changes_graph) == [
+        'riak_service1.run', 'riak_service2.join', 'riak_service3.join', 'riak_service1.commit']

templates/riak_nodes.yml (new file, 20 lines)

@@ -0,0 +1,20 @@
id: simple_riak_cluster
resources:
  - id: node1
    from: resources/ro_node
    values:
      ip: '10.0.0.3'
      ssh_key: '/vagrant/.vagrant/machines/solar-dev1/virtualbox/private_key'
      ssh_user: 'vagrant'
  - id: node2
    from: resources/ro_node
    values:
      ip: '10.0.0.4'
      ssh_key: '/vagrant/.vagrant/machines/solar-dev2/virtualbox/private_key'
      ssh_user: 'vagrant'
  - id: node3
    from: resources/ro_node
    values:
      ip: '10.0.0.5'
      ssh_key: '/vagrant/.vagrant/machines/solar-dev3/virtualbox/private_key'
      ssh_user: 'vagrant'