Merge branch 'x' into master-x-merge
commit 7417253207
3 .gitignore (vendored)
@@ -7,3 +7,6 @@
 .vagrant
 tmp/
+
+#vim
+*.swp
28 Vagrantfile (vendored)
@@ -13,21 +13,24 @@ SCRIPT
 
 Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
 
-  config.vm.box = "rustyrobot/deb-jessie-amd64"
+  config.vm.box = "deb/jessie-amd64"
+  #rustyrobot/deb-jessie-amd64"
 
   config.vm.define "solar-dev", primary: true do |guest1|
     guest1.vm.provision "shell", inline: init_script, privileged: true
     guest1.vm.provision "file", source: "~/.vagrant.d/insecure_private_key", destination: "/vagrant/tmp/keys/ssh_private"
+    guest1.vm.provision "file", source: "ansible.cfg", destination: "/home/vagrant/.ansible.cfg"
     guest1.vm.network "private_network", ip: "10.0.0.2"
     guest1.vm.host_name = "solar-dev"
 
     guest1.vm.provider :virtualbox do |v|
-      v.customize ["modifyvm", :id, "--memory", 2048]
+      v.customize ["modifyvm", :id, "--memory", 256]
       v.name = "solar-dev"
     end
   end
 
   config.vm.define "solar-dev2" do |guest2|
+    guest2.vm.provision "shell", inline: init_script, privileged: true
     guest2.vm.network "private_network", ip: "10.0.0.3"
     guest2.vm.host_name = "solar-dev2"
 
@@ -37,4 +40,25 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
     end
   end
 
+  config.vm.define "solar-dev3" do |guest3|
+    guest3.vm.provision "shell", inline: init_script, privileged: true
+    guest3.vm.network "private_network", ip: "10.0.0.4"
+    guest3.vm.host_name = "solar-dev3"
+
+    guest3.vm.provider :virtualbox do |v|
+      v.customize ["modifyvm", :id, "--memory", 1024]
+      v.name = "solar-dev3"
+    end
+  end
+
+  config.vm.define "solar-dev4" do |guest4|
+    guest4.vm.provision "shell", inline: init_script, privileged: true
+    guest4.vm.network "private_network", ip: "10.0.0.5"
+    guest4.vm.host_name = "solar-dev4"
+
+    guest4.vm.provider :virtualbox do |v|
+      v.customize ["modifyvm", :id, "--memory", 1024]
+      v.name = "solar-dev4"
+    end
+  end
 end
2 ansible.cfg (Normal file)
@@ -0,0 +1,2 @@
[defaults]
host_key_checking = False
182 cli.py (Normal file)
@@ -0,0 +1,182 @@
import click
import json
#import matplotlib
#matplotlib.use('Agg')  # don't show windows
#import matplotlib.pyplot as plt
import networkx as nx
import os
import subprocess

from x import actions as xa
from x import deployment as xd
from x import resource as xr
from x import signals as xs


@click.group()
def cli():
    pass


def init_cli_resource():
    @click.group()
    def resource():
        pass

    cli.add_command(resource)

    @click.command()
    @click.argument('resource_path')
    @click.argument('action_name')
    def action(action_name, resource_path):
        print 'action', resource_path, action_name
        r = xr.load(resource_path)
        xa.resource_action(r, action_name)

    resource.add_command(action)

    @click.command()
    @click.argument('name')
    @click.argument('base_path')
    @click.argument('dest_path')
    @click.argument('args')
    def create(args, dest_path, base_path, name):
        print 'create', name, base_path, dest_path, args
        args = json.loads(args)
        xr.create(name, base_path, dest_path, args)

    resource.add_command(create)

    @click.command()
    @click.argument('resource_path')
    @click.argument('tag_name')
    @click.option('--add/--delete', default=True)
    def tag(add, tag_name, resource_path):
        print 'Tag', resource_path, tag_name, add
        r = xr.load(resource_path)
        if add:
            r.add_tag(tag_name)
        else:
            r.remove_tag(tag_name)
        r.save()

    resource.add_command(tag)

    @click.command()
    @click.argument('path')
    @click.option('--all/--one', default=False)
    @click.option('--tag', default=None)
    def show(tag, all, path):
        if all or tag:
            for name, resource in xr.load_all(path).items():
                show = True
                if tag:
                    if tag not in resource.tags:
                        show = False

                if show:
                    print resource
                    print
        else:
            print xr.load(path)

    resource.add_command(show)

    @click.command()
    @click.argument('path')
    @click.argument('args')
    def update(args, path):
        print 'Update', path, args
        args = json.loads(args)
        # Need to load all resources for bubbling effect to take place
        # TODO: resources can be scattered around, this is a simple
        # situation when we assume resources are all in one directory
        base_path, name = os.path.split(path)
        all = xr.load_all(base_path)
        r = all[name]
        r.update(args)

    resource.add_command(update)


def init_cli_connect():
    @click.command()
    @click.argument('emitter')
    @click.argument('receiver')
    @click.option('--mapping', default=None)
    def connect(mapping, receiver, emitter):
        print 'Connect', emitter, receiver
        emitter = xr.load(emitter)
        receiver = xr.load(receiver)
        print emitter
        print receiver
        if mapping is not None:
            mapping = json.loads(mapping)
        xs.connect(emitter, receiver, mapping=mapping)

    cli.add_command(connect)

    @click.command()
    @click.argument('emitter')
    @click.argument('receiver')
    def disconnect(receiver, emitter):
        print 'Disconnect', emitter, receiver
        emitter = xr.load(emitter)
        receiver = xr.load(receiver)
        print emitter
        print receiver
        xs.disconnect(emitter, receiver)

    cli.add_command(disconnect)


def init_cli_connections():
    @click.group()
    def connections():
        pass

    cli.add_command(connections)

    @click.command()
    def show():
        print json.dumps(xs.CLIENTS, indent=2)

    connections.add_command(show)

    # TODO: this requires graphing libraries
    @click.command()
    def graph():
        #g = xs.connection_graph()
        g = xs.detailed_connection_graph()

        nx.write_dot(g, 'graph.dot')
        subprocess.call(['dot', '-Tpng', 'graph.dot', '-o', 'graph.png'])

        # Matplotlib
        #pos = nx.spring_layout(g)
        #nx.draw_networkx_nodes(g, pos)
        #nx.draw_networkx_edges(g, pos, arrows=True)
        #nx.draw_networkx_labels(g, pos)
        #plt.axis('off')
        #plt.savefig('graph.png')

    connections.add_command(graph)


def init_cli_deployment_config():
    @click.command()
    @click.argument('filepath')
    def deploy(filepath):
        print 'Deploying from file {}'.format(filepath)
        xd.deploy(filepath)

    cli.add_command(deploy)


if __name__ == '__main__':
    init_cli_resource()
    init_cli_connect()
    init_cli_connections()
    init_cli_deployment_config()

    cli()
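Note that cli.py only attaches its sub-commands inside the init_* helpers, so nothing is registered until they run. A minimal sketch of exercising the group with click's test runner (CliRunner is standard click API); the module name `cli` and the `rs/node1` path come from this commit, everything else is illustrative:

```python
# Sketch only: assumes cli.py is importable as `cli` and that the `x` package
# and an rs/node1 resource exist (as created by the scripts in this commit).
from click.testing import CliRunner

import cli as solar_cli

solar_cli.init_cli_resource()   # attaches `resource show/create/...`
solar_cli.init_cli_connect()    # attaches `connect` / `disconnect`

runner = CliRunner()
result = runner.invoke(solar_cli.cli, ['resource', 'show', 'rs/node1'])
print(result.output)
```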
11 compose.yml
@@ -1,11 +0,0 @@
---

- hosts: all
  sudo: yes
  tasks:
    - shell: docker-compose --version
      register: compose
      ignore_errors: true
    - shell: curl -L https://github.com/docker/compose/releases/download/1.1.0/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose
      when: compose|failed
    - shell: chmod +x /usr/local/bin/docker-compose
1 config.yaml (Normal file)
@@ -0,0 +1 @@
clients-data-file: /vagrant/clients.json
@@ -1,67 +0,0 @@

Problem: Different execution strategies
---------------------------------------

We will have different order of execution for different actions
(installation, removal, maintenance)

1. Installation and removal of resources should be done in different order.
2. Running maintenance tasks may require completely different order
of actions, and this order can not be described one time for resources,
it should be described for each action.

IMPORTANT: In such case resources are making very little sense,
because we need to define different dependencies and build different
executions graphs for tasks during lifecycle management


Dependency between resources
-----------------------------
Several options to manage ordering between executables

1. Allow user to specify this order
2. Explicitly set requires/require_for in additional entity like profile
3. Deployment flow should reflect data-dependencies between resources

1st option is pretty clear - and we should provide a way for user
to manage dependencies by himself
(even if they will lead to error during execution)

2nd is similar to what is done in fuel, and allows explicitly set
what is expected to be executed. However we should
not hardcode those deps on resources/actions itself. Because it will lead to
tight-coupling, and some workarounds to skip unwanted resource execution.

3rd option is manage dependencies based on what is provided by different
resources. For example input: some_service

Please note that this format is used only to describe intentions.

::

  image:
    ref:
      namespace: docker
      value: base_image

Practically it means that docker resource should be executed before
some_service. And if another_service needs to be connected to some_service

::

  connect:
    ref:
      namespace: some_service
      value: port

But what if there is no data-dependencies?

In such case we can add generic way to extend parameters with its
requirements, like:

::

  requires:
    - ref:
        namespace: node

# (dshulyak) How to add backward dependency? (required_for)
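The removed design note above expresses dependencies as data references (a `ref` with a `namespace` and a `value`). A rough sketch, not the project's actual implementation, of how such refs imply an execution order: each ref adds a provider-to-consumer edge, and a topological sort gives the deployment order (networkx is already a dependency added by this commit):

```python
# Illustration only: the ref structures mirror the examples in the note above;
# building the graph this way is an assumption, not code from this repository.
import networkx as nx

refs = {
    'some_service': [{'namespace': 'docker', 'value': 'base_image'}],
    'another_service': [{'namespace': 'some_service', 'value': 'port'}],
}

g = nx.DiGraph()
for consumer, deps in refs.items():
    for dep in deps:
        g.add_edge(dep['namespace'], consumer)  # provider runs before consumer

print(nx.topological_sort(g))  # ['docker', 'some_service', 'another_service']
```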
@@ -1,33 +0,0 @@

1. Discovery (ansible all -m facter)
Read list of ips and store them, and search for different data on those
hosts

2. Create environment ?? with profile, that provides roles (wraps resources)

3. Add nodes to the env and distribute services

Assign roles (partitions of services) from the profiles to the nodes.
Store history of applied resources.
Role only matters as initial template.

4. Change settings provided by resource.

Imporant/Non important settings ??
We need defaults for some settings.
Different templates ?? for different backends of resources ??

5. Start management

Periodicly applying stuff ??

6. Stop management

We need to be able to stop things

7. Run maintenance

Resources should added to history and management graph will be changed

8. Start management
@@ -1,60 +0,0 @@

Profile is a global wrapper for all resources in environment.
Profile is versioned and executed by particular driver.
Profile is a container for resources.
Resources can be grouped by roles entities.

::

  id: HA
  type: profile
  version: 0.1
  # adapter for any application that satisfies our requirements
  driver: ansible


Role is a logical wrapper of resources.
We will provide "opinionated" wrappers, but user should
be able to compose resource in any way.

::

  roles:
    - id: controller
      type: role
      resources: []


Resource should have deployment logic for several events:
main deployment, removal of resource, scale up of resource ?
Resource should have list of input parameters that resource provides.
Resources are isolated, and should be executable as long as
required data provided.

::

  id: rabbitmq
  type: resource
  driver: ansible_playbook
  actions:
    run: $install_rabbitmq_playbook
  input:
    image: fuel/rabbitmq
    port: 5572
    # we need to be able to select ip addresses
    listen: [{{management.ip}}, {{public.ip}}]


::

  id: nova_compute
  type: resource
  driver: ansible_playbook
  actions:
    run: $link_to_ansible_playbook
    remove: $link_to_another_playbook_that_will_migrate_vms
    maintenance: $link_to_playbook_that_will_put_into_maintenance
  input:
    image: fuel/compute
    driver: kvm
    rabbitmq_hosts: []
@@ -1,18 +0,0 @@

How to approach primary, non-primary resource mangement?
--------------------------------------------------------

It should be possible to avoid storing primary/non-primary flag
for any particular resource.

In ansible there is a way to execute particular task from playbook
only once and on concrete host.

::

  - hosts: [mariadb]
    tasks:
      - debug: msg="Installing first node"
        run_once: true
        delegate_to: groups['mariadb'][0]
      - debug: msg="Installing all other mariadb nodes"
        when: inventory_hostname != groups['mariadb'][0]
@@ -1,14 +0,0 @@

Inventory mechanism should provide an easy way for user to change any
piece of deployment configuration.

It means several things:
1. When writing modules - developer should take into account possibility
of modification it by user. Development may take a little bit longer, but we
are developing tool that will cover not single particular use case,
but a broad range customized production deployments.

2. Each resource should define what is changeable.

On the stage before deployment we will be able to know what resources
are used on the level of node/cluster and modify them the way we want.
@@ -1,8 +0,0 @@

Layers

1. REST API of our CORE service // managing basic information
1.1. Extension API // interface for extensions
2. Orchestration // run tasks, periodic tasks, lifecycle management ??
3. Storage

@@ -1,81 +0,0 @@

We should make network as separate resource for which we should be
able to add custom handlers.

This resource will actually serialize tasks, and provide inventory
information.


Input:

Different entities in custom database, like networks and nodes, maybe
interfaces and other things.

Another input is parameters, like ovs/linux (it may be parameters or
different tasks)

Output:


List of ansible tasks for orhestrator to execute, like

::

  shell: ovs-vsctl add-br {{networks.management.bridge}}

And data to inventory


Networking entities
-----------------------

Network can have a list of subnets that are attached to different node racks.

Each subnets stores l3 parameters, such as cidr/ip ranges.
L2 parameters such as vlans can be stored on network.

Roles should be attached to network, and different subnets can not
be used as different roles per rack.

How it should work:

1. Untagged network created with some l2 parameters like vlan
2. Created subnet for this network with params (10.0.0.0/24)
3. User attaches network to cluster with roles public/management/storage
4. Role can store l2 parameters also (bridge, mtu)
5. User creates rack and uses this subnet
6. IPs assigned for each node in this rack from each subnet
7. During deployment we are creating bridges based on roles.

URIs
-------

/networks/

  vlan
  mtu

/networks/<network_id>/subnets

  cidr
  ip ranges
  gateway

/clusters/<cluster_id>/networks/

  Subset of network attached to cluster

/clusters/<cluster_id>/networks/<network_id>/network_roles

  Roles attached to particular network

/network_roles/

  bridge

/clusters/<cluster_id>/racks/<rack_id>/subnets

/clusters/<cluster_id>/racks/<rack_id>/nodes

@@ -1,94 +0,0 @@
roles:
  role-name:
    name: ""
    description: ""
    conflicts:
      - another_role
    update_required:
      - another_role
    update_once:
      - another_role
    has_primary: true
    limits:
      min: int OR "<<condition dsl>>"
      overrides:
        - condition: "<<condition dsl>>"
          max: 1
        - condition: "<<condition dsl>>"
          reccomended: 3
          message: ""
    restrictions:
      - condition: "<<condition dsl>>"
        message: ""
        action: "hide"
    fault_tolerance: "2%"

task_groups:
  #Stages
  - id: stage_name
    type: stage
    requires: [another_stage]
  #Groups
  - id: task_group_name
    type: group
    role: [role_name]
    requires: [stage_name_requirement]
    required_for: [stage_name_complete_before]
    parameters:
      strategy:
        type: one_by_one
        #OR
        type: parallel
        amount: 6 #Optional concurency limit

tasks:
  - id: task_name_puppet
    type: puppet
    role: '*' #optional role to filter task on, used when in a pre or post deployment stage
    groups: [task_group_name]
    required_for: [task_name, stage_name]
    requires: [task_name, task_group_name, stage_name]
    condition: "<<condition dsl>>"
    parameters:
      puppet_manifest: path_to_manifests
      puppet_modules: path_to_modules
      timeout: 3600
      cwd: /
      test_pre:
        cmd: bash style exec of command to run
      test_post:
        cmd: bash style exec of command to run

  #all have [roles|groups] and requires /// required_for
  - id: task_name_shell
    type: shell
    parameters:
      cmd: bash style exec
      timeout: 180
      retries: 10
      interval: 2

  - id: task_name_upload_file
    type: upload_file
    role: '*'
    parameters:
      path: /etc/hiera/nodes.yaml

  - id: task_name_sync
    type: sync
    role: '*'
    parameters:
      src: rsync://{MASTER_IP}:/puppet/version
      dst: /etc/puppet
      timeout: 180

  - id: task_name_copy_files
    type: copy_files
    role: '*'
    parameters:
      files:
        - src: source_file/{CLUSTER_ID}/
          dst: dest/localtion
      permissions: '0600'
      dir_permissions: '0700'

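The removed schema above lets a group pick a deployment strategy (`one_by_one`, or `parallel` with an optional `amount` concurrency cap). A small illustrative sketch, not code from this repository, of how that field could translate into execution batches:

```python
# Hypothetical helper: node names are made up; only the strategy semantics
# (one_by_one vs. parallel with an optional "amount" cap) come from the schema.
def batches(nodes, strategy):
    if strategy['type'] == 'one_by_one':
        size = 1
    else:  # parallel; "amount" caps concurrency when present
        size = strategy.get('amount') or len(nodes)
    return [nodes[i:i + size] for i in range(0, len(nodes), size)]

print(batches(['node-%d' % i for i in range(1, 8)],
              {'type': 'parallel', 'amount': 6}))
# -> [['node-1', ..., 'node-6'], ['node-7']]
```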
@@ -1,52 +0,0 @@

Entities
------------
We clearly need orchestration entities like:
1. resources/roles/services/profiles

Also we need inventory entities:
2. nodes/networks/ifaces/cluster/release ?

Q: how to allow developer to extend this entities by modules?
Options:
1. Use completely schema-less data model
(i personally more comfortable with sql-like data models)
2. Dont allow anything except standart entities, if developer needs
to manage custom data - he can create its own micro-service and
then integrate it via custom type of resource
(one which perform query to third-part service)


Identities and namespaces
---------------------------
Identities required for several reasons:
- allow reusage of created once entities
- provide clear api to operate with entities
- specify dependencies with identities

Will be root namespace polluted with those entities?

Options:
1. We can create some variable namespace explicitly
2. Or use something like namepsace/entity (example contrail/network)


Multiple options for configuration
----------------------------------

If there will be same parameters defined within different
modules, how this should behave?

1. First option is concatenate several options and make a list of choices.
2. Raise a validation error that certain thing can be enabled with another.

Looks like both should be supported.


Deployment code
----------------

We need to be able to expose all functionality of any
particular deployment tool.

Current challenge: how to specify path to some deployment logic?
92 example.py (Normal file)
@@ -0,0 +1,92 @@
import shutil
import os

from x import resource
from x import signals


signals.Connections.clear()

if os.path.exists('rs'):
    shutil.rmtree('rs')
os.mkdir('rs')

node1 = resource.create('node1', 'x/resources/ro_node/', 'rs/', {'ip':'10.0.0.3', 'ssh_key' : '/vagrant/tmp/keys/ssh_private', 'ssh_user':'vagrant'})
node2 = resource.create('node2', 'x/resources/ro_node/', 'rs/', {'ip':'10.0.0.4', 'ssh_key' : '/vagrant/tmp/keys/ssh_private', 'ssh_user':'vagrant'})
node3 = resource.create('node3', 'x/resources/ro_node/', 'rs/', {'ip':'10.0.0.5', 'ssh_key' : '/vagrant/tmp/keys/ssh_private', 'ssh_user':'vagrant'})

mariadb_service1 = resource.create('mariadb_service1', 'x/resources/mariadb_service', 'rs/', {'image':'mariadb', 'root_password' : 'mariadb', 'port' : '3306', 'ip': '', 'ssh_user': '', 'ssh_key': ''})
keystone_db = resource.create('keystone_db', 'x/resources/mariadb_db/', 'rs/', {'db_name':'keystone_db', 'login_password':'', 'login_user':'root', 'login_port': '', 'ip':'', 'ssh_user':'', 'ssh_key':''})
keystone_db_user = resource.create('keystone_db_user', 'x/resources/mariadb_user/', 'rs/', {'new_user_name' : 'keystone', 'new_user_password' : 'keystone', 'db_name':'', 'login_password':'', 'login_user':'root', 'login_port': '', 'ip':'', 'ssh_user':'', 'ssh_key':''})

keystone_config1 = resource.create('keystone_config1', 'x/resources/keystone_config/', 'rs/', {'config_dir' : '/etc/solar/keystone', 'ip':'', 'ssh_user':'', 'ssh_key':'', 'admin_token':'admin', 'db_password':'', 'db_name':'', 'db_user':'', 'db_host':''})
keystone_service1 = resource.create('keystone_service1', 'x/resources/keystone_service/', 'rs/', {'port':'5000', 'admin_port':'35357', 'ip':'', 'ssh_key':'', 'ssh_user':'', 'config_dir':'', 'config_dir':''})

keystone_config2 = resource.create('keystone_config2', 'x/resources/keystone_config/', 'rs/', {'config_dir' : '/etc/solar/keystone', 'ip':'', 'ssh_user':'', 'ssh_key':'', 'admin_token':'admin', 'db_password':'', 'db_name':'', 'db_user':'', 'db_host':''})
keystone_service2 = resource.create('keystone_service2', 'x/resources/keystone_service/', 'rs/', {'port':'5000', 'admin_port':'35357', 'ip':'', 'ssh_key':'', 'ssh_user':'', 'config_dir':'', 'config_dir':''})


haproxy_keystone_config = resource.create('haproxy_keystone1_config', 'x/resources/haproxy_config/', 'rs/', {'name':'keystone_config', 'listen_port':'5000', 'servers':[], 'ports':[]})
haproxy_config = resource.create('haproxy_config', 'x/resources/haproxy', 'rs/', {'ip':'', 'ssh_key':'', 'ssh_user':'', 'configs_names':[], 'configs_ports':[], 'listen_ports':[], 'configs':[]})
haproxy_service = resource.create('haproxy_service', 'x/resources/docker_container/', 'rs/', {'image' : 'tutum/haproxy', 'ports': [], 'host_binds': [], 'volume_binds':[], 'ip':'', 'ssh_key':'', 'ssh_user':''})


####
# connections
####

#mariadb
signals.connect(node1, mariadb_service1)

#keystone db
signals.connect(node1, keystone_db)
signals.connect(mariadb_service1, keystone_db, {'root_password':'login_password', 'port':'login_port'})

# keystone_db_user
signals.connect(node1, keystone_db_user)
signals.connect(mariadb_service1, keystone_db_user, {'root_password':'login_password', 'port':'login_port'})
signals.connect(keystone_db, keystone_db_user, {'db_name':'db_name'})

signals.connect(node1, keystone_config1)
signals.connect(mariadb_service1, keystone_config1, {'ip':'db_host'})
signals.connect(keystone_db_user, keystone_config1, {'db_name':'db_name', 'new_user_name':'db_user', 'new_user_password':'db_password'})

signals.connect(node1, keystone_service1)
signals.connect(keystone_config1, keystone_service1, {'config_dir': 'config_dir'})

signals.connect(node2, keystone_config2)
signals.connect(mariadb_service1, keystone_config2, {'ip':'db_host'})
signals.connect(keystone_db_user, keystone_config2, {'db_name':'db_name', 'new_user_name':'db_user', 'new_user_password':'db_password'})

signals.connect(node2, keystone_service2)
signals.connect(keystone_config2, keystone_service2, {'config_dir': 'config_dir'})

signals.connect(keystone_service1, haproxy_keystone_config, {'ip':'servers', 'port':'ports'})

signals.connect(node1, haproxy_config)
signals.connect(haproxy_keystone_config, haproxy_config, {'listen_port': 'listen_ports', 'name':'configs_names', 'ports' : 'configs_ports', 'servers':'configs'})

signals.connect(node1, haproxy_service)
signals.connect(haproxy_config, haproxy_service, {'listen_ports':'ports', 'config_dir':'host_binds'})


#run
from x import actions

actions.resource_action(mariadb_service1, 'run')
actions.resource_action(keystone_db, 'run')
actions.resource_action(keystone_db_user, 'run')
actions.resource_action(keystone_config1, 'run')
actions.resource_action(keystone_service1, 'run')
actions.resource_action(haproxy_config, 'run')
actions.resource_action(haproxy_service, 'run')


#remove
actions.resource_action(haproxy_service, 'remove')
actions.resource_action(haproxy_config, 'remove')
actions.resource_action(keystone_service1, 'remove')
actions.resource_action(keystone_config1, 'remove')
actions.resource_action(keystone_db_user, 'remove')
actions.resource_action(keystone_db, 'remove')
actions.resource_action(mariadb_service1, 'remove')
@@ -1,14 +0,0 @@
docker:
  base_image: ubuntu

rabbitmq:
  image: tutum/rabbitmq
  name: rabbit-test1

user:
  name: test_name
  password: test_pass

mariadb:
  name: maria-test
  image: tutum/mariadb
@@ -1,6 +0,0 @@

networks:
  default:
    ip: 10.0.0.2
    cidr: 10.0.0.0/24
    interface: eth1
@@ -1,20 +0,0 @@

first ansible_connection=local ansible_ssh_host=10.0.0.2
second ansible_ssh_host=10.0.0.3

[docker]

first
second

[rabbitmq]

first

[user]

first

[mariadb]

first
@@ -1,17 +0,0 @@

- hosts: [service/mariadb]
  sudo: yes
  tasks:
    - shell: echo {{name}} >> /var/lib/solar/containers_list
    - shell: docker ps | grep -q {{name}}
      ignore_errors: true
      register: is_running
    - shell: docker run \
        -d \
        --net="host" \
        --privileged \
        --name {{name}} \
        -e "MARIADB_ROOT_PASSWORD={{root_password}}" \
        -e "BIND_ADDRESS={{bind_ip}}" \
        {{image}}
      when: is_running|failed
@@ -1,6 +0,0 @@

- hosts: [rabbitmq]
  sudo: yes
  tasks:
    - shell: docker stop {{ rabbitmq.name }}
    - shell: docker rm {{ rabbitmq.name }}
@@ -1,6 +0,0 @@

- hosts: [rabbitmq]
  sudo: yes
  tasks:
    - shell: docker run --net="host" --privileged \
        --name {{ rabbitmq.name }} -d {{ rabbitmq.image }}
@@ -1,5 +0,0 @@

- include: user/remove.yml
- include: rabbitmq/remove.yml
- include: mariadb/remove.yml
- include: docker/remove.yml
@@ -1,5 +0,0 @@

- include: docker/run.yml
- include: rabbitmq/run.yml
- include: mariadb/run.yml
- include: user/run.yml
@@ -1,12 +0,0 @@

- hosts: [rabbitmq]
  sudo: yes
  tasks:
    - shell: docker exec -i {{rabbitmq.name}} /usr/sbin/rabbitmqctl delete_user {{user.name}}
      run_once: true

- hosts: [mariadb]
  sudo: yes
  tasks:
    - command: docker exec -t {{mariadb.name}} \
        mysql -uroot -e "DROP USER '{{user.name}}'"
@@ -1,6 +0,0 @@

- hosts: [rabbitmq]
  sudo: yes
  tasks:
    - command: docker exec -t {{rabbitmq.name}} /usr/sbin/rabbitmqctl add_user {{user.name}} {{user.password}}
      run_once: true
46 haproxy.cfg (Normal file)
@@ -0,0 +1,46 @@
global
    log 127.0.0.1 local0
    log 127.0.0.1 local1 notice
    maxconn 4096
    tune.ssl.default-dh-param 2048
    pidfile /var/run/haproxy.pid
    user haproxy
    group haproxy
    daemon
    stats socket /var/run/haproxy.stats level admin
    ssl-default-bind-options no-sslv3

defaults
    log global
    mode http
    option redispatch
    option httplog
    option dontlognull
    option forwardfor
    timeout connect 5000
    timeout client 50000
    timeout server 50000

#frontend default_frontend
#    bind 0.0.0.0:80
#    default_backend default_service

#backend default_service
#    balance roundrobin

{% for service in haproxy_services %}
listen {{ service['name'] }} 0.0.0.0:{{ service['listen_port'] }}
    mode http
    stats enable
    stats uri /haproxy?stats
    stats realm Strictly\ Private
    stats auth A_Username:YourPassword
    stats auth Another_User:passwd
    balance roundrobin
    option httpclose
    option forwardfor
    {% for server in service['servers'] %}
    server {{ server['name'] }} {{ server['ip'] }}:{{ server['port'] }} check
    {% endfor %}

{% endfor %}
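The haproxy.cfg above is a Jinja2 template (jinja2 is pinned in requirements.txt later in this commit). A quick sketch of rendering it; the `haproxy_services` variable and its keys come from the template itself, while the concrete server values are made-up sample data:

```python
# Sample rendering only; the values below are invented for illustration.
from jinja2 import Template

with open('haproxy.cfg') as f:
    template = Template(f.read())

print(template.render(haproxy_services=[{
    'name': 'keystone',
    'listen_port': 5000,
    'servers': [
        {'name': 'keystone1', 'ip': '10.0.0.3', 'port': 5000},
        {'name': 'keystone2', 'ip': '10.0.0.4', 'port': 5000},
    ],
}]))
```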
1 haproxy_deployment/__init__.py (Normal file)
@@ -0,0 +1 @@
__author__ = 'przemek'
50 haproxy_deployment/haproxy-deployment.sh (Executable file)
@@ -0,0 +1,50 @@
#!/usr/bin/env bash
# HAProxy deployment with Keystone and Nova

set -e

cd /vagrant

rm clients.json
rm -Rf rs/*

# Create resources
python cli.py resource create node1 x/resources/ro_node/ rs/ '{"ip":"10.0.0.3", "ssh_key" : "/vagrant/tmp/keys/ssh_private", "ssh_user":"vagrant"}'
python cli.py resource create node2 x/resources/ro_node/ rs/ '{"ip":"10.0.0.4", "ssh_key" : "/vagrant/tmp/keys/ssh_private", "ssh_user":"vagrant"}'
python cli.py resource create node3 x/resources/ro_node/ rs/ '{"ip":"10.0.0.5", "ssh_key" : "/vagrant/tmp/keys/ssh_private", "ssh_user":"vagrant"}'
python cli.py resource create node4 x/resources/ro_node/ rs/ '{"ip":"10.0.0.6", "ssh_key" : "/vagrant/tmp/keys/ssh_private", "ssh_user":"vagrant"}'
python cli.py resource create node5 x/resources/ro_node/ rs/ '{"ip":"10.0.0.7", "ssh_key" : "/vagrant/tmp/keys/ssh_private", "ssh_user":"vagrant"}'

python cli.py resource create mariadb_keystone1_data x/resources/data_container/ rs/ '{"image": "mariadb", "export_volumes" : ["/var/lib/mysql"], "ip": "", "ssh_user": "", "ssh_key": ""}'
python cli.py resource create mariadb_keystone2_data x/resources/data_container/ rs/ '{"image": "mariadb", "export_volumes" : ["/var/lib/mysql"], "ip": "", "ssh_user": "", "ssh_key": ""}'
python cli.py resource create keystone1 x/resources/keystone/ rs/ '{"ip": "", "ssh_user": "", "ssh_key": ""}'
python cli.py resource create keystone2 x/resources/keystone/ rs/ '{"ip": "", "ssh_user": "", "ssh_key": ""}'
python cli.py resource create haproxy_keystone_config x/resources/haproxy_config/ rs/ '{"servers": {}, "ssh_user": "", "ssh_key": ""}'

python cli.py resource create mariadb_nova1_data x/resources/data_container/ rs/ '{"image" : "mariadb", "export_volumes" : ["/var/lib/mysql"], "ip": "", "ssh_user": "", "ssh_key": ""}'
python cli.py resource create mariadb_nova2_data x/resources/data_container/ rs/ '{"image" : "mariadb", "export_volumes" : ["/var/lib/mysql"], "ip": "", "ssh_user": "", "ssh_key": ""}'
python cli.py resource create nova1 x/resources/nova/ rs/ '{"ip": "", "ssh_user": "", "ssh_key": ""}'
python cli.py resource create nova2 x/resources/nova/ rs/ '{"ip": "", "ssh_user": "", "ssh_key": ""}'
python cli.py resource create haproxy_nova_config x/resources/haproxy_config/ rs/ '{"ip": "", "servers": {}, "ssh_user": "", "ssh_key": ""}'

python cli.py resource create haproxy x/resources/haproxy/ rs/ '{"ip": "", "configs": {}, "ssh_user": "", "ssh_key": ""}'


# Connect resources
python cli.py connect rs/node1 rs/mariadb_keystone1_data
python cli.py connect rs/node2 rs/mariadb_keystone2_data
python cli.py connect rs/mariadb_keystone1_data rs/keystone1
python cli.py connect rs/mariadb_keystone2_data rs/keystone2
python cli.py connect rs/keystone1 rs/haproxy_keystone_config --mapping '{"ip": "servers"}'
python cli.py connect rs/keystone2 rs/haproxy_keystone_config --mapping '{"ip": "servers"}'

python cli.py connect rs/node3 rs/mariadb_nova1_data
python cli.py connect rs/node4 rs/mariadb_nova2_data
python cli.py connect rs/mariadb_nova1_data rs/nova1
python cli.py connect rs/mariadb_nova2_data rs/nova2
python cli.py connect rs/nova1 rs/haproxy_nova_config --mapping '{"ip": "servers"}'
python cli.py connect rs/nova2 rs/haproxy_nova_config --mapping '{"ip": "servers"}'

python cli.py connect rs/node5 rs/haproxy
python cli.py connect rs/haproxy_keystone_config rs/haproxy --mapping '{"server": "configs"}'
python cli.py connect rs/haproxy_nova_config rs/haproxy --mapping '{"server": "configs"}'
216 haproxy_deployment/haproxy-deployment.yaml (Executable file)
@@ -0,0 +1,216 @@
# HAProxy deployment with MariaDB, Keystone and Nova

workdir: /vagrant
resource-save-path: rs/
test-suite: haproxy_deployment.haproxy_deployment

resources:
  - name: node1
    model: x/resources/ro_node/
    args:
      ip: 10.0.0.3
      ssh_key: /vagrant/.vagrant/machines/solar-dev2/virtualbox/private_key
      ssh_user: vagrant
  - name: node2
    model: x/resources/ro_node/
    args:
      ip: 10.0.0.4
      ssh_key: /vagrant/.vagrant/machines/solar-dev3/virtualbox/private_key
      ssh_user: vagrant
  - name: node3
    model: x/resources/ro_node/
    args:
      ip: 10.0.0.5
      ssh_key: /vagrant/.vagrant/machines/solar-dev4/virtualbox/private_key
      ssh_user: vagrant
  - name: node4
    model: x/resources/ro_node/
    args:
      ip: 10.0.0.6
      ssh_key: /vagrant/.vagrant/machines/solar-dev5/virtualbox/private_key
      ssh_user: vagrant
  - name: node5
    model: x/resources/ro_node/
    args:
      ip: 10.0.0.7
      ssh_key: /vagrant/.vagrant/machines/solar-dev6/virtualbox/private_key
      ssh_user: vagrant

  - name: mariadb_keystone1_data
    model: x/resources/data_container/
    args:
      image: mariadb
      export_volumes:
        - /var/lib/mysql
      ip:
      ssh_user:
      ssh_key:
  - name: mariadb_keystone2_data
    model: x/resources/data_container/
    args:
      image: mariadb
      export_volumes:
        - /var/lib/mysql
      ip:
      ssh_user:
      ssh_key:
  - name: keystone1
    model: x/resources/keystone/
    args:
      admin_port: 35357
      port: 5000
      image: TEST
      config_dir: /etc/solar/keystone1
      ip:
      ssh_user:
      ssh_key:
  - name: keystone2
    model: x/resources/keystone/
    args:
      admin_port: 35357
      port: 5000
      config_dir: /etc/solar/keystone2
      image: TEST
      ip:
      ssh_user:
      ssh_key:
  - name: haproxy_keystone_config
    model: x/resources/haproxy_config/
    args:
      name: keystone
      servers: []
      listen_port: 5000
      ports: []
      ssh_user:
      ssh_key:

  - name: mariadb_nova1_data
    model: x/resources/data_container/
    args:
      image: mariadb
      export_volumes:
        - /var/lib/mysql
      ip:
      ssh_user:
      ssh_key:
  - name: mariadb_nova2_data
    model: x/resources/data_container/
    args:
      image: mariadb
      export_volumes:
        - /var/lib/mysql
      ip:
      ssh_user:
      ssh_key:
  - name: nova1
    model: x/resources/nova/
    args:
      ip:
      image: TEST
      ssh_user:
      ssh_key:
  - name: nova2
    model: x/resources/nova/
    args:
      ip:
      image: TEST
      ssh_user:
      ssh_key:
  - name: haproxy_nova_config
    model: x/resources/haproxy_config/
    args:
      name: nova
      servers: []
      listen_port: 8774
      ports: []
      ssh_user:
      ssh_key:

  - name: haproxy-config
    model: x/resources/haproxy/
    args:
      ip:
      listen_ports: []
      configs: []
      configs_names: []
      configs_ports: []
      ssh_user:
      ssh_key:
  - name: haproxy
    model: x/resources/docker_container
    args:
      ip:
      image: tutum/haproxy
      ports: []
      ssh_user:
      ssh_key:
      host_binds: []
      volume_binds: []


connections:
  - emitter: node1
    receiver: mariadb_keystone1_data
  - emitter: node2
    receiver: mariadb_keystone2_data
  - emitter: mariadb_keystone1_data
    receiver: keystone1
  - emitter: mariadb_keystone2_data
    receiver: keystone2
  - emitter: keystone1
    receiver: haproxy_keystone_config
    mapping:
      ip: servers
      port: ports
  - emitter: keystone2
    receiver: haproxy_keystone_config
    mapping:
      ip: servers
      port: ports

  - emitter: node3
    receiver: mariadb_nova1_data
  - emitter: node4
    receiver: mariadb_nova2_data
  - emitter: mariadb_nova1_data
    receiver: nova1
  - emitter: mariadb_nova2_data
    receiver: nova2
  - emitter: nova1
    receiver: haproxy_nova_config
    mapping:
      ip: servers
      port: ports
  - emitter: nova2
    receiver: haproxy_nova_config
    mapping:
      ip: servers
      port: ports

  # HAProxy config container
  - emitter: node5
    receiver: haproxy-config

  - emitter: haproxy_keystone_config
    receiver: haproxy-config
    mapping:
      listen_port: listen_ports
      name: configs_names
      ports: configs_ports
      servers: configs
  - emitter: haproxy_nova_config
    receiver: haproxy-config
    mapping:
      listen_port: listen_ports
      name: configs_names
      ports: configs_ports
      servers: configs

  - emitter: haproxy-config
    receiver: haproxy
    mapping:
      ip: ip
      listen_ports: ports
      ssh_user: ssh_user
      ssh_key: ssh_key
      config_dir: host_binds
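The deployment YAML above is the kind of file that `cli.py deploy` hands to `x/deployment.py` (xd.deploy), which is not part of this diff. A simplified sketch of what such a driver presumably does with it, reusing the `resource.create` and `signals.connect` calls shown in example.py; the exact handling of `model`, `resource-save-path`, and `mapping` is an assumption:

```python
# Assumed behaviour, not the actual x/deployment.py from this repository.
import yaml

from x import resource as xr
from x import signals as xs

with open('haproxy_deployment/haproxy-deployment.yaml') as f:
    cfg = yaml.safe_load(f)

created = {}
for spec in cfg['resources']:
    # model = resource template directory, resource-save-path = destination
    created[spec['name']] = xr.create(
        spec['name'], spec['model'], cfg['resource-save-path'], spec['args'])

for conn in cfg['connections']:
    xs.connect(created[conn['emitter']], created[conn['receiver']],
               mapping=conn.get('mapping'))
```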
111 haproxy_deployment/haproxy_deployment.py (Normal file)
@@ -0,0 +1,111 @@
import unittest

from x import db


class TestHAProxyDeployment(unittest.TestCase):
    def test_keystone_config(self):
        node1 = db.get_resource('node1')
        node2 = db.get_resource('node2')
        keystone1 = db.get_resource('keystone1')
        keystone2 = db.get_resource('keystone2')

        self.assertEqual(keystone1.args['ip'], node1.args['ip'])
        self.assertEqual(keystone2.args['ip'], node2.args['ip'])

    def test_haproxy_keystone_config(self):
        keystone1 = db.get_resource('keystone1')
        keystone2 = db.get_resource('keystone2')
        haproxy_keystone_config = db.get_resource('haproxy_keystone_config')

        self.assertEqual(
            [ip['value'] for ip in haproxy_keystone_config.args['servers'].value],
            [
                keystone1.args['ip'],
                keystone2.args['ip'],
            ]
        )
        self.assertEqual(
            [p['value'] for p in haproxy_keystone_config.args['ports'].value],
            [
                keystone1.args['port'],
                keystone2.args['port'],
            ]
        )

    def test_nova_config(self):
        node3 = db.get_resource('node3')
        node4 = db.get_resource('node4')
        nova1 = db.get_resource('nova1')
        nova2 = db.get_resource('nova2')

        self.assertEqual(nova1.args['ip'], node3.args['ip'])
        self.assertEqual(nova2.args['ip'], node4.args['ip'])

    def test_haproxy_nova_config(self):
        nova1 = db.get_resource('nova1')
        nova2 = db.get_resource('nova2')
        haproxy_nova_config = db.get_resource('haproxy_nova_config')

        self.assertEqual(
            [ip['value'] for ip in haproxy_nova_config.args['servers'].value],
            [
                nova1.args['ip'],
                nova2.args['ip'],
            ]
        )
        self.assertEqual(
            [p['value'] for p in haproxy_nova_config.args['ports'].value],
            [
                nova1.args['port'],
                nova2.args['port'],
            ]
        )

    def test_haproxy(self):
        node5 = db.get_resource('node5')
        haproxy_keystone_config = db.get_resource('haproxy_keystone_config')
        haproxy_nova_config = db.get_resource('haproxy_nova_config')
        haproxy = db.get_resource('haproxy')
        haproxy_config = db.get_resource('haproxy-config')

        self.assertEqual(node5.args['ip'], haproxy.args['ip'])
        self.assertEqual(node5.args['ssh_key'], haproxy.args['ssh_key'])
        self.assertEqual(node5.args['ssh_user'], haproxy.args['ssh_user'])
        self.assertEqual(
            [c['value'] for c in haproxy_config.args['configs'].value],
            [
                haproxy_keystone_config.args['servers'],
                haproxy_nova_config.args['servers'],
            ]
        )
        self.assertEqual(
            [cp['value'] for cp in haproxy_config.args['configs_ports'].value],
            [
                haproxy_keystone_config.args['ports'],
                haproxy_nova_config.args['ports'],
            ]
        )
        self.assertEqual(
            [lp['value'] for lp in haproxy_config.args['listen_ports'].value],
            [
                haproxy_keystone_config.args['listen_port'],
                haproxy_nova_config.args['listen_port'],
            ]
        )
        self.assertEqual(
            [
                haproxy_config.args['config_dir'],
            ],
            [hb['value'] for hb in haproxy.args['host_binds'].value]
        )
        self.assertEqual(
            haproxy.args['ports'],
            haproxy_config.args['listen_ports'],
        )


def main():
    loader = unittest.TestLoader()
    suite = loader.loadTestsFromTestCase(TestHAProxyDeployment)
    unittest.TextTestRunner().run(suite)
13 main.yml
@@ -11,5 +11,16 @@
 - apt: name=virtualenvwrapper state=present
 - apt: name=ipython state=present
 - apt: name=python-pudb state=present
+- apt: name=python-pip state=present
+- apt: name=python-mysqldb state=present
+- shell: pip install docker-py==1.1.0
+
+# requirements
+- shell: pip install -r /vagrant/requirements.txt
+
+# Graph drawing
+#- apt: name=python-matplotlib state=present
+- apt: name=python-pygraphviz state=present
+
 # Setup development env for solar
-- shell: python setup.py develop chdir=/vagrant/solar
+#- shell: python setup.py develop chdir=/vagrant/solar
4 requirements.txt (Normal file)
@@ -0,0 +1,4 @@
click==4.0
jinja2==2.7.3
networkx==1.9.1
PyYAML==3.11
43 simple-deployment.yaml (Executable file)
@@ -0,0 +1,43 @@
# HAProxy deployment with MariaDB, Keystone and Nova

workdir: /vagrant
resource-save-path: rs/
#test-suite: haproxy_deployment.haproxy_deployment

resources:
  - name: node1
    model: x/resources/ro_node/
    args:
      ip: 10.0.0.3
      ssh_key: /vagrant/.vagrant/machines/solar-dev2/virtualbox/private_key
      ssh_user: vagrant

  - name: keystone1
    model: x/resources/keystone/
    args:
      ip:
      image: TEST
      ssh_user:
      ssh_key:

  - name: haproxy_keystone_config
    model: x/resources/haproxy_config/
    args:
      listen_port: 5000
      ports: {}
      servers: {}


connections:
  - emitter: node1
    receiver: keystone1

  # Multiple subscription test
  - emitter: node1
    receiver: keystone1

  - emitter: keystone1
    receiver: haproxy_keystone_config
    mapping:
      ip: servers
      port: ports
@@ -1,2 +0,0 @@
include *.txt
recursive-include solar/ *
@@ -1,50 +0,0 @@
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os

from setuptools import find_packages
from setuptools import setup


def find_requires():
    prj_root = os.path.dirname(os.path.realpath(__file__))
    requirements = []
    with open(u'{0}/requirements.txt'.format(prj_root), 'r') as reqs:
        requirements = reqs.readlines()
    return requirements


setup(
    name='solar',
    version='0.0.1',
    description='Deployment tool',
    long_description="""Deployment tool""",
    classifiers=[
        "Development Status :: 1 - Beta",
        "License :: OSI Approved :: Apache Software License",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
        "Topic :: System :: Software Distribution"],
    author='Mirantis Inc.',
    author_email='product@mirantis.com',
    url='http://mirantis.com',
    keywords='deployment',
    packages=find_packages(),
    zip_safe=False,
    install_requires=find_requires(),
    include_package_data=True,
    entry_points={
        'console_scripts': [
            'solar = solar.cli:main']})
@@ -1,17 +0,0 @@
from solar import extensions
from solar import errors


class ExtensionsManager(object):

    def __init__(self, profile):
        self.profile = profile

    def get_data(self, key):
        """Finds data by extensions provider"""
        providers = filter(lambda e: key in e.PROVIDES, extensions.get_all_extensions())

        if not providers:
            raise errors.CannotFindExtension('Cannot find extension which provides "{0}"'.format(key))

        return getattr(providers[0](self.profile), key)()
@@ -1,10 +0,0 @@

class Profile(object):

    def __init__(self, profile):
        self._profile = profile
        self.tags = set(profile['tags'])
        self.extensions = profile.get('extensions', [])

    def get(self, key):
        return self._profile.get(key, None)
@@ -1,10 +0,0 @@
class SolarError(Exception):
    pass


class CannotFindID(SolarError):
    pass


class CannotFindExtension(SolarError):
    pass
@@ -1,28 +0,0 @@
from solar.interfaces.db import get_db


class BaseExtension(object):

    ID = None
    NAME = None
    PROVIDES = []

    def __init__(self, profile, core_manager=None, config=None):
        self.config = config or {}
        self.uid = self.ID
        self.db = get_db()
        self.profile = profile

        from solar.core.extensions_manager import ExtensionsManager
        self.core = core_manager or ExtensionsManager(self.profile)

    def prepare(self):
        """Make some changes in database state."""

    @property
    def input(self):
        return self.config.get('input', {})

    @property
    def output(self):
        return self.config.get('output', {})
@ -1,9 +0,0 @@
|
|||||||
from solar.interfaces.db.file_system_db import FileSystemDB
|
|
||||||
|
|
||||||
mapping = {
|
|
||||||
'file_system': FileSystemDB
|
|
||||||
}
|
|
||||||
|
|
||||||
def get_db():
|
|
||||||
# Should be retrieved from config
|
|
||||||
return mapping['file_system']()
|
|
0
solar/solar/third_party/__init__.py
vendored
0
solar/solar/third_party/__init__.py
vendored
303
solar/solar/third_party/dir_dbm.py
vendored
303
solar/solar/third_party/dir_dbm.py
vendored
@ -1,303 +0,0 @@
|
|||||||
# -*- test-case-name: twisted.test.test_dirdbm -*-
|
|
||||||
#
|
|
||||||
# Copyright (c) Twisted Matrix Laboratories.
|
|
||||||
# See LICENSE for details.
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
"""
|
|
||||||
DBM-style interface to a directory.
|
|
||||||
Each key is stored as a single file. This is not expected to be very fast or
|
|
||||||
efficient, but it's good for easy debugging.
|
|
||||||
DirDBMs are *not* thread-safe, they should only be accessed by one thread at
|
|
||||||
a time.
|
|
||||||
No files should be placed in the working directory of a DirDBM save those
|
|
||||||
created by the DirDBM itself!
|
|
||||||
Maintainer: Itamar Shtull-Trauring
|
|
||||||
"""
|
|
||||||
|
|
||||||
|
|
||||||
import os
|
|
||||||
import types
|
|
||||||
import base64
|
|
||||||
import glob
|
|
||||||
|
|
||||||
try:
|
|
||||||
import cPickle as pickle
|
|
||||||
except ImportError:
|
|
||||||
import pickle
|
|
||||||
|
|
||||||
try:
|
|
||||||
_open
|
|
||||||
except NameError:
|
|
||||||
_open = open
|
|
||||||
|
|
||||||
|
|
||||||
class DirDBM(object):
|
|
||||||
"""A directory with a DBM interface.
|
|
||||||
|
|
||||||
This class presents a hash-like interface to a directory of small,
|
|
||||||
flat files. It can only use strings as keys or values.
|
|
||||||
"""
|
|
||||||
|
|
||||||
def __init__(self, name):
|
|
||||||
"""
|
|
||||||
@type name: str
|
|
||||||
@param name: Base path to use for the directory storage.
|
|
||||||
"""
|
|
||||||
self.dname = os.path.abspath(name)
|
|
||||||
if not os.path.isdir(self.dname):
|
|
||||||
os.mkdir(self.dname)
|
|
||||||
else:
|
|
||||||
# Run recovery, in case we crashed. we delete all files ending
|
|
||||||
# with ".new". Then we find all files who end with ".rpl". If a
|
|
||||||
# corresponding file exists without ".rpl", we assume the write
|
|
||||||
# failed and delete the ".rpl" file. If only a ".rpl" exist we
|
|
||||||
# assume the program crashed right after deleting the old entry
|
|
||||||
# but before renaming the replacement entry.
|
|
||||||
#
|
|
||||||
# NOTE: '.' is NOT in the base64 alphabet!
|
|
||||||
for f in glob.glob(os.path.join(self.dname, "*.new")):
|
|
||||||
os.remove(f)
|
|
||||||
replacements = glob.glob(os.path.join(self.dname, "*.rpl"))
|
|
||||||
for f in replacements:
|
|
||||||
old = f[:-4]
|
|
||||||
if os.path.exists(old):
|
|
||||||
os.remove(f)
|
|
||||||
else:
|
|
||||||
os.rename(f, old)
|
|
||||||
|
|
||||||
def _encode(self, k):
|
|
||||||
"""Encode a key so it can be used as a filename.
|
|
||||||
"""
|
|
||||||
# NOTE: '_' is NOT in the base64 alphabet!
|
|
||||||
return base64.encodestring(k).replace('\n', '_').replace("/", "-")
|
|
||||||
|
|
||||||
def _decode(self, k):
|
|
||||||
"""Decode a filename to get the key.
|
|
||||||
"""
|
|
||||||
return base64.decodestring(k.replace('_', '\n').replace("-", "/"))
|
|
||||||
|
|
||||||
def _readFile(self, path):
|
|
||||||
"""Read in the contents of a file.
|
|
||||||
|
|
||||||
Override in subclasses to e.g. provide transparently encrypted dirdbm.
|
|
||||||
"""
|
|
||||||
f = _open(path, "rb")
|
|
||||||
s = f.read()
|
|
||||||
f.close()
|
|
||||||
return s
|
|
||||||
|
|
||||||
def _writeFile(self, path, data):
|
|
||||||
"""Write data to a file.
|
|
||||||
|
|
||||||
Override in subclasses to e.g. provide transparently encrypted dirdbm.
|
|
||||||
"""
|
|
||||||
f = _open(path, "wb")
|
|
||||||
f.write(data)
|
|
||||||
f.flush()
|
|
||||||
f.close()
|
|
||||||
|
|
||||||
def __len__(self):
|
|
||||||
"""
|
|
||||||
@return: The number of key/value pairs in this Shelf
|
|
||||||
"""
|
|
||||||
return len(os.listdir(self.dname))
|
|
||||||
|
|
||||||
def __setitem__(self, k, v):
|
|
||||||
"""
|
|
||||||
C{dirdbm[k] = v}
|
|
||||||
Create or modify a textfile in this directory
|
|
||||||
@type k: str
|
|
||||||
@param k: key to set
|
|
||||||
|
|
||||||
@type v: str
|
|
||||||
@param v: value to associate with C{k}
|
|
||||||
"""
|
|
||||||
assert type(k) == types.StringType, "DirDBM key must be a string"
|
|
||||||
# NOTE: Can be not a string if _writeFile in the child is redefined
|
|
||||||
# assert type(v) == types.StringType, "DirDBM value must be a string"
|
|
||||||
k = self._encode(k)
|
|
||||||
|
|
||||||
# we create a new file with extension .new, write the data to it, and
|
|
||||||
# if the write succeeds delete the old file and rename the new one.
|
|
||||||
old = os.path.join(self.dname, k)
|
|
||||||
if os.path.exists(old):
|
|
||||||
new = old + ".rpl" # replacement entry
|
|
||||||
else:
|
|
||||||
new = old + ".new" # new entry
|
|
||||||
try:
|
|
||||||
self._writeFile(new, v)
|
|
||||||
except:
|
|
||||||
os.remove(new)
|
|
||||||
raise
|
|
||||||
else:
|
|
||||||
if os.path.exists(old): os.remove(old)
|
|
||||||
os.rename(new, old)
|
|
||||||
|
|
||||||
def __getitem__(self, k):
|
|
||||||
"""
|
|
||||||
C{dirdbm[k]}
|
|
||||||
Get the contents of a file in this directory as a string.
|
|
||||||
|
|
||||||
@type k: str
|
|
||||||
@param k: key to lookup
|
|
||||||
|
|
||||||
@return: The value associated with C{k}
|
|
||||||
@raise KeyError: Raised when there is no such key
|
|
||||||
"""
|
|
||||||
assert type(k) == types.StringType, "DirDBM key must be a string"
|
|
||||||
path = os.path.join(self.dname, self._encode(k))
|
|
||||||
try:
|
|
||||||
return self._readFile(path)
|
|
||||||
except:
|
|
||||||
raise KeyError, k
|
|
||||||
|
|
||||||
def __delitem__(self, k):
|
|
||||||
"""
|
|
||||||
C{del dirdbm[foo]}
|
|
||||||
Delete a file in this directory.
|
|
||||||
|
|
||||||
@type k: str
|
|
||||||
@param k: key to delete
|
|
||||||
|
|
||||||
@raise KeyError: Raised when there is no such key
|
|
||||||
"""
|
|
||||||
assert type(k) == types.StringType, "DirDBM key must be a string"
|
|
||||||
k = self._encode(k)
|
|
||||||
try: os.remove(os.path.join(self.dname, k))
|
|
||||||
except (OSError, IOError): raise KeyError(self._decode(k))
|
|
||||||
|
|
||||||
def keys(self):
|
|
||||||
"""
|
|
||||||
@return: a C{list} of filenames (keys).
|
|
||||||
"""
|
|
||||||
return map(self._decode, os.listdir(self.dname))
|
|
||||||
|
|
||||||
def values(self):
|
|
||||||
"""
|
|
||||||
@return: a C{list} of file-contents (values).
|
|
||||||
"""
|
|
||||||
vals = []
|
|
||||||
keys = self.keys()
|
|
||||||
for key in keys:
|
|
||||||
vals.append(self[key])
|
|
||||||
return vals
|
|
||||||
|
|
||||||
def items(self):
|
|
||||||
"""
|
|
||||||
@return: a C{list} of 2-tuples containing key/value pairs.
|
|
||||||
"""
|
|
||||||
items = []
|
|
||||||
keys = self.keys()
|
|
||||||
for key in keys:
|
|
||||||
items.append((key, self[key]))
|
|
||||||
return items
|
|
||||||
|
|
||||||
def has_key(self, key):
|
|
||||||
"""
|
|
||||||
@type key: str
|
|
||||||
@param key: The key to test
|
|
||||||
|
|
||||||
@return: A true value if this dirdbm has the specified key, a false
|
|
||||||
value otherwise.
|
|
||||||
"""
|
|
||||||
assert type(key) == types.StringType, "DirDBM key must be a string"
|
|
||||||
key = self._encode(key)
|
|
||||||
return os.path.isfile(os.path.join(self.dname, key))
|
|
||||||
|
|
||||||
def setdefault(self, key, value):
|
|
||||||
"""
|
|
||||||
@type key: str
|
|
||||||
@param key: The key to lookup
|
|
||||||
|
|
||||||
@param value: The value to associate with key if key is not already
|
|
||||||
associated with a value.
|
|
||||||
"""
|
|
||||||
if not self.has_key(key):
|
|
||||||
self[key] = value
|
|
||||||
return value
|
|
||||||
return self[key]
|
|
||||||
|
|
||||||
def get(self, key, default = None):
|
|
||||||
"""
|
|
||||||
@type key: str
|
|
||||||
@param key: The key to lookup
|
|
||||||
|
|
||||||
@param default: The value to return if the given key does not exist
|
|
||||||
|
|
||||||
@return: The value associated with C{key} or C{default} if not
|
|
||||||
C{self.has_key(key)}
|
|
||||||
"""
|
|
||||||
if self.has_key(key):
|
|
||||||
return self[key]
|
|
||||||
else:
|
|
||||||
return default
|
|
||||||
|
|
||||||
def __contains__(self, key):
|
|
||||||
"""
|
|
||||||
C{key in dirdbm}
|
|
||||||
@type key: str
|
|
||||||
@param key: The key to test
|
|
||||||
|
|
||||||
@return: A true value if C{self.has_key(key)}, a false value otherwise.
|
|
||||||
"""
|
|
||||||
assert type(key) == types.StringType, "DirDBM key must be a string"
|
|
||||||
key = self._encode(key)
|
|
||||||
return os.path.isfile(os.path.join(self.dname, key))
|
|
||||||
|
|
||||||
def update(self, dict):
|
|
||||||
"""
|
|
||||||
Add all the key/value pairs in C{dict} to this dirdbm. Any conflicting
|
|
||||||
keys will be overwritten with the values from C{dict}.
|
|
||||||
@type dict: mapping
|
|
||||||
@param dict: A mapping of key/value pairs to add to this dirdbm.
|
|
||||||
"""
|
|
||||||
for key, val in dict.items():
|
|
||||||
self[key]=val
|
|
||||||
|
|
||||||
def copyTo(self, path):
|
|
||||||
"""
|
|
||||||
Copy the contents of this dirdbm to the dirdbm at C{path}.
|
|
||||||
|
|
||||||
@type path: C{str}
|
|
||||||
@param path: The path of the dirdbm to copy to. If a dirdbm
|
|
||||||
exists at the destination path, it is cleared first.
|
|
||||||
|
|
||||||
@rtype: C{DirDBM}
|
|
||||||
@return: The dirdbm this dirdbm was copied to.
|
|
||||||
"""
|
|
||||||
path = os.path.abspath(path)
|
|
||||||
assert path != self.dname
|
|
||||||
|
|
||||||
d = self.__class__(path)
|
|
||||||
d.clear()
|
|
||||||
for k in self.keys():
|
|
||||||
d[k] = self[k]
|
|
||||||
return d
|
|
||||||
|
|
||||||
def clear(self):
|
|
||||||
"""
|
|
||||||
Delete all key/value pairs in this dirdbm.
|
|
||||||
"""
|
|
||||||
for k in self.keys():
|
|
||||||
del self[k]
|
|
||||||
|
|
||||||
def close(self):
|
|
||||||
"""
|
|
||||||
Close this dbm: no-op, for dbm-style interface compliance.
|
|
||||||
"""
|
|
||||||
|
|
||||||
def getModificationTime(self, key):
|
|
||||||
"""
|
|
||||||
Returns modification time of an entry.
|
|
||||||
|
|
||||||
@return: Last modification date (seconds since epoch) of entry C{key}
|
|
||||||
@raise KeyError: Raised when there is no such key
|
|
||||||
"""
|
|
||||||
assert type(key) == types.StringType, "DirDBM key must be a string"
|
|
||||||
path = os.path.join(self.dname, self._encode(key))
|
|
||||||
if os.path.isfile(path):
|
|
||||||
return os.path.getmtime(path)
|
|
||||||
else:
|
|
||||||
raise KeyError, key
|
|
118
x/README.md
Normal file
118
x/README.md
Normal file
@ -0,0 +1,118 @@
# x

## HAProxy deployment

```
cd /vagrant
python cli.py deploy haproxy_deployment/haproxy-deployment.yaml
```

or from Python shell:

```
from x import deployment

deployment.deploy('/vagrant/haproxy_deployment/haproxy-deployment.yaml')
```

## Usage:

Creating resources:

```
from x import resource

node1 = resource.create('node1', 'x/resources/ro_node/', 'rs/', {'ip':'10.0.0.3', 'ssh_key' : '/vagrant/tmp/keys/ssh_private', 'ssh_user':'vagrant'})

node2 = resource.create('node2', 'x/resources/ro_node/', 'rs/', {'ip':'10.0.0.4', 'ssh_key' : '/vagrant/tmp/keys/ssh_private', 'ssh_user':'vagrant'})

keystone_db_data = resource.create('mariadb_keystone_data', 'x/resources/data_container/', 'rs/', {'image' : 'mariadb', 'export_volumes' : ['/var/lib/mysql'], 'ip': '', 'ssh_user': '', 'ssh_key': ''}, connections={'ip' : 'node2.ip', 'ssh_key':'node2.ssh_key', 'ssh_user':'node2.ssh_user'})

nova_db_data = resource.create('mariadb_nova_data', 'x/resources/data_container/', 'rs/', {'image' : 'mariadb', 'export_volumes' : ['/var/lib/mysql'], 'ip': '', 'ssh_user': '', 'ssh_key': ''}, connections={'ip' : 'node1.ip', 'ssh_key':'node1.ssh_key', 'ssh_user':'node1.ssh_user'})
```

To make a connection after a resource has been created, use `signals.connect` (see the sketch below).
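A minimal sketch of such a hand-wired connection, reusing `node2` and `keystone_db_data` from the block above; the call mirrors the one `deployment.deploy()` makes for each connection entry, and `mapping=None` is simply what it passes when no explicit mapping is defined:

```
from x import signals

# Same call deployment.deploy() makes per connection; mapping=None is what it
# passes when the deployment YAML defines no explicit mapping for the pair.
signals.connect(node2, keystone_db_data, mapping=None)
```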

To test notifications:

```
keystone_db_data.args # displays node2 IP

node2.update({'ip': '10.0.0.5'})

keystone_db_data.args # updated IP
```

If you close the Python shell you can load the resources like this:

```
from x import resource

node1 = resource.load('rs/node1')

node2 = resource.load('rs/node2')

keystone_db_data = resource.load('rs/mariadb_keystone_data')

nova_db_data = resource.load('rs/mariadb_nova_data')
```

Connections are loaded automatically.

You can also load all resources at once:

```
from x import resource

all_resources = resource.load_all('rs')
```

## CLI

You can do the above from the command-line client:

```
cd /vagrant

python cli.py resource create node1 x/resources/ro_node/ rs/ '{"ip":"10.0.0.3", "ssh_key" : "/vagrant/tmp/keys/ssh_private", "ssh_user":"vagrant"}'

python cli.py resource create node2 x/resources/ro_node/ rs/ '{"ip":"10.0.0.4", "ssh_key" : "/vagrant/tmp/keys/ssh_private", "ssh_user":"vagrant"}'

python cli.py resource create mariadb_keystone_data x/resources/data_container/ rs/ '{"image": "mariadb", "export_volumes" : ["/var/lib/mysql"], "ip": "", "ssh_user": "", "ssh_key": ""}'

python cli.py resource create mariadb_nova_data x/resources/data_container/ rs/ '{"image" : "mariadb", "export_volumes" : ["/var/lib/mysql"], "ip": "", "ssh_user": "", "ssh_key": ""}'

# View resources
python cli.py resource show rs/mariadb_keystone_data

# Show all resources at location rs/
python cli.py resource show rs/ --all

# Show resources with a specific tag
python cli.py resource show rs/ --tag test

# Connect resources
python cli.py connect rs/node2 rs/mariadb_keystone_data

python cli.py connect rs/node1 rs/mariadb_nova_data

# Test update
python cli.py update rs/node2 '{"ip": "1.1.1.1"}'
python cli.py resource show rs/mariadb_keystone_data # --> IP is 1.1.1.1

# View connections
python cli.py connections show

# Outputs the graph to the 'graph.png' file; note that edges are not drawn
# with regular arrowheads, connections just appear as thicker lines
# (see http://networkx.lanl.gov/_modules/networkx/drawing/nx_pylab.html)
python cli.py connections graph

# Disconnect
python cli.py disconnect rs/mariadb_nova_data rs/node1

# Tag a resource:
python cli.py resource tag rs/node1 test-tag
# Remove tag
python cli.py resource tag rs/node1 test-tag --delete
```
18
x/TODO.md
Normal file
18
x/TODO.md
Normal file
@ -0,0 +1,18 @@
# TODO

- store all resource configurations somewhere globally (this is required to
  correctly perform an update on one resource and bubble it down to all others)
- config templates
- a handler can also require some data, for example ansible: ip, ssh_key, ssh_user
- tag-filtered graph generation
- separate resource for a docker image -- e.g. to allow automatic removal of an
  image when it is unused, to conserve space

# DONE
- Deploy HAProxy, Keystone and MariaDB
- ansible handler (loles)
- tags are kept in the resource meta file (pkaminski)
- add 'list' connection type (pkaminski)
- connections are made automatically (pkaminski)
- graph is built from the CLIENT dict, clients are stored in a JSON file (pkaminski)
- cli (pkaminski)
13
x/actions.py
Normal file
13
x/actions.py
Normal file
@ -0,0 +1,13 @@
# -*- coding: UTF-8 -*-
import handlers


def resource_action(resource, action):
    handler = resource.metadata['handler']
    with handlers.get(handler)([resource]) as h:
        h.action(resource, action)


def tag_action(tag, action):
    #TODO
    pass
19
x/db.py
Normal file
19
x/db.py
Normal file
@ -0,0 +1,19 @@
# -*- coding: UTF-8 -*-

RESOURCE_DB = {}


def resource_add(key, value):
    if key in RESOURCE_DB:
        raise Exception('Key `{0}` already exists'.format(key))
    RESOURCE_DB[key] = value


def get_resource(key):
    return RESOURCE_DB.get(key, None)


def clear():
    global RESOURCE_DB

    RESOURCE_DB = {}
45
x/deployment.py
Normal file
45
x/deployment.py
Normal file
@ -0,0 +1,45 @@
# Deploying stuff from YAML definition

import os
import shutil
import yaml

from x import db
from x import resource as xr
from x import signals as xs


def deploy(filename):
    with open(filename) as f:
        config = yaml.load(f)

    workdir = config['workdir']
    resource_save_path = os.path.join(workdir, config['resource-save-path'])

    # Clean stuff first
    db.clear()
    xs.Connections.clear()
    shutil.rmtree(resource_save_path, ignore_errors=True)
    os.makedirs(resource_save_path)

    # Create resources first
    for resource_definition in config['resources']:
        name = resource_definition['name']
        model = os.path.join(workdir, resource_definition['model'])
        args = resource_definition.get('args', {})
        print 'Creating ', name, model, resource_save_path, args
        xr.create(name, model, resource_save_path, args=args)

    # Create resource connections
    for connection in config['connections']:
        emitter = db.get_resource(connection['emitter'])
        receiver = db.get_resource(connection['receiver'])
        mapping = connection.get('mapping')
        print 'Connecting ', emitter.name, receiver.name, mapping
        xs.connect(emitter, receiver, mapping=mapping)

    # Run all tests
    if 'test-suite' in config:
        print 'Running tests from {}'.format(config['test-suite'])
        test_suite = __import__(config['test-suite'], {}, {}, ['main'])
        test_suite.main()
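For reference, a minimal sketch of a deployment definition that `deploy()` above would accept. The top-level keys (`workdir`, `resource-save-path`, `resources`, `connections`, `test-suite`) come straight from the code; the concrete names, paths, and especially the shape of `mapping` are illustrative assumptions, not the contents of the real haproxy-deployment.yaml:

```
# Illustrative only: everything below except the key names is hypothetical.
from x import deployment

EXAMPLE_YAML = """
workdir: /vagrant
resource-save-path: rs/
resources:
  - name: node1
    model: x/resources/ro_node/
    args: {ip: 10.0.0.3, ssh_key: /vagrant/tmp/keys/ssh_private, ssh_user: vagrant}
  - name: mariadb_nova_data
    model: x/resources/data_container/
    args: {image: mariadb, export_volumes: ['/var/lib/mysql'], ip: '', ssh_user: '', ssh_key: ''}
connections:
  - emitter: node1
    receiver: mariadb_nova_data
    mapping: {ip: ip, ssh_key: ssh_key, ssh_user: ssh_user}   # assumed mapping shape
# test-suite: x.test.test_something   # optional; deploy() imports it and calls main()
"""

with open('/tmp/example-deployment.yaml', 'w') as f:
    f.write(EXAMPLE_YAML)

deployment.deploy('/tmp/example-deployment.yaml')
```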
15
x/handlers/__init__.py
Normal file
15
x/handlers/__init__.py
Normal file
@ -0,0 +1,15 @@
# -*- coding: UTF-8 -*-
from x.handlers.ansible import Ansible
from x.handlers.base import Empty
from x.handlers.shell import Shell


HANDLERS = {'ansible': Ansible,
            'shell': Shell,
            'none': Empty}

def get(handler_name):
    handler = HANDLERS.get(handler_name, None)
    if handler:
        return handler
    raise Exception('Handler {0} does not exist'.format(handler_name))
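How this registry is consumed in practice: the sketch below mirrors `x/actions.py`, which looks up the handler named in a resource's metadata and drives it as a context manager. It assumes a resource created as in the README and that a `run` action exists for it:

```
# Mirrors actions.resource_action(); resource name and action name are assumed.
from x import handlers
from x import resource

node1 = resource.load('rs/node1')
handler_name = node1.metadata['handler']        # one of 'ansible', 'shell', 'none'

with handlers.get(handler_name)([node1]) as h:  # __enter__ creates per-resource tmp dirs
    h.action(node1, 'run')                      # renders and executes the 'run' action
```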
39
x/handlers/ansible.py
Normal file
39
x/handlers/ansible.py
Normal file
@ -0,0 +1,39 @@
# -*- coding: UTF-8 -*-
import os
import subprocess
import yaml

from x.handlers.base import BaseHandler


class Ansible(BaseHandler):
    def action(self, resource, action_name):
        inventory_file = self._create_inventory(resource)
        playbook_file = self._create_playbook(resource, action_name)
        print 'inventory_file', inventory_file
        print 'playbook_file', playbook_file
        call_args = ['ansible-playbook', '-i', inventory_file, playbook_file]
        print 'EXECUTING: ', ' '.join(call_args)
        subprocess.call(call_args)

    #def _get_connection(self, resource):
    #    return {'ssh_user': '',
    #            'ssh_key': '',
    #            'host': ''}

    def _create_inventory(self, r):
        inventory = '{0} ansible_ssh_host={1} ansible_connection=ssh ansible_ssh_user={2} ansible_ssh_private_key_file={3}'
        host, user, ssh_key = r.args['ip'].value, r.args['ssh_user'].value, r.args['ssh_key'].value
        print host
        print user
        print ssh_key
        inventory = inventory.format(host, host, user, ssh_key)
        print inventory
        directory = self.dirs[r.name]
        inventory_path = os.path.join(directory, 'inventory')
        with open(inventory_path, 'w') as inv:
            inv.write(inventory)
        return inventory_path

    def _create_playbook(self, resource, action):
        return self._compile_action_file(resource, action)
53
x/handlers/base.py
Normal file
53
x/handlers/base.py
Normal file
@ -0,0 +1,53 @@
# -*- coding: UTF-8 -*-
import os
import shutil
import tempfile

from jinja2 import Template


class BaseHandler(object):
    def __init__(self, resources):
        self.dst = tempfile.mkdtemp()
        self.resources = resources

    def __enter__(self):
        self.dirs = {}
        for resource in self.resources:
            resource_dir = tempfile.mkdtemp(suffix=resource.name, dir=self.dst)
            self.dirs[resource.name] = resource_dir
        return self

    def __exit__(self, type, value, traceback):
        print self.dst
        return
        shutil.rmtree(self.dst)

    def _compile_action_file(self, resource, action):
        action_file = resource.metadata['actions'][action]
        action_file = os.path.join(resource.base_dir, 'actions', action_file)
        dir_path = self.dirs[resource.name]
        dest_file = tempfile.mkstemp(text=True, prefix=action, dir=dir_path)[1]
        args = self._make_args(resource)
        self._compile_file(action_file, dest_file, args)
        return dest_file

    def _compile_file(self, template, dest_file, args):
        print 'Rendering', template, args
        with open(template) as f:
            tpl = Template(f.read())
            tpl = tpl.render(args, zip=zip)

        with open(dest_file, 'w') as g:
            g.write(tpl)

    def _make_args(self, resource):
        args = {'name': resource.name}
        args['resource_dir'] = resource.base_dir
        args.update(resource.args)
        return args


class Empty(BaseHandler):
    def action(self, resource, action):
        pass
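A standalone sketch of what `_compile_file` above does to an action template, and why `zip=zip` is passed into the render context (templates such as the haproxy `run.yml` zip several list inputs together, and `zip` is not a Jinja2 built-in). The template text and values here are made up for illustration; only the render call mirrors the handler code:

```
# Illustration of BaseHandler._compile_file(); template and values are hypothetical.
from jinja2 import Template

template_text = (
    "- hosts: [{{ ip }}]\n"
    "  vars:\n"
    "{% for name, port in zip(names.value, ports.value) %}"
    "    - {{ name }}: {{ port['value'] }}\n"
    "{% endfor %}"
)

class FakeListArg(object):
    # stands in for a ListObserver-style arg that exposes .value
    def __init__(self, value):
        self.value = value

args = {
    'ip': '10.0.0.3',
    'names': FakeListArg(['keystone', 'mariadb']),
    'ports': FakeListArg([{'value': 5000}, {'value': 3306}]),
}

# zip has to be injected explicitly, which is exactly what the handler does
print Template(template_text).render(args, zip=zip)
```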
10
x/handlers/shell.py
Normal file
10
x/handlers/shell.py
Normal file
@ -0,0 +1,10 @@
# -*- coding: UTF-8 -*-
import subprocess

from x.handlers.base import BaseHandler


class Shell(BaseHandler):
    def action(self, resource, action_name):
        action_file = self._compile_action_file(resource, action_name)
        subprocess.call(['bash', action_file])
190
x/observer.py
Normal file
190
x/observer.py
Normal file
@ -0,0 +1,190 @@
|
|||||||
|
from x import signals
|
||||||
|
|
||||||
|
|
||||||
|
class BaseObserver(object):
|
||||||
|
type_ = None
|
||||||
|
|
||||||
|
def __init__(self, attached_to, name, value):
|
||||||
|
"""
|
||||||
|
:param attached_to: resource.Resource
|
||||||
|
:param name:
|
||||||
|
:param value:
|
||||||
|
:return:
|
||||||
|
"""
|
||||||
|
self.attached_to = attached_to
|
||||||
|
self.name = name
|
||||||
|
self.value = value
|
||||||
|
self.receivers = []
|
||||||
|
|
||||||
|
def log(self, msg):
|
||||||
|
print '{} {}'.format(self, msg)
|
||||||
|
|
||||||
|
def __repr__(self):
|
||||||
|
return '[{}:{}] {}'.format(self.attached_to.name, self.name, self.value)
|
||||||
|
|
||||||
|
def __unicode__(self):
|
||||||
|
return self.value
|
||||||
|
|
||||||
|
def __eq__(self, other):
|
||||||
|
if isinstance(other, BaseObserver):
|
||||||
|
return self.value == other.value
|
||||||
|
|
||||||
|
return self.value == other
|
||||||
|
|
||||||
|
def notify(self, emitter):
|
||||||
|
"""
|
||||||
|
:param emitter: Observer
|
||||||
|
:return:
|
||||||
|
"""
|
||||||
|
raise NotImplementedError
|
||||||
|
|
||||||
|
def update(self, value):
|
||||||
|
"""
|
||||||
|
:param value:
|
||||||
|
:return:
|
||||||
|
"""
|
||||||
|
raise NotImplementedError
|
||||||
|
|
||||||
|
def find_receiver(self, receiver):
|
||||||
|
fltr = [r for r in self.receivers
|
||||||
|
if r.attached_to == receiver.attached_to
|
||||||
|
and r.name == receiver.name]
|
||||||
|
if fltr:
|
||||||
|
return fltr[0]
|
||||||
|
|
||||||
|
def subscribe(self, receiver):
|
||||||
|
"""
|
||||||
|
:param receiver: Observer
|
||||||
|
:return:
|
||||||
|
"""
|
||||||
|
self.log('Subscribe {}'.format(receiver))
|
||||||
|
# No multiple subscriptions
|
||||||
|
if self.find_receiver(receiver):
|
||||||
|
self.log('No multiple subscriptions from {}'.format(receiver))
|
||||||
|
return
|
||||||
|
self.receivers.append(receiver)
|
||||||
|
receiver.subscribed(self)
|
||||||
|
|
||||||
|
signals.Connections.add(
|
||||||
|
self.attached_to,
|
||||||
|
self.name,
|
||||||
|
receiver.attached_to,
|
||||||
|
receiver.name
|
||||||
|
)
|
||||||
|
|
||||||
|
receiver.notify(self)
|
||||||
|
|
||||||
|
def subscribed(self, emitter):
|
||||||
|
self.log('Subscribed {}'.format(emitter))
|
||||||
|
|
||||||
|
def unsubscribe(self, receiver):
|
||||||
|
"""
|
||||||
|
:param receiver: Observer
|
||||||
|
:return:
|
||||||
|
"""
|
||||||
|
self.log('Unsubscribe {}'.format(receiver))
|
||||||
|
if self.find_receiver(receiver):
|
||||||
|
self.receivers.remove(receiver)
|
||||||
|
receiver.unsubscribed(self)
|
||||||
|
|
||||||
|
signals.Connections.remove(
|
||||||
|
self.attached_to,
|
||||||
|
self.name,
|
||||||
|
receiver.attached_to,
|
||||||
|
receiver.name
|
||||||
|
)
|
||||||
|
|
||||||
|
# TODO: ?
|
||||||
|
#receiver.notify(self)
|
||||||
|
|
||||||
|
def unsubscribed(self, emitter):
|
||||||
|
self.log('Unsubscribed {}'.format(emitter))
|
||||||
|
|
||||||
|
|
||||||
|
class Observer(BaseObserver):
|
||||||
|
type_ = 'simple'
|
||||||
|
|
||||||
|
def __init__(self, *args, **kwargs):
|
||||||
|
super(Observer, self).__init__(*args, **kwargs)
|
||||||
|
self.emitter = None
|
||||||
|
|
||||||
|
def notify(self, emitter):
|
||||||
|
self.log('Notify from {} value {}'.format(emitter, emitter.value))
|
||||||
|
# Copy emitter's values to receiver
|
||||||
|
self.value = emitter.value
|
||||||
|
for receiver in self.receivers:
|
||||||
|
receiver.notify(self)
|
||||||
|
self.attached_to.save()
|
||||||
|
|
||||||
|
def update(self, value):
|
||||||
|
self.log('Updating to value {}'.format(value))
|
||||||
|
self.value = value
|
||||||
|
for receiver in self.receivers:
|
||||||
|
receiver.notify(self)
|
||||||
|
self.attached_to.save()
|
||||||
|
|
||||||
|
def subscribed(self, emitter):
|
||||||
|
super(Observer, self).subscribed(emitter)
|
||||||
|
# Simple observer can be attached to at most one emitter
|
||||||
|
if self.emitter is not None:
|
||||||
|
self.emitter.unsubscribe(self)
|
||||||
|
self.emitter = emitter
|
||||||
|
|
||||||
|
def unsubscribed(self, emitter):
|
||||||
|
super(Observer, self).unsubscribed(emitter)
|
||||||
|
self.emitter = None
|
||||||
|
|
||||||
|
|
||||||
|
class ListObserver(BaseObserver):
|
||||||
|
type_ = 'list'
|
||||||
|
|
||||||
|
def __unicode__(self):
|
||||||
|
return unicode(self.value)
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def _format_value(emitter):
|
||||||
|
return {
|
||||||
|
'emitter': emitter.name,
|
||||||
|
'emitter_attached_to': emitter.attached_to.name,
|
||||||
|
'value': emitter.value,
|
||||||
|
}
|
||||||
|
|
||||||
|
def notify(self, emitter):
|
||||||
|
self.log('Notify from {} value {}'.format(emitter, emitter.value))
|
||||||
|
# Copy emitter's values to receiver
|
||||||
|
#self.value[emitter.attached_to.name] = emitter.value
|
||||||
|
idx = self._emitter_idx(emitter)
|
||||||
|
self.value[idx] = self._format_value(emitter)
|
||||||
|
for receiver in self.receivers:
|
||||||
|
receiver.notify(self)
|
||||||
|
self.attached_to.save()
|
||||||
|
|
||||||
|
def subscribed(self, emitter):
|
||||||
|
super(ListObserver, self).subscribed(emitter)
|
||||||
|
idx = self._emitter_idx(emitter)
|
||||||
|
if idx is None:
|
||||||
|
self.value.append(self._format_value(emitter))
|
||||||
|
|
||||||
|
def unsubscribed(self, emitter):
|
||||||
|
"""
|
||||||
|
:param receiver: Observer
|
||||||
|
:return:
|
||||||
|
"""
|
||||||
|
self.log('Unsubscribed emitter {}'.format(emitter))
|
||||||
|
idx = self._emitter_idx(emitter)
|
||||||
|
self.value.pop(idx)
|
||||||
|
|
||||||
|
def _emitter_idx(self, emitter):
|
||||||
|
try:
|
||||||
|
return [i for i, e in enumerate(self.value)
|
||||||
|
if e['emitter_attached_to'] == emitter.attached_to.name
|
||||||
|
][0]
|
||||||
|
except IndexError:
|
||||||
|
return
|
||||||
|
|
||||||
|
|
||||||
|
def create(type_, *args, **kwargs):
|
||||||
|
for klass in BaseObserver.__subclasses__():
|
||||||
|
if klass.type_ == type_:
|
||||||
|
return klass(*args, **kwargs)
|
||||||
|
raise NotImplementedError('No handling class for type {}'.format(type_))
|
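A small sketch of the propagation mechanics above, expressed through resources as in the README (`Resource` wraps each input in one of these observers via `observer.create`). The resource names and the `rs/` store are assumed to exist from the earlier examples, and the final value only propagates if the node2 -> mariadb_keystone_data connection is in place:

```
# Assumes the resources created in the README examples already exist in rs/.
from x import resource

node2 = resource.load('rs/node2')
keystone_db_data = resource.load('rs/mariadb_keystone_data')

emitter = node2.args['ip']              # a 'simple' Observer built by observer.create()
receiver = keystone_db_data.args['ip']  # the receiving observer on the other resource

emitter.update('10.0.0.5')   # Observer.update: sets value, notifies receivers, saves
print receiver.value         # -> '10.0.0.5' if the connection is in place
```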
176
x/resource.py
Normal file
176
x/resource.py
Normal file
@ -0,0 +1,176 @@
|
|||||||
|
# -*- coding: UTF-8 -*-
|
||||||
|
import copy
|
||||||
|
import json
|
||||||
|
import os
|
||||||
|
import shutil
|
||||||
|
|
||||||
|
import yaml
|
||||||
|
|
||||||
|
from x import actions
|
||||||
|
from x import db
|
||||||
|
from x import observer
|
||||||
|
from x import signals
|
||||||
|
from x import utils
|
||||||
|
|
||||||
|
|
||||||
|
class Resource(object):
|
||||||
|
def __init__(self, name, metadata, args, base_dir, tags=None):
|
||||||
|
self.name = name
|
||||||
|
self.base_dir = base_dir
|
||||||
|
self.metadata = metadata
|
||||||
|
self.actions = metadata['actions'].keys() if metadata['actions'] else None
|
||||||
|
self.requires = metadata['input'].keys()
|
||||||
|
self._validate_args(args, metadata['input'])
|
||||||
|
self.args = {}
|
||||||
|
for arg_name, arg_value in args.items():
|
||||||
|
type_ = metadata.get('input-types', {}).get(arg_name) or 'simple'
|
||||||
|
self.args[arg_name] = observer.create(type_, self, arg_name, arg_value)
|
||||||
|
self.metadata['input'] = args
|
||||||
|
self.input_types = metadata.get('input-types', {})
|
||||||
|
self.changed = []
|
||||||
|
self.tags = tags or []
|
||||||
|
|
||||||
|
def __repr__(self):
|
||||||
|
return ("Resource(name='{0}', metadata={1}, args={2}, "
|
||||||
|
"base_dir='{3}', tags={4})").format(self.name,
|
||||||
|
json.dumps(self.metadata),
|
||||||
|
json.dumps(self.args_show()),
|
||||||
|
self.base_dir,
|
||||||
|
self.tags)
|
||||||
|
|
||||||
|
def args_show(self):
|
||||||
|
def formatter(v):
|
||||||
|
if isinstance(v, observer.ListObserver):
|
||||||
|
return v.value
|
||||||
|
elif isinstance(v, observer.Observer):
|
||||||
|
return {
|
||||||
|
'emitter': v.emitter.attached_to.name if v.emitter else None,
|
||||||
|
'value': v.value,
|
||||||
|
}
|
||||||
|
|
||||||
|
return v
|
||||||
|
|
||||||
|
return {k: formatter(v) for k, v in self.args.items()}
|
||||||
|
|
||||||
|
def args_dict(self):
|
||||||
|
return {k: v.value for k, v in self.args.items()}
|
||||||
|
|
||||||
|
def add_tag(self, tag):
|
||||||
|
if tag not in self.tags:
|
||||||
|
self.tags.append(tag)
|
||||||
|
|
||||||
|
def remove_tag(self, tag):
|
||||||
|
try:
|
||||||
|
self.tags.remove(tag)
|
||||||
|
except ValueError:
|
||||||
|
pass
|
||||||
|
|
||||||
|
def notify(self, emitter):
|
||||||
|
"""Update resource's args from emitter's args.
|
||||||
|
|
||||||
|
:param emitter: Resource
|
||||||
|
:return:
|
||||||
|
"""
|
||||||
|
for key, value in emitter.args.iteritems():
|
||||||
|
self.args[key].notify(value)
|
||||||
|
|
||||||
|
def update(self, args):
|
||||||
|
"""This method updates resource's args with a simple dict.
|
||||||
|
|
||||||
|
:param args:
|
||||||
|
:return:
|
||||||
|
"""
|
||||||
|
# Update will be blocked if this resource is listening
|
||||||
|
# on some input that is to be updated -- we should only listen
|
||||||
|
# to the emitter and not be able to change the input's value
|
||||||
|
|
||||||
|
for key, value in args.iteritems():
|
||||||
|
self.args[key].update(value)
|
||||||
|
|
||||||
|
def action(self, action):
|
||||||
|
if action in self.actions:
|
||||||
|
actions.resource_action(self, action)
|
||||||
|
else:
|
||||||
|
raise Exception('Action {0} is not available'.format(action))
|
||||||
|
|
||||||
|
def _validate_args(self, args, inputs):
|
||||||
|
for req in self.requires:
|
||||||
|
if req not in args:
|
||||||
|
# If metadata input is filled with a value, use it as default
|
||||||
|
# and don't report an error
|
||||||
|
if inputs.get(req):
|
||||||
|
args[req] = inputs[req]
|
||||||
|
else:
|
||||||
|
raise Exception('Requirement `{0}` is missing in args'.format(req))
|
||||||
|
|
||||||
|
# TODO: versioning
|
||||||
|
def save(self):
|
||||||
|
metadata = copy.deepcopy(self.metadata)
|
||||||
|
|
||||||
|
metadata['tags'] = self.tags
|
||||||
|
metadata['input'] = self.args_dict()
|
||||||
|
|
||||||
|
meta_file = os.path.join(self.base_dir, 'meta.yaml')
|
||||||
|
with open(meta_file, 'w') as f:
|
||||||
|
f.write(yaml.dump(metadata, default_flow_style=False))
|
||||||
|
|
||||||
|
|
||||||
|
def create(name, base_path, dest_path, args, connections={}):
|
||||||
|
if not os.path.exists(base_path):
|
||||||
|
raise Exception('Base resource does not exist: {0}'.format(base_path))
|
||||||
|
if not os.path.exists(dest_path):
|
||||||
|
raise Exception('Dest dir does not exist: {0}'.format(dest_path))
|
||||||
|
if not os.path.isdir(dest_path):
|
||||||
|
raise Exception('Dest path is not a directory: {0}'.format(dest_path))
|
||||||
|
|
||||||
|
dest_path = os.path.abspath(os.path.join(dest_path, name))
|
||||||
|
base_meta_file = os.path.join(base_path, 'meta.yaml')
|
||||||
|
actions_path = os.path.join(base_path, 'actions')
|
||||||
|
|
||||||
|
meta = yaml.load(open(base_meta_file).read())
|
||||||
|
meta['id'] = name
|
||||||
|
meta['version'] = '1.0.0'
|
||||||
|
meta['actions'] = {}
|
||||||
|
meta['tags'] = []
|
||||||
|
|
||||||
|
if os.path.exists(actions_path):
|
||||||
|
for f in os.listdir(actions_path):
|
||||||
|
meta['actions'][os.path.splitext(f)[0]] = f
|
||||||
|
|
||||||
|
resource = Resource(name, meta, args, dest_path)
|
||||||
|
signals.assign_connections(resource, connections)
|
||||||
|
|
||||||
|
# save
|
||||||
|
shutil.copytree(base_path, dest_path)
|
||||||
|
resource.save()
|
||||||
|
db.resource_add(name, resource)
|
||||||
|
|
||||||
|
return resource
|
||||||
|
|
||||||
|
|
||||||
|
def load(dest_path):
|
||||||
|
meta_file = os.path.join(dest_path, 'meta.yaml')
|
||||||
|
meta = utils.load_file(meta_file)
|
||||||
|
name = meta['id']
|
||||||
|
args = meta['input']
|
||||||
|
tags = meta.get('tags', [])
|
||||||
|
|
||||||
|
resource = Resource(name, meta, args, dest_path, tags=tags)
|
||||||
|
|
||||||
|
db.resource_add(name, resource)
|
||||||
|
|
||||||
|
return resource
|
||||||
|
|
||||||
|
|
||||||
|
def load_all(dest_path):
|
||||||
|
ret = {}
|
||||||
|
|
||||||
|
for name in os.listdir(dest_path):
|
||||||
|
resource_path = os.path.join(dest_path, name)
|
||||||
|
resource = load(resource_path)
|
||||||
|
ret[resource.name] = resource
|
||||||
|
|
||||||
|
signals.Connections.reconnect_all()
|
||||||
|
|
||||||
|
return ret
|
5
x/resources/data_container/actions/echo.yml
Normal file
5
x/resources/data_container/actions/echo.yml
Normal file
@ -0,0 +1,5 @@
|
|||||||
|
|
||||||
|
- hosts: [{{ ip }}]
|
||||||
|
sudo: yes
|
||||||
|
tasks:
|
||||||
|
- shell: echo `/sbin/ifconfig`
|
6
x/resources/data_container/actions/remove.yml
Normal file
6
x/resources/data_container/actions/remove.yml
Normal file
@ -0,0 +1,6 @@
|
|||||||
|
|
||||||
|
- hosts: [{{ ip }}]
|
||||||
|
sudo: yes
|
||||||
|
tasks:
|
||||||
|
- shell: docker stop {{ name }}
|
||||||
|
- shell: docker rm {{ name }}
|
20
x/resources/data_container/actions/run.yml
Normal file
20
x/resources/data_container/actions/run.yml
Normal file
@ -0,0 +1,20 @@
|
|||||||
|
- hosts: [{{ ip }}]
|
||||||
|
sudo: yes
|
||||||
|
tasks:
|
||||||
|
- docker:
|
||||||
|
name: {{ name }}
|
||||||
|
image: {{ image }}
|
||||||
|
state: running
|
||||||
|
net: host
|
||||||
|
ports:
|
||||||
|
{% for port in ports.value %}
|
||||||
|
- {{ port['value'] }}:{{ port['value'] }}
|
||||||
|
{% endfor %}
|
||||||
|
volumes:
|
||||||
|
# TODO: host_binds might need more work
|
||||||
|
# Currently it's not that trivial to pass custom src: dst here
|
||||||
|
# (when a config variable is passed here from other resource)
|
||||||
|
# so we mount it to the same directory as on host
|
||||||
|
{% for bind in host_binds.value %}
|
||||||
|
- {{ bind['value']['src'] }}:{{ bind['value']['dst'] }}:{{ bind['value'].get('mode', 'ro') }}
|
||||||
|
{% endfor %}
|
7
x/resources/data_container/meta.yaml
Normal file
7
x/resources/data_container/meta.yaml
Normal file
@ -0,0 +1,7 @@
|
|||||||
|
id: data_container
|
||||||
|
handler: ansible
|
||||||
|
version: 1.0.0
|
||||||
|
input:
|
||||||
|
ip:
|
||||||
|
image:
|
||||||
|
export_volumes:
|
6
x/resources/docker_container/actions/remove.yml
Normal file
6
x/resources/docker_container/actions/remove.yml
Normal file
@ -0,0 +1,6 @@
|
|||||||
|
|
||||||
|
- hosts: [{{ ip }}]
|
||||||
|
sudo: yes
|
||||||
|
tasks:
|
||||||
|
- shell: docker stop {{ name }}
|
||||||
|
- shell: docker rm {{ name }}
|
21
x/resources/docker_container/actions/run.yml
Normal file
21
x/resources/docker_container/actions/run.yml
Normal file
@ -0,0 +1,21 @@
|
|||||||
|
|
||||||
|
- hosts: [{{ ip }}]
|
||||||
|
sudo: yes
|
||||||
|
tasks:
|
||||||
|
- docker:
|
||||||
|
name: {{ name }}
|
||||||
|
image: {{ image }}
|
||||||
|
state: running
|
||||||
|
net: host
|
||||||
|
ports:
|
||||||
|
{% for port in ports.value %}
|
||||||
|
- {{ port['value'] }}:{{ port['value'] }}
|
||||||
|
{% endfor %}
|
||||||
|
volumes:
|
||||||
|
# TODO: host_binds might need more work
|
||||||
|
# Currently it's not that trivial to pass custom src: dst here
|
||||||
|
# (when a config variable is passed here from other resource)
|
||||||
|
# so we mount it to the same directory as on host
|
||||||
|
{% for bind in host_binds.value %}
|
||||||
|
- {{ bind['value']['src'] }}:{{ bind['value']['dst'] }}:{{ bind['value'].get('mode', 'ro') }}
|
||||||
|
{% endfor %}
|
15
x/resources/docker_container/meta.yaml
Normal file
15
x/resources/docker_container/meta.yaml
Normal file
@ -0,0 +1,15 @@
|
|||||||
|
id: container
|
||||||
|
handler: ansible
|
||||||
|
version: 1.0.0
|
||||||
|
input:
|
||||||
|
ip:
|
||||||
|
image:
|
||||||
|
ports:
|
||||||
|
host_binds:
|
||||||
|
volume_binds:
|
||||||
|
ssh_user:
|
||||||
|
ssh_key:
|
||||||
|
input-types:
|
||||||
|
ports:
|
||||||
|
host_binds: list
|
||||||
|
volume_binds: list
|
3
x/resources/file/actions/remove.sh
Normal file
3
x/resources/file/actions/remove.sh
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
rm {{ path }}
|
3
x/resources/file/actions/run.sh
Normal file
3
x/resources/file/actions/run.sh
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
touch {{ path }}
|
5
x/resources/file/meta.yaml
Normal file
5
x/resources/file/meta.yaml
Normal file
@ -0,0 +1,5 @@
|
|||||||
|
id: file
|
||||||
|
handler: shell
|
||||||
|
version: 1.0.0
|
||||||
|
input:
|
||||||
|
path: /tmp/test_file
|
5
x/resources/haproxy/actions/remove.yml
Normal file
5
x/resources/haproxy/actions/remove.yml
Normal file
@ -0,0 +1,5 @@
|
|||||||
|
# TODO
|
||||||
|
- hosts: [{{ ip }}]
|
||||||
|
sudo: yes
|
||||||
|
tasks:
|
||||||
|
- file: path={{ config_dir.value['src'] }} state=absent
|
21
x/resources/haproxy/actions/run.yml
Normal file
21
x/resources/haproxy/actions/run.yml
Normal file
@ -0,0 +1,21 @@
|
|||||||
|
# TODO
|
||||||
|
- hosts: [{{ ip }}]
|
||||||
|
sudo: yes
|
||||||
|
vars:
|
||||||
|
config_dir: {src: {{ config_dir.value['src'] }}, dst: {{ config_dir.value['dst'] }}}
|
||||||
|
haproxy_ip: {{ ip }}
|
||||||
|
haproxy_services:
|
||||||
|
{% for service, ports, listen_port in zip(configs.value, configs_ports.value, listen_ports.value) %}
|
||||||
|
- name: {{ service['emitter_attached_to'] }}
|
||||||
|
listen_port: {{ listen_port['value'] }}
|
||||||
|
servers:
|
||||||
|
{% for server_ip, server_port in zip(service['value'], ports['value']) %}
|
||||||
|
- name: {{ server_ip['emitter_attached_to'] }}
|
||||||
|
ip: {{ server_ip['value'] }}
|
||||||
|
port: {{ server_port['value'] }}
|
||||||
|
{% endfor %}
|
||||||
|
{% endfor %}
|
||||||
|
tasks:
|
||||||
|
- file: path={{ config_dir.value['src'] }}/ state=directory
|
||||||
|
- file: path={{ config_dir.value['src'] }}/haproxy.cfg state=touch
|
||||||
|
- template: src=/vagrant/haproxy.cfg dest={{ config_dir.value['src'] }}/haproxy.cfg
|
17
x/resources/haproxy/meta.yaml
Normal file
17
x/resources/haproxy/meta.yaml
Normal file
@ -0,0 +1,17 @@
|
|||||||
|
id: haproxy
|
||||||
|
handler: ansible
|
||||||
|
version: 1.0.0
|
||||||
|
input:
|
||||||
|
ip:
|
||||||
|
config_dir: {src: /etc/solar/haproxy, dst: /etc/haproxy}
|
||||||
|
listen_ports:
|
||||||
|
configs:
|
||||||
|
configs_names:
|
||||||
|
configs_ports:
|
||||||
|
ssh_user:
|
||||||
|
ssh_key:
|
||||||
|
input-types:
|
||||||
|
listen_ports: list
|
||||||
|
configs: list
|
||||||
|
configs_names: list
|
||||||
|
configs_ports: list
|
11
x/resources/haproxy_config/meta.yaml
Normal file
11
x/resources/haproxy_config/meta.yaml
Normal file
@ -0,0 +1,11 @@
|
|||||||
|
id: haproxy_config
|
||||||
|
handler: none
|
||||||
|
version: 1.0.0
|
||||||
|
input:
|
||||||
|
name:
|
||||||
|
listen_port:
|
||||||
|
ports:
|
||||||
|
servers:
|
||||||
|
input-types:
|
||||||
|
ports: list
|
||||||
|
servers: list
|
4
x/resources/keystone_config/actions/remove.yml
Normal file
4
x/resources/keystone_config/actions/remove.yml
Normal file
@ -0,0 +1,4 @@
|
|||||||
|
- hosts: [{{ ip }}]
|
||||||
|
sudo: yes
|
||||||
|
tasks:
|
||||||
|
- file: path={{config_dir}} state=absent
|
14
x/resources/keystone_config/actions/run.yml
Normal file
14
x/resources/keystone_config/actions/run.yml
Normal file
@ -0,0 +1,14 @@
|
|||||||
|
- hosts: [{{ ip }}]
|
||||||
|
sudo: yes
|
||||||
|
vars:
|
||||||
|
admin_token: {{admin_token}}
|
||||||
|
db_user: {{db_user}}
|
||||||
|
db_password: {{db_password}}
|
||||||
|
db_host: {{db_host}}
|
||||||
|
db_name: {{db_name}}
|
||||||
|
tasks:
|
||||||
|
- file: path={{config_dir}} state=directory
|
||||||
|
- template: src={{resource_dir}}/templates/keystone.conf dest={{config_dir}}/keystone.conf
|
||||||
|
- template: src={{resource_dir}}/templates/default_catalog.templates dest={{config_dir}}/default_catalog.templates
|
||||||
|
- template: src={{resource_dir}}/templates/logging.conf dest={{config_dir}}/logging.conf
|
||||||
|
- template: src={{resource_dir}}/templates/policy.json dest={{config_dir}}/policy.json
|
13
x/resources/keystone_config/meta.yaml
Normal file
13
x/resources/keystone_config/meta.yaml
Normal file
@ -0,0 +1,13 @@
|
|||||||
|
id: keystone_config
|
||||||
|
handler: ansible
|
||||||
|
version: 1.0.0
|
||||||
|
input:
|
||||||
|
config_dir:
|
||||||
|
admin_token:
|
||||||
|
db_user:
|
||||||
|
db_password:
|
||||||
|
db_host:
|
||||||
|
db_name:
|
||||||
|
ip:
|
||||||
|
ssh_key:
|
||||||
|
ssh_user:
|
@ -0,0 +1,27 @@
|
|||||||
|
# config for templated.Catalog, using camelCase because I don't want to do
|
||||||
|
# translations for keystone compat
|
||||||
|
catalog.RegionOne.identity.publicURL = http://localhost:$(public_port)s/v2.0
|
||||||
|
catalog.RegionOne.identity.adminURL = http://localhost:$(admin_port)s/v2.0
|
||||||
|
catalog.RegionOne.identity.internalURL = http://localhost:$(public_port)s/v2.0
|
||||||
|
catalog.RegionOne.identity.name = Identity Service
|
||||||
|
|
||||||
|
# fake compute service for now to help novaclient tests work
|
||||||
|
catalog.RegionOne.compute.publicURL = http://localhost:8774/v1.1/$(tenant_id)s
|
||||||
|
catalog.RegionOne.compute.adminURL = http://localhost:8774/v1.1/$(tenant_id)s
|
||||||
|
catalog.RegionOne.compute.internalURL = http://localhost:8774/v1.1/$(tenant_id)s
|
||||||
|
catalog.RegionOne.compute.name = Compute Service
|
||||||
|
|
||||||
|
catalog.RegionOne.volume.publicURL = http://localhost:8776/v1/$(tenant_id)s
|
||||||
|
catalog.RegionOne.volume.adminURL = http://localhost:8776/v1/$(tenant_id)s
|
||||||
|
catalog.RegionOne.volume.internalURL = http://localhost:8776/v1/$(tenant_id)s
|
||||||
|
catalog.RegionOne.volume.name = Volume Service
|
||||||
|
|
||||||
|
catalog.RegionOne.ec2.publicURL = http://localhost:8773/services/Cloud
|
||||||
|
catalog.RegionOne.ec2.adminURL = http://localhost:8773/services/Admin
|
||||||
|
catalog.RegionOne.ec2.internalURL = http://localhost:8773/services/Cloud
|
||||||
|
catalog.RegionOne.ec2.name = EC2 Service
|
||||||
|
|
||||||
|
catalog.RegionOne.image.publicURL = http://localhost:9292/v1
|
||||||
|
catalog.RegionOne.image.adminURL = http://localhost:9292/v1
|
||||||
|
catalog.RegionOne.image.internalURL = http://localhost:9292/v1
|
||||||
|
catalog.RegionOne.image.name = Image Service
|
1589
x/resources/keystone_config/templates/keystone.conf
Normal file
1589
x/resources/keystone_config/templates/keystone.conf
Normal file
File diff suppressed because it is too large
Load Diff
65
x/resources/keystone_config/templates/logging.conf
Normal file
65
x/resources/keystone_config/templates/logging.conf
Normal file
@ -0,0 +1,65 @@
|
|||||||
|
[loggers]
|
||||||
|
keys=root,access
|
||||||
|
|
||||||
|
[handlers]
|
||||||
|
keys=production,file,access_file,devel
|
||||||
|
|
||||||
|
[formatters]
|
||||||
|
keys=minimal,normal,debug
|
||||||
|
|
||||||
|
|
||||||
|
###########
|
||||||
|
# Loggers #
|
||||||
|
###########
|
||||||
|
|
||||||
|
[logger_root]
|
||||||
|
level=WARNING
|
||||||
|
handlers=file
|
||||||
|
|
||||||
|
[logger_access]
|
||||||
|
level=INFO
|
||||||
|
qualname=access
|
||||||
|
handlers=access_file
|
||||||
|
|
||||||
|
|
||||||
|
################
|
||||||
|
# Log Handlers #
|
||||||
|
################
|
||||||
|
|
||||||
|
[handler_production]
|
||||||
|
class=handlers.SysLogHandler
|
||||||
|
level=ERROR
|
||||||
|
formatter=normal
|
||||||
|
args=(('localhost', handlers.SYSLOG_UDP_PORT), handlers.SysLogHandler.LOG_USER)
|
||||||
|
|
||||||
|
[handler_file]
|
||||||
|
class=handlers.WatchedFileHandler
|
||||||
|
level=WARNING
|
||||||
|
formatter=normal
|
||||||
|
args=('error.log',)
|
||||||
|
|
||||||
|
[handler_access_file]
|
||||||
|
class=handlers.WatchedFileHandler
|
||||||
|
level=INFO
|
||||||
|
formatter=minimal
|
||||||
|
args=('access.log',)
|
||||||
|
|
||||||
|
[handler_devel]
|
||||||
|
class=StreamHandler
|
||||||
|
level=NOTSET
|
||||||
|
formatter=debug
|
||||||
|
args=(sys.stdout,)
|
||||||
|
|
||||||
|
|
||||||
|
##################
|
||||||
|
# Log Formatters #
|
||||||
|
##################
|
||||||
|
|
||||||
|
[formatter_minimal]
|
||||||
|
format=%(message)s
|
||||||
|
|
||||||
|
[formatter_normal]
|
||||||
|
format=(%(name)s): %(asctime)s %(levelname)s %(message)s
|
||||||
|
|
||||||
|
[formatter_debug]
|
||||||
|
format=(%(name)s): %(asctime)s %(levelname)s %(module)s %(funcName)s %(message)s
|
171
x/resources/keystone_config/templates/policy.json
Normal file
171
x/resources/keystone_config/templates/policy.json
Normal file
@ -0,0 +1,171 @@
|
|||||||
|
{
|
||||||
|
"admin_required": "role:admin or is_admin:1",
|
||||||
|
"service_role": "role:service",
|
||||||
|
"service_or_admin": "rule:admin_required or rule:service_role",
|
||||||
|
"owner" : "user_id:%(user_id)s",
|
||||||
|
"admin_or_owner": "rule:admin_required or rule:owner",
|
||||||
|
|
||||||
|
"default": "rule:admin_required",
|
||||||
|
|
||||||
|
"identity:get_region": "",
|
||||||
|
"identity:list_regions": "",
|
||||||
|
"identity:create_region": "rule:admin_required",
|
||||||
|
"identity:update_region": "rule:admin_required",
|
||||||
|
"identity:delete_region": "rule:admin_required",
|
||||||
|
|
||||||
|
"identity:get_service": "rule:admin_required",
|
||||||
|
"identity:list_services": "rule:admin_required",
|
||||||
|
"identity:create_service": "rule:admin_required",
|
||||||
|
"identity:update_service": "rule:admin_required",
|
||||||
|
"identity:delete_service": "rule:admin_required",
|
||||||
|
|
||||||
|
"identity:get_endpoint": "rule:admin_required",
|
||||||
|
"identity:list_endpoints": "rule:admin_required",
|
||||||
|
"identity:create_endpoint": "rule:admin_required",
|
||||||
|
"identity:update_endpoint": "rule:admin_required",
|
||||||
|
"identity:delete_endpoint": "rule:admin_required",
|
||||||
|
|
||||||
|
"identity:get_domain": "rule:admin_required",
|
||||||
|
"identity:list_domains": "rule:admin_required",
|
||||||
|
"identity:create_domain": "rule:admin_required",
|
||||||
|
"identity:update_domain": "rule:admin_required",
|
||||||
|
"identity:delete_domain": "rule:admin_required",
|
||||||
|
|
||||||
|
"identity:get_project": "rule:admin_required",
|
||||||
|
"identity:list_projects": "rule:admin_required",
|
||||||
|
"identity:list_user_projects": "rule:admin_or_owner",
|
||||||
|
"identity:create_project": "rule:admin_required",
|
||||||
|
"identity:update_project": "rule:admin_required",
|
||||||
|
"identity:delete_project": "rule:admin_required",
|
||||||
|
|
||||||
|
"identity:get_user": "rule:admin_required",
|
||||||
|
"identity:list_users": "rule:admin_required",
|
||||||
|
"identity:create_user": "rule:admin_required",
|
||||||
|
"identity:update_user": "rule:admin_required",
|
||||||
|
"identity:delete_user": "rule:admin_required",
|
||||||
|
"identity:change_password": "rule:admin_or_owner",
|
||||||
|
|
||||||
|
"identity:get_group": "rule:admin_required",
|
||||||
|
"identity:list_groups": "rule:admin_required",
|
||||||
|
"identity:list_groups_for_user": "rule:admin_or_owner",
|
||||||
|
"identity:create_group": "rule:admin_required",
|
||||||
|
"identity:update_group": "rule:admin_required",
|
||||||
|
"identity:delete_group": "rule:admin_required",
|
||||||
|
"identity:list_users_in_group": "rule:admin_required",
|
||||||
|
"identity:remove_user_from_group": "rule:admin_required",
|
||||||
|
"identity:check_user_in_group": "rule:admin_required",
|
||||||
|
"identity:add_user_to_group": "rule:admin_required",
|
||||||
|
|
||||||
|
"identity:get_credential": "rule:admin_required",
|
||||||
|
"identity:list_credentials": "rule:admin_required",
|
||||||
|
"identity:create_credential": "rule:admin_required",
|
||||||
|
"identity:update_credential": "rule:admin_required",
|
||||||
|
"identity:delete_credential": "rule:admin_required",
|
||||||
|
|
||||||
|
"identity:ec2_get_credential": "rule:admin_or_owner",
|
||||||
|
"identity:ec2_list_credentials": "rule:admin_or_owner",
|
||||||
|
"identity:ec2_create_credential": "rule:admin_or_owner",
|
||||||
|
"identity:ec2_delete_credential": "rule:admin_required or (rule:owner and user_id:%(target.credential.user_id)s)",
|
||||||
|
|
||||||
|
"identity:get_role": "rule:admin_required",
|
||||||
|
"identity:list_roles": "rule:admin_required",
|
||||||
|
"identity:create_role": "rule:admin_required",
|
||||||
|
"identity:update_role": "rule:admin_required",
|
||||||
|
"identity:delete_role": "rule:admin_required",
|
||||||
|
|
||||||
|
"identity:check_grant": "rule:admin_required",
|
||||||
|
"identity:list_grants": "rule:admin_required",
|
||||||
|
"identity:create_grant": "rule:admin_required",
|
||||||
|
"identity:revoke_grant": "rule:admin_required",
|
||||||
|
|
||||||
|
"identity:list_role_assignments": "rule:admin_required",
|
||||||
|
|
||||||
|
"identity:get_policy": "rule:admin_required",
|
||||||
|
"identity:list_policies": "rule:admin_required",
|
||||||
|
"identity:create_policy": "rule:admin_required",
|
||||||
|
"identity:update_policy": "rule:admin_required",
|
||||||
|
"identity:delete_policy": "rule:admin_required",
|
||||||
|
|
||||||
|
"identity:check_token": "rule:admin_required",
|
||||||
|
"identity:validate_token": "rule:service_or_admin",
|
||||||
|
"identity:validate_token_head": "rule:service_or_admin",
|
||||||
|
"identity:revocation_list": "rule:service_or_admin",
|
||||||
|
"identity:revoke_token": "rule:admin_or_owner",
|
||||||
|
|
||||||
|
"identity:create_trust": "user_id:%(trust.trustor_user_id)s",
|
||||||
|
"identity:get_trust": "rule:admin_or_owner",
|
||||||
|
"identity:list_trusts": "",
|
||||||
|
"identity:list_roles_for_trust": "",
|
||||||
|
"identity:check_role_for_trust": "",
|
||||||
|
"identity:get_role_for_trust": "",
|
||||||
|
"identity:delete_trust": "",
|
||||||
|
|
||||||
|
"identity:create_consumer": "rule:admin_required",
|
||||||
|
"identity:get_consumer": "rule:admin_required",
|
||||||
|
"identity:list_consumers": "rule:admin_required",
|
||||||
|
"identity:delete_consumer": "rule:admin_required",
|
||||||
|
"identity:update_consumer": "rule:admin_required",
|
||||||
|
|
||||||
|
"identity:authorize_request_token": "rule:admin_required",
|
||||||
|
"identity:list_access_token_roles": "rule:admin_required",
|
||||||
|
"identity:get_access_token_role": "rule:admin_required",
|
||||||
|
"identity:list_access_tokens": "rule:admin_required",
|
||||||
|
"identity:get_access_token": "rule:admin_required",
|
||||||
|
"identity:delete_access_token": "rule:admin_required",
|
||||||
|
|
||||||
|
"identity:list_projects_for_endpoint": "rule:admin_required",
|
||||||
|
"identity:add_endpoint_to_project": "rule:admin_required",
|
||||||
|
"identity:check_endpoint_in_project": "rule:admin_required",
|
||||||
|
"identity:list_endpoints_for_project": "rule:admin_required",
|
||||||
|
"identity:remove_endpoint_from_project": "rule:admin_required",
|
||||||
|
|
||||||
|
"identity:create_endpoint_group": "rule:admin_required",
|
||||||
|
"identity:list_endpoint_groups": "rule:admin_required",
|
||||||
|
"identity:get_endpoint_group": "rule:admin_required",
|
||||||
|
"identity:update_endpoint_group": "rule:admin_required",
|
||||||
|
"identity:delete_endpoint_group": "rule:admin_required",
|
||||||
|
"identity:list_projects_associated_with_endpoint_group": "rule:admin_required",
|
||||||
|
"identity:list_endpoints_associated_with_endpoint_group": "rule:admin_required",
|
||||||
|
"identity:list_endpoint_groups_for_project": "rule:admin_required",
|
||||||
|
"identity:add_endpoint_group_to_project": "rule:admin_required",
|
||||||
|
"identity:remove_endpoint_group_from_project": "rule:admin_required",
|
||||||
|
|
||||||
|
"identity:create_identity_provider": "rule:admin_required",
|
||||||
|
"identity:list_identity_providers": "rule:admin_required",
|
||||||
|
"identity:get_identity_providers": "rule:admin_required",
|
||||||
|
"identity:update_identity_provider": "rule:admin_required",
|
||||||
|
"identity:delete_identity_provider": "rule:admin_required",
|
||||||
|
|
||||||
|
"identity:create_protocol": "rule:admin_required",
|
||||||
|
"identity:update_protocol": "rule:admin_required",
|
||||||
|
"identity:get_protocol": "rule:admin_required",
|
||||||
|
"identity:list_protocols": "rule:admin_required",
|
||||||
|
"identity:delete_protocol": "rule:admin_required",
|
||||||
|
|
||||||
|
"identity:create_mapping": "rule:admin_required",
|
||||||
|
"identity:get_mapping": "rule:admin_required",
|
||||||
|
"identity:list_mappings": "rule:admin_required",
|
||||||
|
"identity:delete_mapping": "rule:admin_required",
|
||||||
|
"identity:update_mapping": "rule:admin_required",
|
||||||
|
|
||||||
|
"identity:get_auth_catalog": "",
|
||||||
|
"identity:get_auth_projects": "",
|
||||||
|
"identity:get_auth_domains": "",
|
||||||
|
|
||||||
|
"identity:list_projects_for_groups": "",
|
||||||
|
"identity:list_domains_for_groups": "",
|
||||||
|
|
||||||
|
"identity:list_revoke_events": "",
|
||||||
|
|
||||||
|
"identity:create_policy_association_for_endpoint": "rule:admin_required",
|
||||||
|
"identity:check_policy_association_for_endpoint": "rule:admin_required",
|
||||||
|
"identity:delete_policy_association_for_endpoint": "rule:admin_required",
|
||||||
|
"identity:create_policy_association_for_service": "rule:admin_required",
|
||||||
|
"identity:check_policy_association_for_service": "rule:admin_required",
|
||||||
|
"identity:delete_policy_association_for_service": "rule:admin_required",
|
||||||
|
"identity:create_policy_association_for_region_and_service": "rule:admin_required",
|
||||||
|
"identity:check_policy_association_for_region_and_service": "rule:admin_required",
|
||||||
|
"identity:delete_policy_association_for_region_and_service": "rule:admin_required",
|
||||||
|
"identity:get_policy_for_endpoint": "rule:admin_required",
|
||||||
|
"identity:list_endpoints_for_policy": "rule:admin_required"
|
||||||
|
}
|
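The rules above use the policy language consumed by Keystone's policy engine: an empty string means the call is always allowed, "rule:<name>" delegates to another named rule in the same file, and "user_id:%(trust.trustor_user_id)s" compares a field of the caller's credentials against a field of the target object. A minimal, illustrative evaluator for just those three forms (not the real engine; the policy.json path and the sample credentials are hypothetical):

import json


def check(rule, rules, creds, target):
    # '' means the operation is always allowed
    if rule == '':
        return True
    # 'rule:<name>' delegates to another named rule in the same file
    if rule.startswith('rule:'):
        return check(rules[rule[len('rule:'):]], rules, creds, target)
    # '<cred_key>:%(target_key)s' compares a credential against the target object
    key, _, value = rule.partition(':')
    if value.startswith('%(') and value.endswith(')s'):
        return creds.get(key) == target.get(value[2:-2])
    return creds.get(key) == value


with open('policy.json') as f:   # hypothetical path to the file shown above
    rules = json.load(f)

creds = {'user_id': 'u-123'}
target = {'trust.trustor_user_id': 'u-123'}
print(check(rules['identity:create_trust'], rules, creds, target))  # True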
6
x/resources/keystone_service/actions/remove.yml
Normal file
@ -0,0 +1,6 @@
# TODO
- hosts: [{{ ip }}]
  sudo: yes
  tasks:
    - shell: docker stop {{ name }}
    - shell: docker rm {{ name }}
17
x/resources/keystone_service/actions/run.yml
Normal file
@ -0,0 +1,17 @@
- hosts: [{{ ip }}]
  sudo: yes
  tasks:
    - name: keystone container
      docker:
        command: /bin/bash -c "keystone-manage db_sync && /usr/bin/keystone-all"
        name: {{ name }}
        image: {{ image }}
        state: running
        expose:
          - 5000
          - 35357
        ports:
          - {{ port }}:5000
          - {{ admin_port }}:35357
        volumes:
          - {{ config_dir }}:/etc/keystone
11
x/resources/keystone_service/meta.yaml
Normal file
@ -0,0 +1,11 @@
id: keystone
handler: ansible
version: 1.0.0
input:
  image: kollaglue/centos-rdo-keystone
  config_dir:
  port:
  admin_port:
  ip:
  ssh_key:
  ssh_user:
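The keystone_service resource pairs Jinja-templated action playbooks (run.yml / remove.yml above) with a meta.yaml that declares the inputs they consume. A rough sketch, assuming jinja2 and a local ansible-playbook binary, of how an ansible handler could render an action with those inputs and execute it against the node's IP; the function and the input values below are illustrative, not the project's actual handler:

import subprocess
import tempfile

from jinja2 import Template


def run_action(action_path, inputs):
    # render the {{ ... }} placeholders with the resource's inputs
    with open(action_path) as f:
        playbook = Template(f.read()).render(**inputs)

    with tempfile.NamedTemporaryFile(mode='w', suffix='.yml', delete=False) as tmp:
        tmp.write(playbook)

    # '<ip>,' builds a one-host inventory on the command line
    subprocess.check_call(['ansible-playbook', '-i', '{0},'.format(inputs['ip']), tmp.name])


run_action('x/resources/keystone_service/actions/run.yml', {
    'ip': '10.0.0.3',
    'name': 'keystone',                   # not declared in meta.yaml input:, assumed injected by the handler
    'image': 'kollaglue/centos-rdo-keystone',
    'config_dir': '/etc/solar/keystone',  # illustrative values
    'port': 5000,
    'admin_port': 35357,
})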
6
x/resources/keystone_user/actions/remove.yml
Normal file
@ -0,0 +1,6 @@
- hosts: [{{ ip }}]
  sudo: yes
  tasks:
    - name: keystone user
      keystone_user: endpoint=http://{{keystone_host}}:{{keystone_port}}/v2.0/ user={{user_name}} tenant={{tenant_name}} state=absent
    - keystone_user: endpoint=http://{{keystone_host}}:{{keystone_port}}/v2.0/ tenant={{tenant_name}} state=absent
6
x/resources/keystone_user/actions/run.yml
Normal file
@ -0,0 +1,6 @@
- hosts: [{{ ip }}]
  sudo: yes
  tasks:
    - name: keystone user
      keystone_user: endpoint=http://{{keystone_host}}:{{keystone_port}}/v2.0/ tenant={{tenant_name}} state=present
    - keystone_user: endpoint=http://{{keystone_host}}:{{keystone_port}}/v2.0/ user={{user_name}} password={{user_password}} tenant={{tenant_name}} state=present
14
x/resources/keystone_user/meta.yaml
Normal file
@ -0,0 +1,14 @@
id: keystone_user
handler: ansible
version: 1.0.0
input:
  keystone_host:
  keystone_port:
  login_user:
  login_token:
  user_name:
  user_password:
  tenant_name:
  ip:
  ssh_key:
  ssh_user:
11
x/resources/mariadb_db/actions/remove.yml
Normal file
@ -0,0 +1,11 @@
- hosts: [{{ ip }}]
  sudo: yes
  tasks:
    - name: mariadb db
      mysql_db:
        name: {{db_name}}
        state: absent
        login_user: root
        login_password: {{login_password}}
        login_port: {{login_port}}
        login_host: 127.0.0.1
11
x/resources/mariadb_db/actions/run.yml
Normal file
@ -0,0 +1,11 @@
- hosts: [{{ ip }}]
  sudo: yes
  tasks:
    - name: mariadb db
      mysql_db:
        name: {{db_name}}
        state: present
        login_user: root
        login_password: {{login_password}}
        login_port: {{login_port}}
        login_host: 127.0.0.1
14
x/resources/mariadb_db/meta.yaml
Normal file
@ -0,0 +1,14 @@
id: mariadb_table
handler: ansible
version: 1.0.0
actions:
  run: run.yml
  remove: remove.yml
input:
  db_name:
  login_password:
  login_port:
  login_user:
  ip:
  ssh_key:
  ssh_user:
8
x/resources/mariadb_service/actions/remove.yml
Normal file
@ -0,0 +1,8 @@
- hosts: [{{ ip }}]
  sudo: yes
  tasks:
    - name: mariadb container
      docker:
        name: {{ name }}
        image: {{ image }}
        state: absent
12
x/resources/mariadb_service/actions/run.yml
Normal file
@ -0,0 +1,12 @@
- hosts: [{{ ip }}]
  sudo: yes
  tasks:
    - name: mariadb container
      docker:
        name: {{ name }}
        image: {{ image }}
        state: running
        ports:
          - {{ port }}:3306
        env:
          MYSQL_ROOT_PASSWORD: {{ root_password }}
10
x/resources/mariadb_service/meta.yaml
Normal file
@ -0,0 +1,10 @@
id: mariadb
handler: ansible
version: 1.0.0
input:
  image:
  root_password:
  port:
  ip:
  ssh_key:
  ssh_user:
11
x/resources/mariadb_user/actions/remove.yml
Normal file
@ -0,0 +1,11 @@
- hosts: [{{ ip }}]
  sudo: yes
  tasks:
    - name: mariadb user
      mysql_user:
        name: {{new_user_name}}
        state: absent
        login_user: root
        login_password: {{login_password}}
        login_port: {{login_port}}
        login_host: 127.0.0.1
14
x/resources/mariadb_user/actions/run.yml
Normal file
@ -0,0 +1,14 @@
- hosts: [{{ ip }}]
  sudo: yes
  tasks:
    - name: mariadb user
      mysql_user:
        name: {{new_user_name}}
        password: {{new_user_password}}
        priv: {{db_name}}.*:ALL
        host: '%'
        state: present
        login_user: root
        login_password: {{login_password}}
        login_port: {{login_port}}
        login_host: 127.0.0.1
16
x/resources/mariadb_user/meta.yaml
Normal file
@ -0,0 +1,16 @@
id: mariadb_user
handler: ansible
version: 1.0.0
actions:
  run: run.yml
  remove: remove.yml
input:
  new_user_password:
  new_user_name:
  db_name:
  login_password:
  login_port:
  login_user:
  ip:
  ssh_key:
  ssh_user:
6
x/resources/nova/actions/remove.yml
Normal file
@ -0,0 +1,6 @@
# TODO
- hosts: [{{ ip }}]
  sudo: yes
  tasks:
    - shell: docker stop {{ name }}
    - shell: docker rm {{ name }}
6
x/resources/nova/actions/run.yml
Normal file
@ -0,0 +1,6 @@
# TODO
- hosts: [{{ ip }}]
  sudo: yes
  tasks:
    - shell: docker run -d --net="host" --privileged \
        --name {{ name }} {{ image }}
7
x/resources/nova/meta.yaml
Normal file
@ -0,0 +1,7 @@
id: nova
handler: ansible
version: 1.0.0
input:
  ip:
  port: 8774
  image: # TODO
8
x/resources/ro_node/meta.yaml
Normal file
@ -0,0 +1,8 @@
id: mariadb
handler: none
version: 1.0.0
actions:
input:
  ip:
  ssh_key:
  ssh_user:
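Every resource above follows the same layout: a meta.yaml naming the handler and declaring input:, next to Jinja-templated action playbooks. A small illustrative check (assuming PyYAML is installed; not part of this changeset) that walks that layout and reports any {{ placeholder }} used by an action but not declared in the resource's meta.yaml, treating "name" as a value the handler is assumed to supply:

import glob
import os
import re

import yaml

PLACEHOLDER = re.compile(r'{{\s*(\w+)\s*}}')
HANDLER_SUPPLIED = {'name'}  # assumption: injected by the handler, not declared in meta.yaml


def undeclared_inputs(resource_dir):
    with open(os.path.join(resource_dir, 'meta.yaml')) as f:
        meta = yaml.safe_load(f)
    declared = set(meta.get('input') or {}) | HANDLER_SUPPLIED

    used = set()
    for action in glob.glob(os.path.join(resource_dir, 'actions', '*.yml')):
        with open(action) as f:
            used.update(PLACEHOLDER.findall(f.read()))
    return used - declared


for path in sorted(glob.glob('x/resources/*')):
    if not os.path.isfile(os.path.join(path, 'meta.yaml')):
        continue
    missing = undeclared_inputs(path)
    if missing:
        print('{0}: undeclared inputs {1}'.format(path, sorted(missing)))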
201
x/signals.py
Normal file
@ -0,0 +1,201 @@
# -*- coding: UTF-8 -*-
from collections import defaultdict
import itertools
import networkx as nx
import os

import db

from x import utils


CLIENTS_CONFIG_KEY = 'clients-data-file'
CLIENTS = utils.read_config_file(CLIENTS_CONFIG_KEY)


class Connections(object):
    @staticmethod
    def add(emitter, src, receiver, dst):
        if src not in emitter.args:
            return

        # TODO: implement general circular detection, this one is simple
        if [emitter.name, src] in CLIENTS.get(receiver.name, {}).get(dst, []):
            raise Exception('Attempted to create cycle in dependencies. Not nice.')

        CLIENTS.setdefault(emitter.name, {})
        CLIENTS[emitter.name].setdefault(src, [])
        if [receiver.name, dst] not in CLIENTS[emitter.name][src]:
            CLIENTS[emitter.name][src].append([receiver.name, dst])

        utils.save_to_config_file(CLIENTS_CONFIG_KEY, CLIENTS)

    @staticmethod
    def remove(emitter, src, receiver, dst):
        CLIENTS[emitter.name][src] = [
            destination for destination in CLIENTS[emitter.name][src]
            if destination != [receiver.name, dst]
        ]

        utils.save_to_config_file(CLIENTS_CONFIG_KEY, CLIENTS)

    @staticmethod
    def reconnect_all():
        """Reconstruct connections for resource inputs from CLIENTS.

        :return:
        """
        for emitter_name, dest_dict in CLIENTS.items():
            emitter = db.get_resource(emitter_name)
            for emitter_input, destinations in dest_dict.items():
                for receiver_name, receiver_input in destinations:
                    receiver = db.get_resource(receiver_name)
                    emitter.args[emitter_input].subscribe(
                        receiver.args[receiver_input])

    @staticmethod
    def clear():
        global CLIENTS

        CLIENTS = {}

        path = utils.read_config()[CLIENTS_CONFIG_KEY]
        if os.path.exists(path):
            os.remove(path)


def guess_mapping(emitter, receiver):
    """Guess connection mapping between emitter and receiver.

    Suppose emitter and receiver have common inputs:
    ip, ssh_key, ssh_user

    Then we return a connection mapping like this:

    {
        'ip': '<receiver>.ip',
        'ssh_key': '<receiver>.ssh_key',
        'ssh_user': '<receiver>.ssh_user'
    }

    :param emitter:
    :param receiver:
    :return:
    """
    guessed = {}
    for key in emitter.requires:
        if key in receiver.requires:
            guessed[key] = key

    return guessed


def connect(emitter, receiver, mapping=None):
    guessed = guess_mapping(emitter, receiver)
    mapping = mapping or guessed

    for src, dst in mapping.items():
        # Disconnect all receiver inputs
        # Check if receiver input is of list type first
        if receiver.args[dst].type_ != 'list':
            disconnect_receiver_by_input(receiver, dst)

        emitter.args[src].subscribe(receiver.args[dst])

    receiver.save()


def disconnect(emitter, receiver):
    for src, destinations in CLIENTS[emitter.name].items():
        disconnect_by_src(emitter, src, receiver)

        for destination in destinations:
            receiver_input = destination[1]
            if receiver.args[receiver_input].type_ != 'list':
                print 'Removing input {} from {}'.format(receiver_input, receiver.name)
            emitter.args[src].unsubscribe(receiver.args[receiver_input])


def disconnect_receiver_by_input(receiver, input):
    """Find receiver connection by input and disconnect it.

    :param receiver:
    :param input:
    :return:
    """
    for emitter_name, inputs in CLIENTS.items():
        emitter = db.get_resource(emitter_name)
        disconnect_by_src(emitter, input, receiver)


def disconnect_by_src(emitter, src, receiver):
    if src in CLIENTS[emitter.name]:
        CLIENTS[emitter.name][src] = [
            destination for destination in CLIENTS[emitter.name][src]
            if destination[0] != receiver.name
        ]

        utils.save_to_config_file(CLIENTS_CONFIG_KEY, CLIENTS)


def notify(source, key, value):
    CLIENTS.setdefault(source.name, {})
    print 'Notify', source.name, key, value, CLIENTS[source.name]
    if key in CLIENTS[source.name]:
        for client, r_key in CLIENTS[source.name][key]:
            resource = db.get_resource(client)
            print 'Resource found', client
            if resource:
                resource.update({r_key: value}, emitter=source)
            else:
                print 'Resource {} deleted?'.format(client)
                pass


def assign_connections(receiver, connections):
    mappings = defaultdict(list)
    for key, dest in connections.iteritems():
        resource, r_key = dest.split('.')
        mappings[resource].append([r_key, key])
    for resource, r_mappings in mappings.iteritems():
        connect(resource, receiver, r_mappings)


def connection_graph():
    resource_dependencies = {}

    for source, destination_values in CLIENTS.items():
        resource_dependencies.setdefault(source, set())
        for src, destinations in destination_values.items():
            resource_dependencies[source].update([
                destination[0] for destination in destinations
            ])

    g = nx.DiGraph()

    # TODO: tags as graph node attributes
    for source, destinations in resource_dependencies.items():
        g.add_node(source)
        g.add_nodes_from(destinations)
        g.add_edges_from(
            itertools.izip(
                itertools.repeat(source),
                destinations
            )
        )

    return g


def detailed_connection_graph():
    g = nx.MultiDiGraph()

    for emitter_name, destination_values in CLIENTS.items():
        for emitter_input, receivers in CLIENTS[emitter_name].items():
            for receiver_name, receiver_input in receivers:
                label = emitter_input
                if emitter_input != receiver_input:
                    label = '{}:{}'.format(emitter_input, receiver_input)
                g.add_edge(emitter_name, receiver_name, label=label)

    return g
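signals.py keeps the wiring in CLIENTS as {emitter: {emitter_input: [[receiver_name, receiver_input], ...]}}, guesses mappings from shared input names, and pushes changed values to subscribed receivers via notify(). A toy stand-in for that behaviour, using a hypothetical stub Resource class instead of the project's real resource/args objects, just to show how guessed wiring and value propagation work:

from collections import defaultdict

CLIENTS = defaultdict(lambda: defaultdict(list))  # {emitter: {input: [(receiver, input), ...]}}


class Resource(object):
    """Hypothetical stub; the real resource/args objects live elsewhere in this diff."""

    def __init__(self, name, args):
        self.name = name
        self.args = dict(args)

    def update(self, key, value):
        self.args[key] = value
        for receiver, r_key in CLIENTS[self.name][key]:
            receiver.update(r_key, value)  # push the new value downstream, like notify()


def guess_mapping(emitter, receiver):
    # same idea as guess_mapping above: wire inputs that share a name
    return {key: key for key in emitter.args if key in receiver.args}


def connect(emitter, receiver, mapping=None):
    for src, dst in (mapping or guess_mapping(emitter, receiver)).items():
        CLIENTS[emitter.name][src].append((receiver, dst))
        receiver.args[dst] = emitter.args[src]


node = Resource('node1', {'ip': '10.0.0.3', 'ssh_key': '/vagrant/tmp/keys/ssh_private', 'ssh_user': 'vagrant'})
mariadb = Resource('mariadb', {'ip': None, 'ssh_key': None, 'ssh_user': None, 'image': 'mariadb'})

connect(node, mariadb)         # guessed mapping: ip, ssh_key, ssh_user
node.update('ip', '10.0.0.4')  # change propagates to the receiver
print(mariadb.args['ip'])      # -> 10.0.0.4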
1
x/test/__init__.py
Normal file
@ -0,0 +1 @@
__author__ = 'przemek'
Some files were not shown because too many files have changed in this diff.