Moved examples, resources and templates

The new location is https://github.com/Mirantis/solar-resources;
it will later be moved to the openstack namespace.
- the Vagrant setup assumes that solar-resources is cloned into /vagrant/solar-resources
- adjusted the docker-compose file
- added solar-resources to .gitignore

Change-Id: If2fea99145395606e6c15c9adbc127ecff4823f9
Jedrzej Nowak 2016-01-13 13:08:28 +01:00
parent 0e808a985d
commit 4060b36fed
351 changed files with 19 additions and 16182 deletions
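
Since the examples, resources and templates now live in a separate repository, fetching them by hand looks roughly like this (a minimal sketch; the clone URL and destination are the ones used by the bootstrap playbook further down in this diff):

```bash
# Sketch: manually fetch the relocated examples/resources/templates.
# Assumes the Vagrant layout used throughout this change (/vagrant/...).
git clone https://github.com/Mirantis/solar-resources.git /vagrant/solar-resources
```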

.gitignore

@ -57,3 +57,6 @@ solar/.cache
 .solar_config_override
 .ropeproject
+# for simplicity let's keep solar-resources there
+solar-resources


@ -63,7 +63,7 @@ For now all commands should be executed from `solar-dev` machine from `/vagrant
 Basic flow is:
-1. Create some resources (look at `examples/openstack/openstack.py`) and connect
+1. Create some resources (look at `solar-resources/examples/openstack/openstack.py`) and connect
    them between each other, and place them on nodes.
 1. Run `solar changes stage` (this stages the changes)
 1. Run `solar changes process` (this prepares orchestrator graph, returning
@ -288,7 +288,7 @@ riak_master_service.connect_list(
 )
 ```
-For full Riak example, please look at `examples/riak/riaks-template.py`.
+For full Riak example, please look at `solar-resources/examples/riak/riaks-template.py`.
 Full documentation of individual functions is found in the `solar/template.py` file.
@ -301,4 +301,4 @@ Solar is shipped with sane defaults in `vagrant-setting.yaml_defaults`. If you n
 * In `vagrant-setting.yaml_defaults` or `vagrant-settings.yaml` file uncomment `preprovisioned: false` line.
 * Run `vagrant up`, it will take some time because it builds image for bootstrap and IBP images.
-* Now you can run provisioning `/vagrant/examples/provisioning/provision.sh`
+* Now you can run provisioning `/vagrant/solar-resources/examples/provisioning/provision.sh`
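
With the path changes above, running the relocated OpenStack example would look roughly as follows (a sketch assuming solar-resources is cloned into /vagrant/solar-resources, as the Vagrant setup expects; the `create_all` entry point comes from the example script removed below):

```bash
# Sketch: run the moved OpenStack example from the solar-dev machine.
cd /vagrant
python solar-resources/examples/openstack/openstack.py create_all
solar changes stage    # stage the changes
solar changes process  # prepare the orchestrator graph
solar orch run-once last
```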


@ -14,6 +14,10 @@
 - hosts: all
   tasks:
+    # setup solar-resources
+    # change to openstack/solar-resources later
+    - git: repo=https://github.com/Mirantis/solar-resources.git dest=/vagrant/solar-resources update=no owner=vagrant
     # set default config location
     - lineinfile:
         dest: /home/vagrant/.bashrc
@ -48,8 +52,8 @@
       state: present
     - file: path=/var/lib/solar/repositories state=directory owner=vagrant
-    - file: src=/vagrant/resources dest=/var/lib/solar/repositories/resources state=link owner=vagrant
-    - file: src=/vagrant/templates dest=/var/lib/solar/repositories/templates state=link owner=vagrant
+    - file: src=/vagrant/solar-resources/resources dest=/var/lib/solar/repositories/resources state=link owner=vagrant
+    - file: src=/vagrant/solar-resources/templates dest=/var/lib/solar/repositories/templates state=link owner=vagrant
     - name: Starting docker containers
       shell: docker-compose up -d chdir=/vagrant
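
After the playbook tasks above have run, a quick way to check the resulting layout on the solar-dev box is sketched below (paths taken from the git and file tasks in this hunk):

```bash
# Sketch: verify the clone and the repository symlinks set up by the playbook.
ls -d /vagrant/solar-resources
ls -l /var/lib/solar/repositories
# expected, roughly:
#   resources -> /vagrant/solar-resources/resources
#   templates -> /vagrant/solar-resources/templates
```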


@ -13,6 +13,10 @@ Resources are defined in ``meta.yaml`` file. This file is responsible for basic
 configuration of given resource. Below is an explanation what constitutes
 typical resource.
+.. TODO: change to openstack/solar-resources later
+.. note::
+   You can find example resources https://github.com/Mirantis/solar-resources
 Basic resource structure
 ------------------------


@ -5,9 +5,7 @@ solar-celery:
   volumes:
     - /vagrant/.vagrant:/vagrant/.vagrant
     - /vagrant:/solar
-    - /vagrant/templates:/vagrant/templates
-    - /vagrant/resources:/vagrant/resources
-    - /vagrant/library:/vagrant/library
+    - /vagrant/solar-resources:/vagrant/solar-resources
     - /root/.ssh:/root/.ssh
     - ./bootstrap/playbooks/celery.yaml:/celery.yaml
     - /var/lib/solar/repositories:/var/lib/solar/repositories
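
The compose change above replaces the three separate templates/resources/library mounts with a single solar-resources mount; a quick way to confirm it from the host (a sketch, assuming docker-compose.yml lives in /vagrant, as the playbook's `chdir=/vagrant` suggests):

```bash
# Sketch: check that the solar-celery service now carries the single
# /vagrant/solar-resources bind mount in its resolved configuration.
cd /vagrant
docker-compose config | grep solar-resources
```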


@ -1,13 +0,0 @@
# Demo of the `solar_bootstrap` Resource
You need to instantiate Vagrant with a slave node which is unprovisioned
(i.e. started from the `trusty64` Vagrant box).
You can start the boxes using the `Vagrantfile` in the master directory and
`vagrant-settings.yml` from this directory.
Running
```bash
python example-bootstrap.py deploy
```
will deploy a full Solar env to node `solar-dev2`.


@ -1,89 +0,0 @@
#!/usr/bin/env python
import click
import sys
import time
from solar.core import actions
from solar.core import resource
from solar.core import signals
from solar.core import validation
from solar.core.resource import composer as cr
from solar import errors
from solar.dblayer.model import ModelMeta
@click.group()
def main():
pass
def setup_resources():
ModelMeta.remove_all()
node2 = cr.create('node2', 'resources/ro_node/', {
'ip': '10.0.0.4',
'ssh_key': '/vagrant/.vagrant/machines/solar-dev2/virtualbox/private_key',
'ssh_user': 'vagrant'
})[0]
solar_bootstrap2 = cr.create('solar_bootstrap2', 'resources/solar_bootstrap', {'master_ip': '10.0.0.2'})[0]
signals.connect(node2, solar_bootstrap2)
has_errors = False
for r in locals().values():
if not isinstance(r, resource.Resource):
continue
print 'Validating {}'.format(r.name)
errors = validation.validate_resource(r)
if errors:
has_errors = True
print 'ERROR: %s: %s' % (r.name, errors)
if has_errors:
sys.exit(1)
resources_to_run = [
'solar_bootstrap2',
]
@click.command()
def deploy():
setup_resources()
# run
resources = resource.load_all()
resources = {r.name: r for r in resources}
for name in resources_to_run:
try:
actions.resource_action(resources[name], 'run')
except errors.SolarError as e:
print 'WARNING: %s' % str(e)
raise
time.sleep(10)
@click.command()
def undeploy():
resources = resource.load_all()
resources = {r.name: r for r in resources}
for name in reversed(resources_to_run):
try:
actions.resource_action(resources[name], 'remove')
except errors.SolarError as e:
print 'WARNING: %s' % str(e)
ModelMeta.remove_all()
main.add_command(deploy)
main.add_command(undeploy)
if __name__ == '__main__':
main()


@ -1,5 +0,0 @@
# rename it to vagrant-settings.yml then Vagrantfile
# will use values from this file
slaves_count: 3
slaves_image: ubuntu/trusty64


@ -1,10 +0,0 @@
This example shows how to use solar via CLI.
Usage
=====
Run
`bash ./example.sh`
After this you can run `solar orch report last` and wait until all tasks have the status SUCCESS.


@ -1,18 +0,0 @@
#!/bin/bash
set -eux
function deploy {
# this two commands will clean db
solar resource clear_all
solar resource create nodes templates/nodes '{"count": 1}'
solar resource create mariadb1 /vagrant/resources/mariadb_service image=mariadb port=3306
solar connect node1 mariadb1
solar changes stage
solar changes process
solar orch run-once last
solar orch report last
}
deploy


@ -1,95 +0,0 @@
A very simple solar example: two nodes + hosts file mapping
Run:
`python examples/hosts_file/hosts.py`
Then you can continue with standard solar things:
```
solar changes stage -d
solar changes process
solar or run-once last
watch -n 1 solar or report last
```
Wait until all actions have state `SUCCESS`.
After that, check the `/etc/hosts` files on both nodes; they will contain entries like:
```
10.0.0.3 first1441705177.99
10.0.0.4 second1441705178.0
```
If you want to try out the revert functionality, you can do it in the following way.
After you have created all the stuff, print the history like this:
`solar ch history`
Output:
```
log task=hosts_file1.run uid=282fe919-6059-4100-affc-56a2b3992d9d
log task=hosts_file2.run uid=774f5a49-00f1-4bae-8a77-90d1b2d54164
log task=node1.run uid=2559f22c-5aa9-4c05-91c6-b70884190a56
log task=node2.run uid=18f06abe-3e8d-4356-b172-128e1dded0e6
```
Now you can try to revert the creation of hosts_file1
```
solar ch revert 282fe919-6059-4100-affc-56a2b3992d9d
solar ch stage
log task=hosts_file1.remove uid=1fe456c1-a847-4902-88bf-b7f2c5687d40
solar ch process
solar or run-once last
watch -n 1 solar or report last
```
For now this file will simply be cleaned (a more sophisticated task can be added later).
You can also create a revert of your revert, which will lead to the hosts_file1
resource being created again and /etc/hosts having the appropriate content:
```
solar ch revert 282fe919-6059-4100-affc-56a2b3992d9d
solar ch stage
log task=hosts_file1.remove uid=1fe456c1-a847-4902-88bf-b7f2c5687d40
solar ch process
solar changes run-once last
watch -n 1 solar changes report last
```
After this you can revert the result of your previous revert, which will
create this file with the relevant content.
```
solar ch history -n 1
log task=hosts_file1.remove uid=1fe456c1-a847-4902-88bf-b7f2c5687d40
solar ch revert 1fe456c1-a847-4902-88bf-b7f2c5687d40
solar ch stage
log task=hosts_file1.run uid=493326b2-989f-4b94-a22c-0bbd0fc5e755
solar ch process
solar changes run-once last
watch -n 1 solar changes report last
```
How to discard pending changes?
After the database was populated by some example, let's say
```
python examples/hosts_file/hosts.py deploy
```
The user is able to discard all changes with
```
solar ch discard
```
Or any particular change with
```
solar ch stage
log task=hosts_file1.run uid=a5990538-c9c6-49e4-8d58-29fae9c7aaed
solar ch discard a5990538-c9c6-49e4-8d58-29fae9c7aaed
```


@ -1,40 +0,0 @@
#!/usr/bin/python
import click
import sys
import time
from solar.core import signals
from solar.core.resource import composer as cr
from solar.dblayer.model import ModelMeta
def run():
ModelMeta.remove_all()
resources = cr.create('nodes', 'templates/nodes', {'count': 2})
node1, node2 = [x for x in resources if x.name.startswith('node')]
hosts1, hosts2 = [x for x in resources
if x.name.startswith('hosts_file')]
node1.connect(hosts1, {
'name': 'hosts:name',
'ip': 'hosts:ip',
})
node2.connect(hosts1, {
'name': 'hosts:name',
'ip': 'hosts:ip',
})
node1.connect(hosts2, {
'name': 'hosts:name',
'ip': 'hosts:ip',
})
node2.connect(hosts2, {
'name': 'hosts:name',
'ip': 'hosts:ip',
})
run()


@ -1,15 +0,0 @@
Usage
=====
Run it from /vagrant dir:
```
solar resource clear_all
solar resource create nodes templates/nodes '{"count": 1}'
solar resource create librarian_example examples/librarian/librarian.yaml '{"node": "node1"}'
solar changes stage
solar changes process
solar orch run-once
solar orch report -w 100
```


@ -1,22 +0,0 @@
id: librarian_examples
resources:
- id: rabbitmq_service1
from: resources/rabbitmq_service
location: #{node}#
input:
management_port: 15672
port: 5672
- id: librarian
location: #{node}#
from: resources/librarian
input:
modules:
- rabbitmq_service1::module::NO_EVENTS
events:
- type: depends_on
parent_action: librarian.run
state: success
child_action: rabbitmq_service1.run


@ -1,30 +0,0 @@
This example will do the following things:
- fetch fuel-library from github
- use ./update_modules.sh to fetch librarian dependencies
- generate ceph keys on solar-dev1
- install ceph-mon on solar-dev1 (IN PROGRESS)
- install ceph-osd on solar-dev2 (TODO)
- implement removal mechanism for ceph-mon/ceph-osd (TODO)
To use it:
```
python examples/library_ceph/ceph.py
solar ch stage && solar ch process
solar or run-once last -w 120
```
If it fails, you can run a particular resource action with a lot of
debug info.
```
solar res action run ceph_mon1
```
To add repositories use
```
solar resource create apt1 templates/mos_repos node=node1 index=1
```


@ -1,73 +0,0 @@
from solar.core.resource import composer as cr
from solar.dblayer.model import ModelMeta
import yaml
STORAGE = {'objects_ceph': True,
'osd_pool_size': 2,
'pg_num': 128}
KEYSTONE = {'admin_token': 'abcde'}
NETWORK_SCHEMA = {
'endpoints': {'eth1': {'IP': ['10.0.0.3/24']}},
'roles': {'ceph/replication': 'eth1',
'ceph/public': 'eth1'}
}
NETWORK_METADATA = yaml.load("""
solar-dev1:
uid: '1'
fqdn: solar-dev1
network_roles:
ceph/public: 10.0.0.3
ceph/replication: 10.0.0.3
node_roles:
- ceph-mon
name: solar-dev1
""")
def deploy():
ModelMeta.remove_all()
resources = cr.create('nodes', 'templates/nodes', {'count': 2})
first_node, second_node = [x for x in resources if x.name.startswith('node')]
first_transp = next(x for x in resources if x.name.startswith('transport'))
library = cr.create('library1', 'resources/fuel_library', {})[0]
first_node.connect(library)
keys = cr.create('ceph_key', 'resources/ceph_keys', {})[0]
first_node.connect(keys)
remote_file = cr.create('ceph_key2', 'resources/remote_file',
{'dest': '/var/lib/astute/'})[0]
second_node.connect(remote_file)
keys.connect(remote_file, {'ip': 'remote_ip', 'path': 'remote_path'})
first_transp.connect(remote_file, {'transports': 'remote'})
ceph_mon = cr.create('ceph_mon1', 'resources/ceph_mon',
{'storage': STORAGE,
'keystone': KEYSTONE,
'network_scheme': NETWORK_SCHEMA,
'ceph_monitor_nodes': NETWORK_METADATA,
'ceph_primary_monitor_node': NETWORK_METADATA,
'role': 'controller',
})[0]
managed_apt = cr.create(
'managed_apt1', 'templates/mos_repos',
{'node': first_node.name, 'index': 0})[-1]
keys.connect(ceph_mon, {})
first_node.connect(ceph_mon,
{'ip': ['ip', 'public_vip', 'management_vip']})
library.connect(ceph_mon, {'puppet_modules': 'puppet_modules'})
managed_apt.connect(ceph_mon, {})
if __name__ == '__main__':
deploy()


@ -1,25 +0,0 @@
Bootstrapping lxc containers using solar and roles from os-ansible-deployment
First, run:
`python examples/lxc/example-lxc.py deploy`
It will do several things:
* Prepare ~10 containers on solar-dev1
* Add linux bridge on solar-dev and solar-dev1 with uid br-int53
* Setup vxlan tunnel for solar-dev and solar-dev1
* Generate ssh key and inject it into containers
Later these containers can be used as regular nodes in solar.
Check the rabbitmq example at the end of the file.
To deploy everything, use the usual solar commands.
```
solar changes stage -d
solar changes process
solar orch run-once last
watch -n 1 solar orch report last
```
Wait until all actions have state `SUCCESS`


@ -1,144 +0,0 @@
#!/usr/bin/env python
# To run:
# example-lxc.py deploy
# solar changes stage
# solar changes process
# solar orch run-once last
# watch 'solar orch report last'
import click
from solar.core import signals
from solar.core.resource import composer as cr
from solar.system_log import change
from solar.cli import orch
from solar.dblayer.model import ModelMeta
@click.group()
def main():
pass
def lxc_template(idx):
return {
'user': 'root',
'mgmt_ip': '172.18.11.{}'.format(idx),
'container_name': 'test{}'.format(idx),
'inventory_hostname': 'test{}'.format(idx),
'properties':
{'container_release': 'trusty'},
'container_networks':
{'mgmt': {
'address': '172.18.11.{}'.format(idx), # address for container
'bridge': 'br-int53', # bridge to attach veth pair
'bridge_address': '172.18.11.253/24',
'interface': 'eth1', # interface name in container
'netmask': '255.255.255.0',
'type': 'veth'}}
}
@click.command()
def deploy():
ModelMeta.remove_all()
node1 = cr.create('nodes', 'templates/nodes', {})[0]
seed = cr.create('nodes', 'templates/seed_node', {})[0]
ssh_key = cr.create('ssh_key1', 'resources/ssh_key', {
'keys_dir': '/vagrant/.ssh',
'private_key': '/vagrant/.ssh/id_rsa',
'public_key': '/vagrant/.ssh/id_rsa.pub',
'passphrase': '',
})[0]
signals.connect(seed, ssh_key)
cnets1 = cr.create('cnets1', 'resources/container_networks', {
'networks':
{'mgmt': {
'bridge': 'br-int53',
'bridge_address': '172.18.11.254/24'
}}
})[0]
cnets2 = cr.create('cnets2', 'resources/container_networks', {
'networks':
{'mgmt': {
'bridge': 'br-int53',
'bridge_address': '172.18.11.253/24'
}}
})[0]
signals.connect(seed, cnets1)
signals.connect(node1, cnets2)
vxlan_mesh1 = cr.create('vxlan_mesh1', 'resources/vxlan_mesh', {
'id': 53,
'parent': 'eth1',
'master': 'br-int53'
})[0]
vxlan_mesh2 = cr.create('vxlan_mesh2', 'resources/vxlan_mesh', {
'id': 53,
'parent': 'eth1',
'master': 'br-int53'
})[0]
# seed node should be connected anyway, because we need to be able to ssh
# into containers from any node
signals.connect(seed, vxlan_mesh1)
signals.connect(node1, vxlan_mesh2)
lxc_infra1 = cr.create('lxc_infra1', 'resources/lxc_host', {})[0]
signals.connect(node1, lxc_infra1)
lxc_hosts = range(28, 35)
hosts_map = {}
for idx in lxc_hosts:
lxc_host_idx = cr.create(
'lxc_host{}'.format(idx),
'resources/lxc_container', lxc_template(idx))[0]
hosts_map[idx] = lxc_host_idx
signals.connect(node1, lxc_host_idx, {
'ip': ['ansible_ssh_host', 'physical_host'],
})
# this is required to introduce a depends_on relationship between lxc infra
# and lxc container
signals.connect(lxc_infra1, lxc_host_idx, {'provides': 'requires'})
signals.connect(cnets2, lxc_host_idx)
signals.connect(ssh_key, lxc_host_idx, {
'public_key': 'pub_key',
'private_key': 'user_key'})
# RABBIT
rabbitmq_service1 = cr.create('rabbitmq_service1', 'resources/rabbitmq_service/', {
'management_port': 15672,
'port': 5672,
})[0]
openstack_vhost = cr.create('openstack_vhost', 'resources/rabbitmq_vhost/', {
'vhost_name': 'openstack'
})[0]
openstack_rabbitmq_user = cr.create('openstack_rabbitmq_user', 'resources/rabbitmq_user/', {
'user_name': 'openstack',
'password': 'openstack_password'
})[0]
signals.connect(hosts_map[28], rabbitmq_service1, {
'mgmt_ip': 'ip',
'user_key': 'ssh_key',
'user': 'ssh_user'})
signals.connect(rabbitmq_service1, openstack_vhost)
signals.connect(rabbitmq_service1, openstack_rabbitmq_user)
signals.connect(openstack_vhost, openstack_rabbitmq_user, {
'vhost_name',
})
print change.send_to_orchestration()
main.add_command(deploy)
if __name__ == '__main__':
main()


@ -1,15 +0,0 @@
Deploying a simple two-node OpenStack env.
You need to run it from the main solar directory. To prepare resources, run:
`python examples/openstack/openstack.py create_all`
Then to start deployment:
`solar changes stage
solar changes process
solar orch run-once last`
To see the progress:
`solar orch report`


@ -1,873 +0,0 @@
#!/usr/bin/env python
import click
import sys
from solar.core import resource
from solar.core import signals
from solar.core import validation
from solar.core.resource import composer as cr
from solar import events as evapi
from solar.dblayer.model import ModelMeta
PROFILE = False
#PROFILE = True
if PROFILE:
import StringIO
import cProfile
import pstats
pr = cProfile.Profile()
pr.enable()
# TODO
# Resource for repository OR puppet apt-module in run.pp
# add-apt-repository cloud-archive:juno
# To discuss: install stuff in Docker container
# NOTE
# No copy of manifests, pull from upstream (implemented in the librarian resource)
# Official puppet manifests, not fuel-library
@click.group()
def main():
pass
def prepare_nodes(nodes_count):
resources = cr.create('nodes', 'templates/nodes', {"count": nodes_count})
nodes = resources.like('node')
resources = cr.create('nodes_network', 'templates/nodes_network', {"count": nodes_count})
nodes_sdn = resources.like('node')
r = {}
for node, node_sdn in zip(nodes, nodes_sdn):
r[node.name] = node
r[node_sdn.name] = node_sdn
# LIBRARIAN
librarian = cr.create('librarian_{}'.format(node.name), 'resources/librarian', {})[0]
r[librarian.name] = librarian
node.connect(librarian, {})
# NETWORKING
# TODO(bogdando) node's IPs should be populated as br-mgmt IPs, but now are hardcoded in templates
signals.connect(node, node_sdn)
node_sdn.connect_with_events(librarian, {'module': 'modules'}, {})
evapi.add_dep(librarian.name, node_sdn.name, actions=('run', 'update'))
signals.connect(node, node_sdn)
node_sdn.connect_with_events(librarian, {'module': 'modules'}, {})
evapi.add_dep(librarian.name, node_sdn.name, actions=('run', 'update'))
return r
def setup_base(node, librarian):
# MARIADB
mariadb_service = cr.create('mariadb_service1', 'resources/mariadb_service', {
'image': 'mariadb',
'port': 3306
})[0]
node.connect(mariadb_service)
# RABBIT
rabbitmq_service = cr.create('rabbitmq_service1', 'resources/rabbitmq_service/', {
'management_port': 15672,
'port': 5672,
})[0]
openstack_vhost = cr.create('openstack_vhost', 'resources/rabbitmq_vhost/', {
'vhost_name': 'openstack'
})[0]
openstack_rabbitmq_user = cr.create('openstack_rabbitmq_user', 'resources/rabbitmq_user/', {
'user_name': 'openstack',
'password': 'openstack_password'
})[0]
node.connect(rabbitmq_service)
rabbitmq_service.connect_with_events(librarian, {'module': 'modules'}, {})
evapi.add_dep(librarian.name, rabbitmq_service.name, actions=('run', 'update'))
rabbitmq_service.connect(openstack_vhost)
rabbitmq_service.connect(openstack_rabbitmq_user)
openstack_vhost.connect(openstack_rabbitmq_user, {
'vhost_name',
})
return {'mariadb_service': mariadb_service,
'rabbitmq_service1': rabbitmq_service,
'openstack_vhost': openstack_vhost,
'openstack_rabbitmq_user': openstack_rabbitmq_user}
def setup_keystone(node, librarian, mariadb_service, openstack_rabbitmq_user):
keystone_puppet = cr.create('keystone_puppet', 'resources/keystone_puppet', {})[0]
keystone_puppet.connect_with_events(librarian, {'module': 'modules'}, {})
evapi.add_dep(librarian.name, keystone_puppet.name, actions=('run', 'update'))
evapi.add_dep(openstack_rabbitmq_user.name, keystone_puppet.name, actions=('run', 'update'))
keystone_db = cr.create('keystone_db', 'resources/mariadb_db/', {
'db_name': 'keystone_db',
'login_user': 'root'
})[0]
keystone_db_user = cr.create('keystone_db_user', 'resources/mariadb_user/', {
'user_name': 'keystone',
'user_password': 'keystone',
})[0]
keystone_service_endpoint = cr.create('keystone_service_endpoint', 'resources/keystone_service_endpoint', {
'endpoint_name': 'keystone',
'adminurl': 'http://{{admin_ip}}:{{admin_port}}/v2.0',
'internalurl': 'http://{{internal_ip}}:{{internal_port}}/v2.0',
'publicurl': 'http://{{public_ip}}:{{public_port}}/v2.0',
'description': 'OpenStack Identity Service',
'type': 'identity'
})[0]
admin_tenant = cr.create('admin_tenant', 'resources/keystone_tenant', {
'tenant_name': 'admin'
})[0]
admin_user = cr.create('admin_user', 'resources/keystone_user', {
'user_name': 'admin',
'user_password': 'admin'
})[0]
admin_role = cr.create('admin_role', 'resources/keystone_role', {
'role_name': 'admin'
})[0]
services_tenant = cr.create('services_tenant', 'resources/keystone_tenant', {
'tenant_name': 'services'
})[0]
admin_role_services = cr.create('admin_role_services', 'resources/keystone_role', {
'role_name': 'admin'
})[0]
node.connect(keystone_db)
node.connect(keystone_db_user)
node.connect(keystone_puppet)
mariadb_service.connect(keystone_db, {
'port': 'login_port',
'root_user': 'login_user',
'root_password': 'login_password',
'ip' : 'db_host',
})
keystone_db.connect(keystone_db_user, {
'db_name',
'login_port',
'login_user',
'login_password',
'db_host'
})
node.connect(keystone_service_endpoint)
keystone_puppet.connect(keystone_service_endpoint, {
'admin_token': 'admin_token',
'admin_port': ['admin_port', 'keystone_admin_port'],
'ip': ['keystone_host', 'admin_ip', 'internal_ip', 'public_ip'],
'port': ['internal_port', 'public_port'],
})
keystone_puppet.connect(admin_tenant)
keystone_puppet.connect(admin_tenant, {
'admin_port': 'keystone_port',
'ip': 'keystone_host'
})
admin_tenant.connect(admin_user)
admin_user.connect(admin_role)
admin_tenant.connect(admin_role, { 'tenant_name' })
admin_user.connect(admin_role_services)
services_tenant.connect(admin_role_services, { 'tenant_name' })
keystone_puppet.connect(services_tenant)
keystone_puppet.connect(services_tenant, {
'admin_port': 'keystone_port',
'ip': 'keystone_host'
})
keystone_db.connect(keystone_puppet, {
'db_name',
})
keystone_db_user.connect(keystone_puppet, {
'user_name': 'db_user',
'user_password': 'db_password',
})
mariadb_service.connect(keystone_puppet, {
'ip': 'db_host',
'port': 'db_port',
})
return {'keystone_puppet': keystone_puppet,
'keystone_db': keystone_db,
'keystone_db_user': keystone_db_user,
'keystone_service_endpoint': keystone_service_endpoint,
'admin_tenant': admin_tenant,
'admin_user': admin_user,
'admin_role': admin_role,
'services_tenant': services_tenant,
'admin_role_services': admin_role_services,
}
def setup_openrc(node, keystone_puppet, admin_user):
# OPENRC
openrc = cr.create('openrc_file', 'resources/openrc_file', {})[0]
node.connect(openrc)
keystone_puppet.connect(openrc, {'ip': 'keystone_host', 'admin_port':'keystone_port'})
admin_user.connect(openrc, {'user_name': 'user_name','user_password':'password', 'tenant_name': 'tenant'})
return {'openrc_file' : openrc}
def setup_neutron(node, librarian, rabbitmq_service, openstack_rabbitmq_user, openstack_vhost):
# NEUTRON
# Deploy chain neutron -> (plugins) -> neutron_server -> ( agents )
neutron_puppet = cr.create('neutron_puppet', 'resources/neutron_puppet', {
'core_plugin': 'neutron.plugins.ml2.plugin.Ml2Plugin'
})[0]
node.connect(neutron_puppet)
neutron_puppet.connect_with_events(librarian, {'module': 'modules'}, {})
evapi.add_dep(librarian.name, neutron_puppet.name, actions=('run', 'update'))
rabbitmq_service.connect(neutron_puppet, {
'ip': 'rabbit_host',
'port': 'rabbit_port'
})
openstack_rabbitmq_user.connect(neutron_puppet, {
'user_name': 'rabbit_user',
'password': 'rabbit_password'})
openstack_vhost.connect(neutron_puppet, {
'vhost_name': 'rabbit_virtual_host'})
return {'neutron_puppet': neutron_puppet}
def setup_neutron_api(node, mariadb_service, admin_user, keystone_puppet, services_tenant, neutron_puppet):
# NEUTRON PLUGIN AND NEUTRON API (SERVER)
neutron_plugins_ml2 = cr.create('neutron_plugins_ml2', 'resources/neutron_plugins_ml2_puppet', {})[0]
node.connect(neutron_plugins_ml2)
neutron_server_puppet = cr.create('neutron_server_puppet', 'resources/neutron_server_puppet', {
'sync_db': True,
})[0]
evapi.add_dep(neutron_puppet.name, neutron_server_puppet.name, actions=('run',))
evapi.add_dep(neutron_plugins_ml2.name, neutron_server_puppet.name, actions=('run',))
evapi.add_dep(neutron_puppet.name, neutron_plugins_ml2.name, actions=('run',))
neutron_db = cr.create('neutron_db', 'resources/mariadb_db/', {
'db_name': 'neutron_db', 'login_user': 'root'})[0]
neutron_db_user = cr.create('neutron_db_user', 'resources/mariadb_user/', {
'user_name': 'neutron', 'user_password': 'neutron', 'login_user': 'root'})[0]
neutron_keystone_user = cr.create('neutron_keystone_user', 'resources/keystone_user', {
'user_name': 'neutron',
'user_password': 'neutron'
})[0]
neutron_keystone_role = cr.create('neutron_keystone_role', 'resources/keystone_role', {
'role_name': 'admin'
})[0]
evapi.add_dep(neutron_keystone_role.name, neutron_server_puppet.name, actions=('run',))
neutron_keystone_service_endpoint = cr.create('neutron_keystone_service_endpoint', 'resources/keystone_service_endpoint', {
'endpoint_name': 'neutron',
'adminurl': 'http://{{admin_ip}}:{{admin_port}}',
'internalurl': 'http://{{internal_ip}}:{{internal_port}}',
'publicurl': 'http://{{public_ip}}:{{public_port}}',
'description': 'OpenStack Network Service',
'type': 'network'
})[0]
node.connect(neutron_db)
node.connect(neutron_db_user)
mariadb_service.connect(neutron_db, {
'port': 'login_port',
'root_password': 'login_password',
'root_user': 'login_user',
'ip' : 'db_host'})
mariadb_service.connect(neutron_db_user, {'port': 'login_port', 'root_password': 'login_password'})
neutron_db.connect(neutron_db_user, {'db_name', 'db_host'})
neutron_db_user.connect(neutron_server_puppet, {
'user_name':'db_user',
'db_name':'db_name',
'user_password':'db_password',
'db_host' : 'db_host'})
mariadb_service.connect(neutron_server_puppet, {
'port': 'db_port',
'ip' : 'db_host'})
node.connect(neutron_server_puppet)
admin_user.connect(neutron_server_puppet, {
'user_name': 'auth_user',
'user_password': 'auth_password',
'tenant_name': 'auth_tenant'
})
keystone_puppet.connect(neutron_server_puppet, {
'ip': 'auth_host',
'port': 'auth_port'
})
services_tenant.connect(neutron_keystone_user)
neutron_keystone_user.connect(neutron_keystone_role)
keystone_puppet.connect(neutron_keystone_service_endpoint, {
'ip': ['ip', 'keystone_host'],
'admin_port': 'keystone_admin_port',
'admin_token': 'admin_token',
})
neutron_puppet.connect(neutron_keystone_service_endpoint, {
'ip': ['admin_ip', 'internal_ip', 'public_ip'],
'bind_port': ['admin_port', 'internal_port', 'public_port'],
})
return {'neutron_server_puppet': neutron_server_puppet,
'neutron_plugins_ml2': neutron_plugins_ml2,
'neutron_db': neutron_db,
'neutron_db_user': neutron_db_user,
'neutron_keystone_user': neutron_keystone_user,
'neutron_keystone_role': neutron_keystone_role,
'neutron_keystone_service_endpoint': neutron_keystone_service_endpoint}
def setup_neutron_agent(node, neutron_server_puppet):
# NEUTRON ML2 PLUGIN & ML2-OVS AGENT WITH GRE
neutron_agents_ml2 = cr.create('neutron_agents_ml2', 'resources/neutron_agents_ml2_ovs_puppet', {
# TODO(bogdando) these should come from the node network resource
'enable_tunneling': True,
'tunnel_types': ['gre'],
'local_ip': '10.1.0.13' # should be the IP addr of the br-mesh int.
})[0]
node.connect(neutron_agents_ml2)
evapi.add_dep(neutron_server_puppet.name, neutron_agents_ml2.name, actions=('run',))
# NEUTRON DHCP, L3, metadata agents
neutron_agents_dhcp = cr.create('neutron_agents_dhcp', 'resources/neutron_agents_dhcp_puppet', {})[0]
node.connect(neutron_agents_dhcp)
evapi.add_dep(neutron_server_puppet.name, neutron_agents_dhcp.name, actions=('run',))
neutron_agents_l3 = cr.create('neutron_agents_l3', 'resources/neutron_agents_l3_puppet', {
# TODO(bogdando) these should come from the node network resource
'metadata_port': 8775,
'external_network_bridge': 'br-floating',
})[0]
node.connect(neutron_agents_l3)
evapi.add_dep(neutron_server_puppet.name, neutron_agents_l3.name, actions=('run',))
neutron_agents_metadata = cr.create('neutron_agents_metadata', 'resources/neutron_agents_metadata_puppet', {
'sh2ared_secret': 'secret',
})[0]
node.connect(neutron_agents_metadata)
neutron_server_puppet.connect(neutron_agents_metadata, {
'auth_host', 'auth_port', 'auth_password',
'auth_tenant', 'auth_user',
})
return {'neutron_agents_ml2': neutron_agents_ml2,
'neutron_agents_dhcp': neutron_agents_dhcp,
'neutron_agents_metadata': neutron_agents_metadata}
def setup_neutron_compute(node, librarian, neutron_puppet, neutron_server_puppet):
# NEUTRON FOR COMPUTE (node1)
# Deploy chain neutron -> (plugins) -> ( agents )
name = node.name
neutron_puppet2 = cr.create('neutron_puppet_{}'.format(name), 'resources/neutron_puppet', {})[0]
neutron_puppet2.connect_with_events(librarian, {'module': 'modules'}, {})
evapi.add_dep(librarian.name, neutron_puppet2.name, actions=('run', 'update'))
dep = evapi.Dep(librarian.name, 'update', state='SUCESS',
child=neutron_puppet2.name, child_action='run')
evapi.add_event(dep)
node.connect(neutron_puppet2)
neutron_puppet.connect(neutron_puppet2, {
'rabbit_host', 'rabbit_port',
'rabbit_user', 'rabbit_password',
'rabbit_virtual_host',
'package_ensure', 'core_plugin',
})
# NEUTRON OVS PLUGIN & AGENT WITH GRE FOR COMPUTE (node1)
neutron_plugins_ml22 = cr.create('neutron_plugins_ml_{}'.format(name), 'resources/neutron_plugins_ml2_puppet', {})[0]
node.connect(neutron_plugins_ml22)
evapi.add_dep(neutron_puppet2.name, neutron_plugins_ml22.name, actions=('run',))
evapi.add_dep(neutron_server_puppet.name, neutron_plugins_ml22.name, actions=('run',))
neutron_agents_ml22 = cr.create('neutron_agents_ml_{}'.format(name), 'resources/neutron_agents_ml2_ovs_puppet', {
# TODO(bogdando) these should come from the node network resource
'enable_tunneling': True,
'tunnel_types': ['gre'],
'local_ip': '10.1.0.14' # Should be the IP addr of the br-mesh int.
})[0]
node.connect(neutron_agents_ml22)
evapi.add_dep(neutron_puppet2.name, neutron_agents_ml22.name, actions=('run',))
evapi.add_dep(neutron_server_puppet.name, neutron_agents_ml22.name, actions=('run',))
return {'neutron_puppet2': neutron_puppet2,
'neutron_plugins_ml22': neutron_plugins_ml22,
'neutron_agents_ml22': neutron_agents_ml22}
def setup_cinder(node, librarian, rabbitmq_service, mariadb_service, keystone_puppet, admin_user, openstack_vhost, openstack_rabbitmq_user, services_tenant):
# CINDER
cinder_puppet = cr.create('cinder_puppet', 'resources/cinder_puppet', {})[0]
cinder_db = cr.create('cinder_db', 'resources/mariadb_db/', {
'db_name': 'cinder_db', 'login_user': 'root'})[0]
cinder_db_user = cr.create('cinder_db_user', 'resources/mariadb_user/', {
'user_name': 'cinder', 'user_password': 'cinder', 'login_user': 'root'})[0]
cinder_keystone_user = cr.create('cinder_keystone_user', 'resources/keystone_user', {
'user_name': 'cinder', 'user_password': 'cinder'})[0]
cinder_keystone_role = cr.create('cinder_keystone_role', 'resources/keystone_role', {
'role_name': 'admin'})[0]
cinder_keystone_service_endpoint = cr.create(
'cinder_keystone_service_endpoint',
'resources/keystone_service_endpoint', {
'endpoint_name': 'cinder',
'adminurl': 'http://{{admin_ip}}:{{admin_port}}/v2/%(tenant_id)s',
'internalurl': 'http://{{internal_ip}}:{{internal_port}}/v2/%(tenant_id)s',
'publicurl': 'http://{{public_ip}}:{{public_port}}/v2/%(tenant_id)s',
'description': 'OpenStack Block Storage Service', 'type': 'volumev2'})[0]
node.connect(cinder_puppet)
cinder_puppet.connect_with_events(librarian, {'module': 'modules'}, {})
evapi.add_dep(librarian.name, cinder_puppet.name, actions=('run', 'update'))
node.connect(cinder_db)
node.connect(cinder_db_user)
rabbitmq_service.connect(cinder_puppet, {'ip': 'rabbit_host', 'port': 'rabbit_port'})
admin_user.connect(cinder_puppet, {'user_name': 'keystone_user', 'user_password': 'keystone_password', 'tenant_name': 'keystone_tenant'}) #?
openstack_vhost.connect(cinder_puppet, {'vhost_name': 'rabbit_virtual_host'})
openstack_rabbitmq_user.connect(cinder_puppet, {'user_name': 'rabbit_userid', 'password': 'rabbit_password'})
mariadb_service.connect(cinder_db, {
'port': 'login_port',
'root_password': 'login_password',
'root_user': 'login_user',
'ip' : 'db_host'})
mariadb_service.connect(cinder_db_user, {'port': 'login_port', 'root_password': 'login_password'})
cinder_db.connect(cinder_db_user, {'db_name', 'db_host'})
cinder_db_user.connect(cinder_puppet, {
'user_name':'db_user',
'db_name':'db_name',
'user_password':'db_password'})
mariadb_service.connect(cinder_puppet, {
'port': 'db_port',
'ip': 'db_host'})
keystone_puppet.connect(cinder_puppet, {'ip': 'keystone_host', 'admin_port': 'keystone_port'}) #or non admin port?
services_tenant.connect(cinder_keystone_user)
cinder_keystone_user.connect(cinder_keystone_role)
cinder_keystone_user.connect(cinder_puppet, {'user_name': 'keystone_user', 'tenant_name': 'keystone_tenant', 'user_password': 'keystone_password'})
mariadb_service.connect(cinder_puppet, {'ip':'ip'})
cinder_puppet.connect(cinder_keystone_service_endpoint, {
'ip': ['ip', 'keystone_host', 'admin_ip', 'internal_ip', 'public_ip'],
'port': ['admin_port', 'internal_port', 'public_port'],})
keystone_puppet.connect(cinder_keystone_service_endpoint, {
'admin_port': 'keystone_admin_port', 'admin_token': 'admin_token'})
# CINDER GLANCE
# Deploy chain: cinder_puppet -> cinder_glance -> ( cinder_api, cinder_scheduler, cinder_volume )
cinder_glance_puppet = cr.create('cinder_glance_puppet', 'resources/cinder_glance_puppet', {})[0]
node.connect(cinder_glance_puppet)
evapi.add_dep(cinder_puppet.name, cinder_glance_puppet.name, actions=('run',))
return {'cinder_puppet': cinder_puppet,
'cinder_db': cinder_db,
'cinder_db_user': cinder_db_user,
'cinder_keystone_user': cinder_keystone_user,
'cinder_keystone_role': cinder_keystone_role,
'cinder_keystone_service_endpoint': cinder_keystone_service_endpoint,
'cinder_glance_puppet': cinder_glance_puppet}
def setup_cinder_api(node, cinder_puppet):
# CINDER API
cinder_api_puppet = cr.create('cinder_api_puppet', 'resources/cinder_api_puppet', {})[0]
node.connect(cinder_api_puppet)
cinder_puppet.connect(cinder_api_puppet, {
'keystone_password', 'keystone_tenant', 'keystone_user'})
cinder_puppet.connect(cinder_api_puppet, {
'keystone_host': 'keystone_auth_host',
'keystone_port': 'keystone_auth_port'})
evapi.add_react(cinder_puppet.name, cinder_api_puppet.name, actions=('update',))
return {'cinder_api_puppet': cinder_api_puppet}
def setup_cinder_scheduler(node, cinder_puppet):
# CINDER SCHEDULER
cinder_scheduler_puppet = cr.create('cinder_scheduler_puppet', 'resources/cinder_scheduler_puppet', {})[0]
node.connect(cinder_scheduler_puppet)
cinder_puppet.connect(cinder_scheduler_puppet)
evapi.add_react(cinder_puppet.name, cinder_scheduler_puppet.name, actions=('update',))
return {'cinder_scheduler_puppet': cinder_scheduler_puppet}
def setup_cinder_volume(node, cinder_puppet):
# CINDER VOLUME
cinder_volume = cr.create('cinder_volume_{}'.format(node.name), 'resources/volume_group',
{'path': '/root/cinder.img', 'volume_name': 'cinder-volume'})[0]
node.connect(cinder_volume)
cinder_volume_puppet = cr.create('cinder_volume_puppet', 'resources/cinder_volume_puppet', {})[0]
node.connect(cinder_volume_puppet)
cinder_puppet.connect(cinder_volume_puppet)
evapi.add_react(cinder_puppet.name, cinder_volume_puppet.name, actions=('update',))
cinder_volume.connect(cinder_volume_puppet, {'volume_name': 'volume_group'})
return {'cinder_volume_puppet': cinder_volume_puppet}
def setup_nova(node, librarian, mariadb_service, rabbitmq_service, admin_user, openstack_vhost, services_tenant, keystone_puppet, openstack_rabbitmq_user):
# NOVA
nova_puppet = cr.create('nova_puppet', 'resources/nova_puppet', {})[0]
nova_db = cr.create('nova_db', 'resources/mariadb_db/', {
'db_name': 'nova_db',
'login_user': 'root'})[0]
nova_db_user = cr.create('nova_db_user', 'resources/mariadb_user/', {
'user_name': 'nova',
'user_password': 'nova',
'login_user': 'root'})[0]
nova_keystone_user = cr.create('nova_keystone_user', 'resources/keystone_user', {
'user_name': 'nova',
'user_password': 'nova'})[0]
nova_keystone_role = cr.create('nova_keystone_role', 'resources/keystone_role', {
'role_name': 'admin'})[0]
nova_keystone_service_endpoint = cr.create('nova_keystone_service_endpoint', 'resources/keystone_service_endpoint', {
'endpoint_name': 'nova',
'adminurl': 'http://{{admin_ip}}:{{admin_port}}/v2/%(tenant_id)s',
'internalurl': 'http://{{internal_ip}}:{{internal_port}}/v2/%(tenant_id)s',
'publicurl': 'http://{{public_ip}}:{{public_port}}/v2/%(tenant_id)s',
'description': 'OpenStack Compute Service',
'type': 'compute'})[0]
node.connect(nova_puppet)
nova_puppet.connect_with_events(librarian, {'module': 'modules'}, {})
evapi.add_dep(librarian.name, nova_puppet.name, actions=('run', 'update'))
node.connect(nova_db)
node.connect(nova_db_user)
mariadb_service.connect(nova_db, {
'port': 'login_port',
'root_password': 'login_password',
'root_user': 'login_user',
'ip' : 'db_host'})
mariadb_service.connect(nova_db_user, {
'port': 'login_port',
'root_password': 'login_password'})
admin_user.connect(nova_puppet, {'user_name': 'keystone_user', 'user_password': 'keystone_password', 'tenant_name': 'keystone_tenant'}) #?
openstack_vhost.connect(nova_puppet, {'vhost_name': 'rabbit_virtual_host'})
nova_db.connect(nova_db_user, {'db_name', 'db_host'})
services_tenant.connect(nova_keystone_user)
nova_keystone_user.connect(nova_keystone_role)
keystone_puppet.connect(nova_puppet, {
'ip': 'keystone_host',
'admin_port': 'keystone_port'})
nova_keystone_user.connect(nova_puppet, {
'user_name': 'keystone_user',
'tenant_name': 'keystone_tenant',
'user_password': 'keystone_password'})
rabbitmq_service.connect(nova_puppet, {
'ip': 'rabbit_host', 'port': 'rabbit_port'})
openstack_rabbitmq_user.connect(nova_puppet, {
'user_name': 'rabbit_userid',
'password': 'rabbit_password'})
keystone_puppet.connect(nova_keystone_service_endpoint, {
'ip': 'keystone_host',
'admin_port': 'keystone_admin_port',
'admin_token': 'admin_token'})
mariadb_service.connect(nova_puppet, {
'ip':'db_host',
'port': 'db_port'})
nova_db_user.connect(nova_puppet, {
'user_name':'db_user',
'db_name':'db_name',
'user_password':'db_password'})
nova_puppet.connect(nova_keystone_service_endpoint, {
'ip': ['ip', 'keystone_host', 'public_ip', 'internal_ip', 'admin_ip'],
'port': ['admin_port', 'internal_port', 'public_port'],
})
return {'nova_puppet': nova_puppet,
'nova_db': nova_db,
'nova_db_user': nova_db_user,
'nova_keystone_user': nova_keystone_user,
'nova_keystone_role': nova_keystone_role,
'nova_keystone_service_endpoint': nova_keystone_service_endpoint}
def setup_nova_api(node, nova_puppet, neutron_agents_metadata):
# NOVA API
nova_api_puppet = cr.create('nova_api_puppet', 'resources/nova_api_puppet', {})[0]
node.connect(nova_api_puppet)
nova_puppet.connect(nova_api_puppet, {
'keystone_tenant': 'admin_tenant_name',
'keystone_user': 'admin_user',
'keystone_password': 'admin_password',
'keystone_host': 'auth_host',
'keystone_port': 'auth_port'})
evapi.add_react(nova_puppet.name, nova_api_puppet.name, actions=('update',))
nova_api_puppet.connect(neutron_agents_metadata, {'ip': 'metadata_ip'})
return {'nova_api_puppet': nova_api_puppet}
def setup_nova_conductor(node, nova_puppet, nova_api_puppet):
# NOVA CONDUCTOR
nova_conductor_puppet = cr.create('nova_conductor_puppet', 'resources/nova_conductor_puppet', {})[0]
node.connect(nova_conductor_puppet)
nova_puppet.connect(nova_conductor_puppet)
evapi.add_dep(nova_api_puppet.name, nova_conductor_puppet.name, actions=('run',))
evapi.add_react(nova_puppet.name, nova_conductor_puppet.name, actions=('update',))
return {'nova_conductor': nova_conductor_puppet}
def setup_nova_scheduler(node, nova_puppet, nova_api_puppet):
# NOVA SCHEDULER
# NOTE(bogdando) Generic service is used. Package and service names for Ubuntu case
# come from https://github.com/openstack/puppet-nova/blob/5.1.0/manifests/params.pp
nova_scheduler_puppet = cr.create('nova_scheduler_puppet', 'resources/nova_generic_service_puppet', {
'title' : 'scheduler', 'package_name': 'nova-scheduler', 'service_name': 'nova-scheduler',
})[0]
node.connect(nova_scheduler_puppet)
evapi.add_dep(nova_puppet.name, nova_scheduler_puppet.name, actions=('run',))
evapi.add_dep(nova_api_puppet.name, nova_scheduler_puppet.name, actions=('run',))
evapi.add_react(nova_puppet.name, nova_scheduler_puppet.name, actions=('update',))
return {'nova_scheduler_puppet': nova_scheduler_puppet}
def setup_nova_compute(node, librarian, nova_puppet, nova_api_puppet, neutron_server_puppet, neutron_keystone_service_endpoint, glance_api_puppet):
# NOVA COMPUTE
# Deploy chain (nova, node_networking(TODO)) -> (nova_compute_libvirt, nova_neutron) -> nova_compute
name = node.name
nova_compute_puppet = cr.create('nova_compute_puppet_{}'.format(name), 'resources/nova_compute_puppet', {})[0]
# TODO (bogdando) figure out how to use it for multiple glance api servers
nova_puppet2 = cr.create('nova_puppet_{}'.format(name), 'resources/nova_puppet', {
'glance_api_servers': '{{glance_api_servers_host}}:{{glance_api_servers_port}}'
})[0]
nova_puppet.connect(nova_puppet2, {
'ensure_package', 'rabbit_host',
'rabbit_password', 'rabbit_port', 'rabbit_userid',
'rabbit_virtual_host', 'db_user', 'db_password',
'db_name', 'db_host', 'keystone_password',
'keystone_port', 'keystone_host', 'keystone_tenant',
'keystone_user',
})
# TODO(bogdando): Make a connection for nova_puppet2.glance_api_servers = "glance_api_puppet.ip:glance_api_puppet.bind_port"
node.connect(nova_puppet2)
nova_puppet2.connect_with_events(librarian, {'module': 'modules'}, {})
evapi.add_dep(librarian.name, nova_puppet2.name, actions=('run', 'update'))
dep = evapi.Dep(librarian.name, 'update', state='SUCESS',
child=nova_puppet2.name, child_action='run')
evapi.add_event(dep)
node.connect(nova_compute_puppet)
evapi.add_dep(nova_puppet2.name, nova_compute_puppet.name, actions=('run',))
evapi.add_dep(nova_api_puppet.name, nova_compute_puppet.name, actions=('run',))
evapi.add_react(nova_puppet2.name, nova_compute_puppet.name, actions=('run', 'update'))
# NOVA COMPUTE LIBVIRT, NOVA_NEUTRON
# NOTE(bogdando): changes nova config, so should notify nova compute service
nova_compute_libvirt_puppet = cr.create('nova_compute_libvirt_puppet_{}'.format(name), 'resources/nova_compute_libvirt_puppet', {})[0]
node.connect(nova_compute_libvirt_puppet)
evapi.add_dep(nova_puppet2.name, nova_compute_libvirt_puppet.name, actions=('run',))
evapi.add_dep(nova_api_puppet.name, nova_compute_libvirt_puppet.name, actions=('run',))
# compute configuration for neutron, use http auth/endpoint protocols, keystone v2 auth hardcoded for the resource
nova_neutron_puppet = cr.create('nova_neutron_puppet_{}'.format(name), 'resources/nova_neutron_puppet', {})[0]
node.connect(nova_neutron_puppet)
evapi.add_dep(nova_puppet2.name, nova_neutron_puppet.name, actions=('run',))
evapi.add_dep(nova_api_puppet.name, nova_neutron_puppet.name, actions=('run',))
neutron_server_puppet.connect(nova_neutron_puppet, {
'auth_password': 'neutron_admin_password',
'auth_user': 'neutron_admin_username',
'auth_type': 'neutron_auth_strategy',
'auth_host': 'auth_host', 'auth_port': 'auth_port',
'auth_protocol': 'auth_protocol',
})
neutron_keystone_service_endpoint.connect(nova_neutron_puppet, {
'internal_ip':'neutron_endpoint_host',
'internal_port':'neutron_endpoint_port',
})
# Update glance_api_service for nova compute
glance_api_puppet.connect(nova_puppet2, {
'ip': 'glance_api_servers_host',
'bind_port': 'glance_api_servers_port'
})
# signals.connect(keystone_puppet, nova_network_puppet, {'ip': 'keystone_host', 'port': 'keystone_port'})
# signals.connect(keystone_puppet, nova_keystone_service_endpoint, {'ip': 'keystone_host', 'admin_port': 'keystone_port', 'admin_token': 'admin_token'})
# signals.connect(rabbitmq_service1, nova_network_puppet, {'ip': 'rabbitmq_host', 'port': 'rabbitmq_port'})
return {'nova_compute_puppet': nova_compute_puppet,
'nova_puppet2': nova_puppet2,
'nova_compute_libvirt_puppet': nova_compute_libvirt_puppet,
'nova_neutron_puppet': nova_neutron_puppet,
'neutron_server_puppet': neutron_server_puppet}
def setup_glance_api(node, librarian, mariadb_service, admin_user, keystone_puppet, services_tenant, cinder_glance_puppet):
# GLANCE (base and API)
glance_api_puppet = cr.create('glance_api_puppet', 'resources/glance_puppet', {})[0]
glance_db_user = cr.create('glance_db_user', 'resources/mariadb_user/', {
'user_name': 'glance', 'user_password': 'glance', 'login_user': 'root'})[0]
glance_db = cr.create('glance_db', 'resources/mariadb_db/', {
'db_name': 'glance', 'login_user': 'root'})[0]
glance_keystone_user = cr.create('glance_keystone_user', 'resources/keystone_user', {
'user_name': 'glance', 'user_password': 'glance123'})[0]
glance_keystone_role = cr.create('glance_keystone_role', 'resources/keystone_role', {
'role_name': 'admin'})[0]
glance_keystone_service_endpoint = cr.create(
'glance_keystone_service_endpoint',
'resources/keystone_service_endpoint', {
'endpoint_name': 'glance',
'adminurl': 'http://{{admin_ip}}:{{admin_port}}',
'internalurl': 'http://{{internal_ip}}:{{internal_port}}',
'publicurl': 'http://{{public_ip}}:{{public_port}}',
'description': 'OpenStack Image Service', 'type': 'image'})[0]
node.connect(glance_api_puppet)
glance_api_puppet.connect_with_events(librarian, {'module': 'modules'}, {})
evapi.add_dep(librarian.name, glance_api_puppet.name, actions=('run', 'update'))
node.connect(glance_db)
node.connect(glance_db_user)
admin_user.connect(glance_api_puppet, {
'user_name': 'keystone_user', 'user_password': 'keystone_password',
'tenant_name': 'keystone_tenant'}) #?
mariadb_service.connect(glance_db, {
'port': 'login_port',
'root_password': 'login_password',
'root_user': 'login_user',
'ip' : 'db_host'})
mariadb_service.connect(glance_db_user, {'port': 'login_port', 'root_password': 'login_password'})
glance_db.connect(glance_db_user, {'db_name', 'db_host'})
glance_db_user.connect(glance_api_puppet, {
'user_name':'db_user',
'db_name':'db_name',
'user_password':'db_password',
'db_host' : 'db_host'})
mariadb_service.connect(glance_api_puppet,{
'port': 'db_port',
'ip': 'db_host'})
keystone_puppet.connect(glance_api_puppet, {'ip': 'keystone_host', 'admin_port': 'keystone_port'}) #or non admin port?
services_tenant.connect(glance_keystone_user)
glance_keystone_user.connect(glance_keystone_role)
glance_keystone_user.connect(glance_api_puppet, {
'user_name': 'keystone_user', 'tenant_name': 'keystone_tenant',
'user_password': 'keystone_password'})
mariadb_service.connect(glance_api_puppet, {'ip':'ip'})
glance_api_puppet.connect(glance_keystone_service_endpoint, {
'ip': ['ip', 'keystone_host', 'admin_ip', 'internal_ip', 'public_ip'],
'bind_port': ['admin_port', 'internal_port', 'public_port'],})
keystone_puppet.connect(glance_keystone_service_endpoint, {
'admin_port': 'keystone_admin_port', 'admin_token': 'admin_token'})
# Update glance_api_service for cinder
glance_api_puppet.connect(cinder_glance_puppet, {
'ip': 'glance_api_servers_host',
'bind_port': 'glance_api_servers_port'
})
return {'glance_api_puppet': glance_api_puppet,
'glance_db_user': glance_db_user,
'glance_db': glance_db,
'glance_keystone_user': glance_keystone_user,
'glance_keystone_role': glance_keystone_role,
'glance_keystone_service_endpoint': glance_keystone_service_endpoint}
def setup_glance_registry(node, glance_api_puppet):
# GLANCE REGISTRY
glance_registry_puppet = cr.create('glance_registry_puppet', 'resources/glance_registry_puppet', {})[0]
node.connect(glance_registry_puppet)
glance_api_puppet.connect(glance_registry_puppet)
evapi.add_react(glance_api_puppet.name, glance_registry_puppet.name, actions=('update',))
# API and registry should not listen same ports
# should not use the same log destination and a pipeline,
# so disconnect them and restore the defaults
signals.disconnect_receiver_by_input(glance_registry_puppet, 'bind_port')
signals.disconnect_receiver_by_input(glance_registry_puppet, 'log_file')
signals.disconnect_receiver_by_input(glance_registry_puppet, 'pipeline')
glance_registry_puppet.update({
'bind_port': 9191,
'log_file': '/var/log/glance/registry.log',
'pipeline': 'keystone',
})
return {'glance_registry_puppet': glance_registry_puppet}
def validate():
has_errors = False
for r in locals().values():
if not isinstance(r, resource.Resource):
continue
print 'Validating {}'.format(r.name)
errors = validation.validate_resource(r)
if errors:
has_errors = True
print 'ERROR: %s: %s' % (r.name, errors)
if has_errors:
sys.exit(1)
def create_controller(node):
r = {r.name: r for r in resource.load_all()}
librarian_node = 'librarian_{}'.format(node)
r.update(setup_base(r[node], r[librarian_node]))
r.update(setup_keystone(r[node], r[librarian_node],
r['mariadb_service'], r['openstack_rabbitmq_user']))
r.update(setup_openrc(r[node], r['keystone_puppet'], r['admin_user']))
r.update(setup_neutron(r[node], r['librarian_{}'.format(node)], r['rabbitmq_service1'],
r['openstack_rabbitmq_user'], r['openstack_vhost']))
r.update(setup_neutron_api(r[node], r['mariadb_service'], r['admin_user'],
r['keystone_puppet'], r['services_tenant'], r['neutron_puppet']))
r.update(setup_neutron_agent(r[node], r['neutron_server_puppet']))
r.update(setup_cinder(r[node], r['librarian_{}'.format(node)], r['rabbitmq_service1'],
r['mariadb_service'], r['keystone_puppet'], r['admin_user'],
r['openstack_vhost'], r['openstack_rabbitmq_user'], r['services_tenant']))
r.update(setup_cinder_api(r[node], r['cinder_puppet']))
r.update(setup_cinder_scheduler(r[node], r['cinder_puppet']))
r.update(setup_cinder_volume(r[node], r['cinder_puppet']))
r.update(setup_nova(r[node], r['librarian_{}'.format(node)], r['mariadb_service'], r['rabbitmq_service1'],
r['admin_user'], r['openstack_vhost'], r['services_tenant'],
r['keystone_puppet'], r['openstack_rabbitmq_user']))
r.update(setup_nova_api(r[node], r['nova_puppet'], r['neutron_agents_metadata']))
r.update(setup_nova_conductor(r[node], r['nova_puppet'], r['nova_api_puppet']))
r.update(setup_nova_scheduler(r[node], r['nova_puppet'], r['nova_api_puppet']))
r.update(setup_glance_api(r[node], r['librarian_{}'.format(node)], r['mariadb_service'], r['admin_user'],
r['keystone_puppet'], r['services_tenant'],
r['cinder_glance_puppet']))
r.update(setup_glance_registry(r[node], r['glance_api_puppet']))
return r
def create_compute(node):
r = {r.name: r for r in resource.load_all()}
librarian_node = 'librarian_{}'.format(node)
res = {}
res.update(setup_neutron_compute(r[node], r[librarian_node], r['neutron_puppet'], r['neutron_server_puppet']))
res.update(setup_nova_compute(r[node], r[librarian_node], r['nova_puppet'], r['nova_api_puppet'],
r['neutron_server_puppet'], r['neutron_keystone_service_endpoint'], r['glance_api_puppet']))
return r
@click.command()
def create_all():
ModelMeta.remove_all()
r = prepare_nodes(2)
r.update(create_controller('node1'))
r.update(create_compute('node2'))
print '\n'.join(r.keys())
@click.command()
@click.argument('nodes_count')
def prepare(nodes_count):
r = prepare_nodes(nodes_count)
print '\n'.join(r.keys())
@click.command()
@click.argument('node')
def add_compute(node):
r = create_compute(node)
print '\n'.join(r.keys())
@click.command()
@click.argument('node')
def add_controller(node):
r = create_controller(node)
print '\n'.join(r.keys())
@click.command()
def clear():
ModelMeta.remove_all()
if __name__ == '__main__':
main.add_command(create_all)
main.add_command(prepare)
main.add_command(add_controller)
main.add_command(add_compute)
main.add_command(clear)
main()
if PROFILE:
pr.disable()
s = StringIO.StringIO()
sortby = 'cumulative'
ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
ps.print_stats()
print s.getvalue()
sys.exit(0)


@ -1,22 +0,0 @@
id: primary_controller
resources:
- id: rabbit_user
from: resources/rabbitmq_user
location: {{node}}
input:
user_name: {{user_name}}
password: {{password}}
vhost_name: {{vhost_res}}::vhost_name
updates:
- id: {{for}}
input:
{{for_user}}: rabbit_user::user_name
{{for_password}}: rabbit_user::password
events:
- type: depends_on
parent_action: rabbit_user.run
state: success
child_action: {{for}}.update


@ -1,79 +0,0 @@
#!/usr/bin/env python
import requests
from solar.core.resource import composer as cr
from solar.events.api import add_event
from solar.events.controls import React
discovery_service = 'http://0.0.0.0:8881'
bareon_partitioning = 'http://0.0.0.0:9322/v1/nodes/{0}/partitioning'
bareon_repos = 'http://0.0.0.0:9322/v1/nodes/{0}/repos'
bareon_sync = 'http://0.0.0.0:9322/v1/actions/sync_all'
class NodeAdapter(dict):
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
@property
def node_id(self):
return self['id']
@property
def partitioning(self):
return requests.get(bareon_partitioning.format(self['id'])).json()
@property
def repos(self):
return requests.get(bareon_repos.format(self['id'])).json()
# Sync hw info about nodes from discovery service into bareon-api
requests.post(bareon_sync)
# Get list of nodes from discovery service
nodes_list = requests.get(discovery_service).json()
# Create slave node resources
node_resources = cr.create('nodes', 'templates/not_provisioned_nodes',
{'nodes': nodes_list})
# Get master node
master_node = filter(lambda n: n.name == 'node_master', node_resources)[0]
with open('/vagrant/tmp/keys/ssh_public') as fp:
master_key = fp.read().strip()
# Dnsmasq resources
for node in nodes_list:
node = NodeAdapter(node)
node_resource = next(n for n in node_resources
if n.name.endswith('node_{0}'.format(node.node_id)))
node_resource.update(
{
'partitioning': node.partitioning,
'master_key': master_key,
'repos': node.repos,
}
)
dnsmasq = cr.create('dnsmasq_{0}'.format(node.node_id),
'resources/dnsmasq', {})[0]
master_node.connect(dnsmasq)
node_resource.connect(dnsmasq, {'admin_mac': 'exclude_mac_pxe'})
event = React(node_resource.name, 'run', 'success', node_resource.name,
'provision')
add_event(event)
event = React(node_resource.name, 'provision', 'success', dnsmasq.name,
'exclude_mac_pxe')
add_event(event)
event = React(dnsmasq.name, 'exclude_mac_pxe', 'success',
node_resource.name, 'reboot')
add_event(event)


@ -1,17 +0,0 @@
#!/bin/bash
set -eux
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
# Remove generated pxe exclude files
sudo rm -f /etc/dnsmasq.d/no_pxe_*.conf
sudo service dnsmasq restart
solar resource clear_all
python "${DIR}"/provision.py
solar changes stage
solar changes process
solar orch run-once last
watch --color -n1 'solar orch report last'

View File

@ -1,47 +0,0 @@
Example of a 3-node Riak cluster.
First, run:
`python examples/riak/riaks.py deploy`
It will prepare the Riak nodes and related resources.
Then you can continue with the standard Solar workflow:
```
solar changes stage -d
solar changes process
solar orch run-once last
watch -n 1 solar orch report last
```
Wait until all actions have state `SUCCESS`.
After that you can add HAProxy on each node:
`python examples/riak/riaks.py add_haproxies`
Then run the standard Solar workflow again:
```
solar changes stage -d
solar changes process
solar orch run-once last
watch -n 1 solar orch report last
```
Wait until all actions have state `SUCCESS`.
After that you have a basic 3-node Riak cluster running.
You can also modify the Riak HTTP port with:
`solar resource update riak_service1 riak_port_http=18100`
And then run the standard workflow again:
```
solar changes stage -d
solar changes process
solar orch run-once last
watch -n 1 solar orch report last
```
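
The same change can also be made from Python with the resource API used throughout these examples (a minimal sketch; the resource name matches the one created above):

```python
from solar.core import resource

# load the existing resource and change its input, then stage/process/run as usual
riak1 = resource.load('riak_service1')
riak1.update({'riak_port_http': 18100})
```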

View File

@ -1,50 +0,0 @@
id: haproxy_riak_config
resources:
- id: haproxy_riak_config_http
from: resources/haproxy_service_config
tags: ['service=riak', 'protocol=http']
input:
listen_port: #{http_listen_port}#
protocol: 'http'
name: 'riak_haproxy_http'
backends:server:
#% for riak in riaks %#
- #{riak}#::riak_hostname
#% endfor %#
backends:port:
#% for riak in riaks %#
- #{riak}#::riak_port_http
#% endfor %#
- id: haproxy_riak_config_pb
from: resources/haproxy_service_config
tags: ['service=riak', 'protocol=tcp']
input:
listen_port: #{pb_listen_port}#
protocol: 'tcp'
name: 'riak_haproxy_pb'
backends:server:
#% for riak in riaks %#
- #{riak}#::riak_hostname
#% endfor %#
backends:port:
#% for riak in riaks %#
- #{riak}#::riak_port_pb
#% endfor %#
updates:
- with_tags: ['resource=haproxy_config']
input:
config:protocol:
- haproxy_riak_config_http::protocol
- haproxy_riak_config_pb::protocol
config:listen_port:
- haproxy_riak_config_http::listen_port
- haproxy_riak_config_pb::listen_port
config:name:
- haproxy_riak_config_http::name
- haproxy_riak_config_pb::name
config:backends:
- haproxy_riak_config_http::backends
- haproxy_riak_config_pb::backends

View File

@ -1,54 +0,0 @@
id: riak_cluster
resources:
- id: riak_service1
    # `./` added intentionally
from: ./riak_service.yaml
input:
node: #{nodes[0]}#
index: 1
join_to: ''
- id: riak_service2
    # `./` omitted intentionally
from: riak_service.yaml
input:
node: #{nodes[1]}#
index: 2
join_to: riak_service1
- id: riak_service3
    # `./` omitted intentionally
from: riak_service.yaml
input:
node: #{nodes[2]}#
index: 3
join_to: riak_service1
- id: haproxy_riak_config
from: ./haproxy_riak_config.yaml
input:
http_listen_port: 8098
pb_listen_port: 8087
riaks: ['riak_service1', 'riak_service2', 'riak_service3']
- id: haproxy1
from: templates/haproxy
input:
node: #{nodes[0]}#
service_configs: ['haproxy_riak_config_pb', 'haproxy_riak_config_http']
index: 1
- id: haproxy2
from: templates/haproxy
input:
node: #{nodes[1]}#
service_configs: ['haproxy_riak_config_pb', 'haproxy_riak_config_http']
index: 2
- id: haproxy3
from: templates/haproxy
input:
node: #{nodes[2]}#
service_configs: ['haproxy_riak_config_pb', 'haproxy_riak_config_http']
index: 3

View File

@ -1,61 +0,0 @@
id: riak_service
resources:
- id: riak_service#{index}#
from: resources/riak_node
location: #{node}#
input:
riak_self_name: riak#{index}#
riak_hostname: riak_server#{index}#.solar
riak_name: riak#{index}#@riak_server#{index}#.solar
#% if join_to %#
join_to: #{join_to}#::riak_name
#% endif %#
ip: #{node}#::ip
updates:
- with_tags: 'resource=hosts_file'
input:
hosts:name:
- riak_service#{index}#::riak_hostname::NO_EVENTS
hosts:ip:
- riak_service#{index}#::ip::NO_EVENTS
- with_tags: 'resource=haproxy_service_config & service=riak & protocol=http'
input:
backends:server:
- riak_service#{index}#::riak_hostname
backends:port:
- riak_service#{index}#::riak_port_http
- with_tags: 'resource=haproxy_service_config & service=riak & protocol=tcp'
input:
backends:server:
- riak_service#{index}#::riak_hostname
backends:port:
- riak_service#{index}#::riak_port_pb
events:
- type: depends_on
parent:
with_tags: 'resource=hosts_file & location=#{node}#'
action: run
state: success
child_action: riak_service#{index}#.run
#% if join_to %#
- type: react_on
parent_action: riak_service#{index}#.run
state: success
child_action: riak_service#{index}#.join
- type: react_on
parent_action: riak_service#{index}#.leave
state: success
child_action: riak_service#{index}#.join
- type: react_on
parent_action: riak_service#{index}#.join
state: success
child_action: #{join_to}#.commit
#% endif %#

View File

@ -1,174 +0,0 @@
#!/usr/bin/env python
# WARNING: this might not be the most up-to-date script and not all things might
# work here; for the most up-to-date version see example-riaks.py
# This is just a demo of the template language of Solar
import click
import sys
from solar.core import resource
from solar import template
from solar.dblayer.model import ModelMeta
def setup_riak():
ModelMeta.remove_all()
nodes = template.nodes_from('templates/riak_nodes')
riak_services = nodes.on_each(
'resources/riak_node',
args={
'riak_self_name': 'riak{num}',
'riak_hostname': 'riak_server{num}.solar',
'riak_name': 'riak{num}@riak_server{num}.solar',
}
)
slave_riak_services = riak_services.tail()
riak_services.take(0).connect_list(
slave_riak_services,
mapping={
'riak_name': 'join_to',
}
)
hosts_files = nodes.on_each('resources/hosts_file')
riak_services.connect_list_to_each(
hosts_files,
mapping={
'ip': 'hosts:ip',
'riak_hostname': 'hosts:name',
},
events=False
)
errors = resource.validate_resources()
for r, error in errors:
click.echo('ERROR: %s: %s' % (r.name, error))
if errors:
click.echo("ERRORS")
sys.exit(1)
hosts_files.add_deps('run/success', riak_services, 'run')
slave_riak_services.add_reacts('run/success', slave_riak_services, 'join')
slave_riak_services.add_reacts('leave/success', slave_riak_services, 'join')
slave_riak_services.add_react('run/success', riak_services.take(0), 'commit')
def setup_haproxies():
# TODO: VR loading needs to be supported, then we can do something like
# nodes = template.load('nodes')
nodes = template.ResourceListTemplate([
resource.load('node1'),
resource.load('node2'),
resource.load('node3'),
])
riak_services = template.ResourceListTemplate([
resource.load('riak_node-0'),
resource.load('riak_node-1'),
resource.load('riak_node-2'),
])
haproxy_services = nodes.on_each(
'resources/haproxy_service'
)
haproxy_configs = nodes.on_each(
'resources/haproxy_config'
)
haproxy_service_configs_http = riak_services.on_each(
'resources/haproxy_service_config',
{
'listen_port': 8098,
'protocol': 'http',
'name': 'riak_haproxy_http{num}',
}
)
haproxy_service_configs_pb = riak_services.on_each(
'resources/haproxy_service_config',
{
'listen_port': 8087,
'protocol': 'tcp',
'name': 'riak_haproxy_pb{num}',
}
)
riak_services.connect_list_to_each(
haproxy_service_configs_http,
{
'riak_hostname': 'backends:server',
'riak_port_http': 'backends:port',
}
)
riak_services.connect_list_to_each(
haproxy_service_configs_pb,
{
'riak_hostname': 'backends:server',
'riak_port_pb': 'backends:port',
}
)
haproxy_service_configs_http.connect_list(
haproxy_configs,
{
'backends': 'config:backends',
'listen_port': 'config:listen_port',
'protocol': 'config:protocol',
'name': 'config:name',
}
)
haproxy_service_configs_pb.connect_list(
haproxy_configs,
{
'backends': 'config:backends',
'listen_port': 'config:listen_port',
'protocol': 'config:protocol',
'name': 'config:name',
}
)
#nodes.add_reacts('run/success', haproxy_services, 'install')
haproxy_services.add_deps('run/success', haproxy_configs, 'run')
haproxy_configs.add_reacts('run/success', haproxy_services, 'apply_config')
haproxy_configs.add_reacts('update/success', haproxy_services, 'apply_config')
errors = resource.validate_resources()
for r, error in errors:
click.echo('ERROR: %s: %s' % (r.name, error))
if errors:
click.echo("ERRORS")
sys.exit(1)
@click.group()
def main():
pass
@click.command()
def deploy():
setup_riak()
@click.command()
def add_haproxies():
setup_haproxies()
@click.command()
def undeploy():
    raise NotImplementedError("Not yet")
main.add_command(deploy)
main.add_command(undeploy)
main.add_command(add_haproxies)
if __name__ == '__main__':
main()

View File

@ -1,266 +0,0 @@
#!/usr/bin/env python
# To run:
# python example-riaks.py deploy
# solar changes stage
# solar changes process
# solar orch run-once last
# python example-riaks.py add_haproxies
# solar changes stage
# solar changes process
# solar orch run-once last
import click
import sys
from solar.core import resource
from solar.core import signals
from solar.core import validation
from solar.core.resource import composer as cr
from solar import errors
from solar.dblayer.model import ModelMeta
from solar.events.controls import React, Dep
from solar.events.api import add_event
from solar.dblayer.solar_models import Resource
def setup_riak():
ModelMeta.remove_all()
resources = cr.create('nodes', 'templates/nodes', {'count': 3})
nodes = resources.like('node')
hosts_services = resources.like('hosts_file')
node1, node2, node3 = nodes
riak_services = []
ips = '10.0.0.%d'
for i in xrange(3):
num = i + 1
r = cr.create('riak_service%d' % num,
'resources/riak_node',
{'riak_self_name': 'riak%d' % num,
'storage_backend': 'leveldb',
'riak_hostname': 'riak_server%d.solar' % num})[0]
r.connect(r, {'riak_self_name': 'riak_name',
'riak_hostname': 'riak_name'})
riak_services.append(r)
for i, riak in enumerate(riak_services):
nodes[i].connect(riak)
for i, riak in enumerate(riak_services[1:]):
riak_services[0].connect(riak, {'riak_name': 'join_to'})
for riak in riak_services:
for hosts_file in hosts_services:
riak.connect_with_events(hosts_file,
{'riak_hostname': 'hosts:name',
'ip': 'hosts:ip'})
Resource.save_all_lazy()
errors = resource.validate_resources()
for r, error in errors:
click.echo('ERROR: %s: %s' % (r.name, error))
has_errors = False
if errors:
click.echo("ERRORS")
sys.exit(1)
events = [
Dep('hosts_file1', 'run', 'success', 'riak_service1', 'run'),
Dep('hosts_file2', 'run', 'success', 'riak_service2', 'run'),
Dep('hosts_file3', 'run', 'success', 'riak_service3', 'run'),
React('riak_service2', 'run', 'success', 'riak_service2', 'join'),
React('riak_service3', 'run', 'success', 'riak_service3', 'join'),
# Dep('riak_service1', 'run', 'success', 'riak_service2', 'join'),
# Dep('riak_service1', 'run', 'success', 'riak_service3', 'join'),
# React('riak_service2', 'join', 'error', 'riak_service2', 'leave'),
# React('riak_service3', 'join', 'error', 'riak_service3', 'leave'),
React('riak_service2', 'leave', 'success', 'riak_service2', 'join'),
React('riak_service3', 'leave', 'success', 'riak_service3', 'join'),
# React('riak_service2', 'leave', 'success', 'riak_service1', 'commit_leave'),
# React('riak_service3', 'leave', 'success', 'riak_service1', 'commit_leave'),
# Dep('riak_service1', 'commit_leave', 'success', 'riak_service2', 'join'),
# Dep('riak_service1', 'commit_leave', 'success', 'riak_service3', 'join'),
React('riak_service3', 'join', 'success', 'riak_service1', 'commit'),
React('riak_service2', 'join', 'success', 'riak_service1', 'commit')
]
for event in events:
add_event(event)
click.echo('Use solar changes process & orch')
sys.exit(0)
def setup_haproxies():
hps = []
hpc = []
hpsc_http = []
hpsc_pb = []
for i in xrange(3):
num = i + 1
hps.append(cr.create('haproxy_service%d' % num,
'resources/haproxy_service',
{})[0])
hpc.append(cr.create('haproxy_config%d' % num,
'resources/haproxy_config',
{})[0])
hpsc_http.append(cr.create('haproxy_service_config_http%d' % num,
'resources/haproxy_service_config',
{'listen_port': 8098,
'protocol': 'http',
'name': 'riak_haproxy_http%d' % num})[0])
hpsc_pb.append(cr.create('haproxy_service_config_pb%d' % num,
'resources/haproxy_service_config',
{'listen_port': 8087,
'protocol': 'tcp',
'name': 'riak_haproxy_pb%d' % num})[0])
riak1 = resource.load('riak_service1')
riak2 = resource.load('riak_service2')
riak3 = resource.load('riak_service3')
riaks = [riak1, riak2, riak3]
for single_hpsc in hpsc_http:
for riak in riaks:
riak.connect(single_hpsc, {
'riak_hostname': 'backends:server',
'riak_port_http': 'backends:port'})
for single_hpsc in hpsc_pb:
for riak in riaks:
riak.connect(single_hpsc,
{'riak_hostname': 'backends:server',
'riak_port_pb': 'backends:port'})
# haproxy config to haproxy service
for single_hpc, single_hpsc in zip(hpc, hpsc_http):
single_hpsc.connect(single_hpc, {"backends": "config:backends",
"listen_port": "config:listen_port",
"protocol": "config:protocol",
"name": "config:name"})
for single_hpc, single_hpsc in zip(hpc, hpsc_pb):
single_hpsc.connect(single_hpc, {"backends": "config:backends",
"listen_port": "config:listen_port",
"protocol": "config:protocol",
"name": "config:name"})
# assign haproxy services to each node
node1 = resource.load('node1')
node2 = resource.load('node2')
node3 = resource.load('node3')
nodes = [node1, node2, node3]
for single_node, single_hps in zip(nodes, hps):
single_node.connect(single_hps)
for single_node, single_hpc in zip(nodes, hpc):
single_node.connect(single_hpc)
has_errors = False
for r in locals().values():
# TODO: handle list
if not isinstance(r, resource.Resource):
continue
# print 'Validating {}'.format(r.name)
local_errors = validation.validate_resource(r)
if local_errors:
has_errors = True
print 'ERROR: %s: %s' % (r.name, local_errors)
if has_errors:
print "ERRORS"
sys.exit(1)
events = []
for node, single_hps, single_hpc in zip(nodes, hps, hpc):
# r = React(node.name, 'run', 'success', single_hps.name, 'install')
d = Dep(single_hps.name, 'run', 'success', single_hpc.name, 'run')
e1 = React(single_hpc.name, 'run', 'success', single_hps.name, 'apply_config')
e2 = React(single_hpc.name, 'update', 'success', single_hps.name, 'apply_config')
# events.extend([r, d, e1, e2])
events.extend([d, e1, e2])
for event in events:
add_event(event)
@click.command()
@click.argument('i', type=int, required=True)
def add_solar_agent(i):
solar_agent_transport = cr.create('solar_agent_transport%s' % i, 'resources/transport_solar_agent',
{'solar_agent_user': 'vagrant',
'solar_agent_password': 'password'})[0]
transports = resource.load('transports%s' % i)
ssh_transport = resource.load('ssh_transport%s' % i)
transports_for_solar_agent = cr.create('transports_for_solar_agent%s' % i, 'resources/transports')[0]
# install solar_agent with ssh
signals.connect(transports_for_solar_agent, solar_agent_transport, {})
signals.connect(ssh_transport, transports_for_solar_agent, {'ssh_key': 'transports:key',
'ssh_user': 'transports:user',
'ssh_port': 'transports:port',
'name': 'transports:name'})
# add solar_agent to transports on this node
signals.connect(solar_agent_transport, transports, {'solar_agent_user': 'transports:user',
'solar_agent_port': 'transports:port',
'solar_agent_password': 'transports:password',
'name': 'transports:name'})
@click.group()
def main():
pass
@click.command()
def deploy():
setup_riak()
@click.command()
def add_haproxies():
setup_haproxies()
@click.command()
def undeploy():
    raise NotImplementedError("Not yet")
@click.command()
def create_all():
setup_riak()
setup_haproxies()
main.add_command(deploy)
main.add_command(undeploy)
main.add_command(add_haproxies)
main.add_command(add_solar_agent)
main.add_command(create_all)
if __name__ == '__main__':
main()

View File

@ -1,103 +0,0 @@
#!/usr/bin/env python
# this allows you to create a riak cluster as big as you want
import click
import sys
from solar.core import resource
from solar.core import signals
from solar.core import validation
from solar.core.resource import composer as cr
from solar import errors
from solar.interfaces.db import get_db
from solar.events.controls import React, Dep
from solar.events.api import add_event
db = get_db()
NODES = 3
def setup_riak(nodes_num=None, hosts_mapping=False):
if nodes_num is None:
nodes_num = NODES
db.clear()
resources = cr.create('nodes', 'templates/nodes', {'count': nodes_num})
nodes = [x for x in resources if x.name.startswith('node')]
hosts_services = [x for x in resources if x.name.startswith('hosts_file')]
riak_services = []
ips = '10.0.0.%d'
for i in xrange(nodes_num):
num = i + 1
r = cr.create('riak_service%d' % num,
'resources/riak_node',
{'riak_self_name': 'riak%d' % num,
'riak_hostname': 'riak_server%d.solar' % num,
'riak_name': 'riak%d@riak_server%d.solar' % (num, num)})[0]
riak_services.append(r)
for i, riak in enumerate(riak_services):
nodes[i].connect(riak)
for i, riak in enumerate(riak_services[1:]):
riak_services[0].connect(riak, {'riak_name': 'join_to'})
if hosts_mapping:
for riak in riak_services:
for hosts_file in hosts_services:
riak.connect_with_events(hosts_file,
{'riak_hostname': 'hosts:name',
'ip': 'hosts:ip'})
res_errors = resource.validate_resources()
for r, error in res_errors:
click.echo('ERROR: %s: %s' % (r.name, error))
    if res_errors:
click.echo("ERRORS")
sys.exit(1)
events = []
for x in xrange(nodes_num):
i = x + 1
if hosts_mapping:
events.append(Dep('hosts_file%d' % i, 'run', 'success', 'riak_service%d' % i, 'run'))
if i >= 2:
events.append(React('riak_service%d' % i, 'run', 'success', 'riak_service%d' % i, 'join'))
events.append(React('riak_service%d' % i, 'join', 'success', 'riak_service1', 'commit'))
for event in events:
add_event(event)
click.echo('Use solar changes process & orch')
sys.exit(0)
@click.group()
def main():
pass
@click.command()
@click.argument('nodes_count', type=int)
@click.argument('hosts_mapping', type=bool)
def deploy(nodes_count, hosts_mapping):
click.secho("With big nodes_count, this example is DB heavy, it creates NxN connections, continue ? [y/N] ", fg='red', nl=False)
    c = click.getchar()
if c in ('y', 'Y'):
setup_riak(nodes_count, hosts_mapping)
else:
click.echo("Aborted")
if __name__ == '__main__':
main.add_command(deploy)
main()

View File

@ -1,61 +0,0 @@
import click
import sys
import time
from solar.core import resource
from solar.core import signals
from solar.core.resource import composer as cr
from solar.dblayer.model import ModelMeta
def run():
ModelMeta.remove_all()
node = cr.create('node', 'resources/ro_node', {'name': 'first' + str(time.time()),
'ip': '10.0.0.3',
'node_id': 'node1',
})[0]
transports = cr.create('transports_node1', 'resources/transports')[0]
transports_for_solar_agent = cr.create('transports_for_solar_agent', 'resources/transports')[0]
ssh_transport = cr.create('ssh_transport', 'resources/transport_ssh',
{'ssh_key': '/vagrant/.vagrant/machines/solar-dev1/virtualbox/private_key',
'ssh_user': 'vagrant'})[0]
solar_agent_transport = cr.create('solar_agent_transport', 'resources/transport_solar_agent',
{'solar_agent_user': 'vagrant',
'solar_agent_password': 'password'})[0]
transports_for_solar_agent.connect(solar_agent_transport, {})
ssh_transport.connect(transports_for_solar_agent,{'ssh_key': 'transports:key',
'ssh_user': 'transports:user',
'ssh_port': 'transports:port',
'name': 'transports:name'})
# set transports_id
transports.connect(node, {})
# it uses reverse mappings
ssh_transport.connect(transports, {'ssh_key': 'transports:key',
'ssh_user': 'transports:user',
'ssh_port': 'transports:port',
'name': 'transports:name'})
solar_agent_transport.connect(transports, {'solar_agent_user': 'transports:user',
'solar_agent_port': 'transports:port',
'solar_agent_password': 'transports:password',
'name': 'transports:name'})
hosts = cr.create('hosts_file', 'resources/hosts_file', {})[0]
node.connect(hosts, {
'ip': 'hosts:ip',
'name': 'hosts:name'
})
# for r in (node, hosts, ssh_transport, transports):
# print r.name, repr(r.args['location_id']), repr(r.args['transports_id'])
# print hosts.transports()
# print hosts.ip()
run()

View File

@ -1,25 +0,0 @@
Example of using the torrent transport with Solar. Torrents are used to distribute task data. After fetching finishes, the torrent client forks and continues seeding.
The example contains a single node with a single host mapping plus transports.
Execute:
```
python examples/torrent/example.py
solar changes stage
solar changes process
solar orch run-once last
```
Wait for it to finish:
```
solar orch report last -w 100
```
After this you should see a new entry in the `/etc/hosts` file.
* All created torrents are in `/vagrant/torrents`; this directory does not need to be shared
* Initial seeding is done using the torrent file
* Downloading and subsequent seeding are always done with magnet links
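
To check which transports a resource ended up with, a quick look from a Python shell can help. This is only a sketch: the `transports()` helper is the one hinted at by the commented-out debug lines in the neighbouring transport example, and the resource name comes from `example.py` below.

```python
from solar.core import resource

# inspect the transports resolved for the hosts_file resource
hosts = resource.load('hosts_file')
print hosts.transports()
```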

View File

@ -1,70 +0,0 @@
import time
from solar.core.resource import composer as cr
from solar import errors
from solar.dblayer.model import ModelMeta
def run():
ModelMeta.remove_all()
node = cr.create('node', 'resources/ro_node', {'name': 'first' + str(time.time()),
'ip': '10.0.0.3',
'node_id': 'node1',
})[0]
transports = cr.create('transports_node1', 'resources/transports')[0]
ssh_transport = cr.create('ssh_transport', 'resources/transport_ssh',
{'ssh_key': '/vagrant/.vagrant/machines/solar-dev1/virtualbox/private_key',
'ssh_user': 'vagrant'})[0]
transports.connect(node, {})
# it uses reverse mappings
ssh_transport.connect(transports, {'ssh_key': 'transports:key',
'ssh_user': 'transports:user',
'ssh_port': 'transports:port',
'name': 'transports:name'})
hosts = cr.create('hosts_file', 'resources/hosts_file', {})[0]
# let's add torrent transport for hosts file deployment (useless in real life)
torrent_transport = cr.create('torrent_transport',
'resources/transport_torrent',
{'trackers': ['udp://open.demonii.com:1337',
'udp://tracker.openbittorrent.com:80']})[0]
# you could use any trackers as you want
transports_for_torrent = cr.create(
'transports_for_torrent', 'resources/transports')[0]
transports_for_torrent.connect(torrent_transport, {})
ssh_transport.connect_with_events(transports_for_torrent, {'ssh_key': 'transports:key',
'ssh_user': 'transports:user',
'ssh_port': 'transports:port',
'name': 'transports:name'},
events={})
transports_for_hosts = cr.create(
'transports_for_hosts', 'resources/transports')[0]
torrent_transport.connect(transports_for_hosts, {'trackers': 'transports:trackers',
'name': 'transports:name'})
ssh_transport.connect(transports_for_hosts, {'ssh_key': 'transports:key',
'ssh_user': 'transports:user',
'ssh_port': 'transports:port',
'name': 'transports:name'})
transports_for_hosts.connect(hosts)
transports_for_hosts.connect_with_events(node, events={})
node.connect(hosts, {
'ip': 'hosts:ip',
'name': 'hosts:name'
})
run()

View File

@ -1,9 +0,0 @@
- hosts: localhost
sudo: yes
vars:
var1: 'playbook'
roles:
- { role: "test_role" }
tasks:
- debug: msg="VAR1 value is {{var1}}"
- fail: msg='just test failure'

View File

@ -1,4 +0,0 @@
var1: initial
uuid: stuff
def1: the_same

View File

@ -1 +0,0 @@
- debug: msg="Variable1 {{ var1 }} with uuid {{ uuid }} and default var {{ def1 }}"

View File

@ -1,10 +0,0 @@
handler: ansible_playbook
version: 0.0.1
input:
var1:
type: str!
value: meta
uuid:
type: str!
value: 'aa1das1231'

View File

@ -1,6 +0,0 @@
- hosts: '*'
sudo: yes
vars:
default1: playbook
tasks:
- debug: msg="my message {{default1}}"

View File

@ -1,15 +0,0 @@
handler: ansible_playbook
version: 0.0.1
input:
ip:
type: str!
value:
# ssh_user:
# type: str!
# value:
# ssh_key:
# type: str!
# value:
default1:
type: str!
value: meta

View File

@ -1,4 +0,0 @@
# Apache puppet resource
This class installs Apache and manages the Apache service.
Defaults are provided for the Debian OS family.
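
A minimal composer sketch for wiring this resource, assuming the repository path `resources/apache_puppet` and an existing `node1` resource; `servername` and `ip` are the required inputs declared in `meta.yaml`.

```python
from solar.core import resource
from solar.core.resource import composer as cr

node = resource.load('node1')  # an already created node resource
apache = cr.create('apache1', 'resources/apache_puppet',
                   {'servername': 'apache.example.com'})[0]
# place the service on the node and feed the node's ip into the resource
node.connect(apache, {'ip': 'ip'})
```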

View File

@ -1,5 +0,0 @@
class {'apache':
service_enable => false,
service_ensure => 'stopped',
package_ensure => 'absent',
}

View File

@ -1,120 +0,0 @@
$resource = hiera($::resource_name)
$apache_name = $resource['input']['apache_name']
$service_name = $resource['input']['service_name']
$default_mods = $resource['input']['default_mods']
$default_vhost = $resource['input']['default_vhost']
$default_charset = $resource['input']['default_charset']
$default_confd_files = $resource['input']['default_confd_files']
$default_ssl_vhost = $resource['input']['default_ssl_vhost']
$default_ssl_cert = $resource['input']['default_ssl_cert']
$default_ssl_key = $resource['input']['default_ssl_key']
$default_ssl_chain = $resource['input']['default_ssl_chain']
$default_ssl_ca = $resource['input']['default_ssl_ca']
$default_ssl_crl_path = $resource['input']['default_ssl_crl_path']
$default_ssl_crl = $resource['input']['default_ssl_crl']
$default_ssl_crl_check = $resource['input']['default_ssl_crl_check']
$default_type = $resource['input']['default_type']
$ip = $resource['input']['ip']
$service_restart = $resource['input']['service_restart']
$purge_configs = $resource['input']['purge_configs']
$purge_vhost_dir = $resource['input']['purge_vhost_dir']
$purge_vdir = $resource['input']['purge_vdir']
$serveradmin = $resource['input']['serveradmin']
$sendfile = $resource['input']['sendfile']
$error_documents = $resource['input']['error_documents']
$timeout = $resource['input']['timeout']
$httpd_dir = $resource['input']['httpd_dir']
$server_root = $resource['input']['server_root']
$conf_dir = $resource['input']['conf_dir']
$confd_dir = $resource['input']['confd_dir']
$vhost_dir = $resource['input']['vhost_dir']
$vhost_enable_dir = $resource['input']['vhost_enable_dir']
$mod_dir = $resource['input']['mod_dir']
$mod_enable_dir = $resource['input']['mod_enable_dir']
$mpm_module = $resource['input']['mpm_module']
$lib_path = $resource['input']['lib_path']
$conf_template = $resource['input']['conf_template']
$servername = $resource['input']['servername']
$manage_user = $resource['input']['manage_user']
$manage_group = $resource['input']['manage_group']
$user = $resource['input']['user']
$group = $resource['input']['group']
$keepalive = $resource['input']['keepalive']
$keepalive_timeout = $resource['input']['keepalive_timeout']
$max_keepalive_requests = $resource['input']['max_keepalive_requests']
$logroot = $resource['input']['logroot']
$logroot_mode = $resource['input']['logroot_mode']
$log_level = $resource['input']['log_level']
$log_formats = $resource['input']['log_formats']
$ports_file = $resource['input']['ports_file']
$docroot = $resource['input']['docroot']
$apache_version = $resource['input']['apache_version']
$server_tokens = $resource['input']['server_tokens']
$server_signature = $resource['input']['server_signature']
$trace_enable = $resource['input']['trace_enable']
$allow_encoded_slashes = $resource['input']['allow_encoded_slashes']
$package_ensure = $resource['input']['package_ensure']
$use_optional_includes = $resource['input']['use_optional_includes']
class {'apache':
apache_name => $apache_name,
service_name => $service_name,
default_mods => $default_mods,
default_vhost => $default_vhost,
default_charset => $default_charset,
default_confd_files => $default_confd_files,
default_ssl_vhost => $default_ssl_vhost,
default_ssl_cert => $default_ssl_cert,
default_ssl_key => $default_ssl_key,
default_ssl_chain => $default_ssl_chain,
default_ssl_ca => $default_ssl_ca,
default_ssl_crl_path => $default_ssl_crl_path,
default_ssl_crl => $default_ssl_crl,
default_ssl_crl_check => $default_ssl_crl_check,
default_type => $default_type,
ip => $ip,
service_enable => true,
service_manage => true,
service_ensure => 'running',
service_restart => $service_restart,
purge_configs => $purge_configs,
purge_vhost_dir => $purge_vhost_dir,
purge_vdir => $purge_vdir,
serveradmin => $serveradmin,
sendfile => $sendfile,
error_documents => $error_documents,
timeout => $timeout,
httpd_dir => $httpd_dir,
server_root => $server_root,
conf_dir => $conf_dir,
confd_dir => $confd_dir,
vhost_dir => $vhost_dir,
vhost_enable_dir => $vhost_enable_dir,
mod_dir => $mod_dir,
mod_enable_dir => $mod_enable_dir,
mpm_module => $mpm_module,
lib_path => $lib_path,
conf_template => $conf_template,
servername => $servername,
manage_user => $manage_user,
manage_group => $manage_group,
user => $user,
group => $group,
keepalive => $keepalive,
keepalive_timeout => $keepalive_timeout,
max_keepalive_requests => $max_keepalive_requests,
logroot => $logroot,
logroot_mode => $logroot_mode,
log_level => $log_level,
log_formats => $log_formats,
ports_file => $ports_file,
docroot => $docroot,
apache_version => $apache_version,
server_tokens => $server_tokens,
server_signature => $server_signature,
trace_enable => $trace_enable,
allow_encoded_slashes => $allow_encoded_slashes,
package_ensure => $package_ensure,
use_optional_includes => $use_optional_includes,
}

View File

@ -1,184 +0,0 @@
handler: puppet
version: 1.0.0
input:
apache_name:
schema: str
value: 'apache2'
service_name:
schema: str
value: 'apache2'
default_mods:
schema: bool
value: true
default_vhost:
schema: bool
value: true
default_charset:
schema: str
value:
default_confd_files:
schema: bool
value: true
default_ssl_vhost:
schema: bool
value: false
default_ssl_cert:
schema: str
value: '/etc/ssl/certs/ssl-cert-snakeoil.pem'
default_ssl_key:
schema: str
value: '/etc/ssl/private/ssl-cert-snakeoil.key'
default_ssl_chain:
schema: str
value:
default_ssl_ca:
schema: str
value:
default_ssl_crl_path:
schema: str
value:
default_ssl_crl:
schema: str
value:
default_ssl_crl_check:
schema: str
value:
default_type:
schema: str
value: 'none'
service_restart:
schema: str
value: 'restart'
purge_configs:
schema: bool
value: true
purge_vhost_dir:
schema: str
value:
purge_vdir:
schema: bool
value: false
serveradmin:
schema: str
value: 'root@localhost'
sendfile:
schema: str
value: 'On'
error_documents:
schema: bool
value: false
timeout:
schema: int
value: 120
httpd_dir:
schema: str
value: '/etc/apache2'
server_root:
schema: str
value: '/etc/apache2'
conf_dir:
schema: str
value: '/etc/apache2'
confd_dir:
schema: str
value: '/etc/apache2/conf.d'
vhost_dir:
schema: str
value: '/etc/apache2/sites-available'
vhost_enable_dir:
schema: str
value: '/etc/apache2/sites-enabled'
mod_dir:
schema: str
value: '/etc/apache2/mods-available'
mod_enable_dir:
schema: str
value: '/etc/apache2/mods-enabled'
mpm_module:
schema: str
value: 'worker'
lib_path:
schema: str
value: '/usr/lib/apache2/modules'
conf_template:
schema: str
value: 'apache/httpd.conf.erb'
servername:
schema: str!
value:
manage_user:
schema: bool
value: true
manage_group:
schema: bool
value: true
user:
schema: str
value: 'www-data'
group:
schema: str
value: 'www-data'
keepalive:
schema: str
value: 'Off'
keepalive_timeout:
schema: int
value: 15
max_keepalive_requests:
schema: int
value: 100
logroot:
schema: str
value: '/var/log/apache2'
logroot_mode:
schema: str
value: '0640'
log_level:
schema: str
value: 'warn'
log_formats:
schema: {}
value: {}
ports_file:
schema: str
value: '/etc/apache2/ports.conf'
docroot:
schema: str
value: '/srv/www'
apache_version:
schema: str
value: '2.4'
server_tokens:
schema: str
value: 'OS'
server_signature:
schema: str
value: 'On'
trace_enable:
schema: str
value: 'On'
allow_encoded_slashes:
schema: str
value:
package_ensure:
schema: str
value: 'installed'
use_optional_includes:
schema: bool
value: false
git:
schema: {repository: str!, branch: str!}
value: {repository: 'https://github.com/puppetlabs/puppetlabs-apache.git', branch: '1.5.0'}
ip:
schema: str!
value:
# ssh_key:
# schema: str!
# value:
# ssh_user:
# schema: str!
# value:
tags: [resource/apache_service, resources/apache]

View File

@ -1,11 +0,0 @@
import requests
from solar.core.log import log
def test(resource):
log.debug('Testing apache_puppet')
requests.get(
'http://%s:%s' % (resource.args['ip'], 80)
)

View File

@ -1,9 +0,0 @@
- hosts: [{{host}}]
sudo: yes
tasks:
- shell: rm -f {{item}}
with_items:
- /etc/apt/sources.list.d/{{name}}.list
- /etc/apt/preferences.d/{{name}}.pref
- shell: apt-get update
when: {{validate_integrity}}

View File

@ -1,11 +0,0 @@
- hosts: [{{host}}]
sudo: yes
tasks:
- template:
src: {{templates_dir}}/source
dest: /etc/apt/sources.list.d/{{name}}.list
- template:
src: {{templates_dir}}/preferences
dest: /etc/apt/preferences.d/{{name}}.pref
- shell: apt-get update
when: {{validate_integrity}}

View File

@ -1,24 +0,0 @@
handler: ansible
version: 1.0.0
input:
ip:
schema: str!
value:
repo:
schema: str!
value:
name:
schema: str!
value:
package:
schema: str
value: '*'
pin:
schema: str
value:
pin_priority:
schema: int
value:
validate_integrity:
schema: bool
value: true

View File

@ -1,3 +0,0 @@
Package: {{package}}
Pin: {{pin}}
Pin-Priority: {{pin_priority}}

View File

@ -1 +0,0 @@
{{repo}}

View File

@ -1,17 +0,0 @@
#!/bin/sh
BASE_PATH={{ target_directory }}
KEY_NAME={{ key_name }}
function generate_ssh_keys {
local dir_path=$BASE_PATH$KEY_NAME/
local key_path=$dir_path$KEY_NAME
mkdir -p $dir_path
if [ ! -f $key_path ]; then
ssh-keygen -b 2048 -t rsa -N '' -f $key_path 2>&1
else
        echo "Key $key_path already exists"
fi
}
generate_ssh_keys

View File

@ -1,16 +0,0 @@
handler: shell
version: 1.0.0
input:
ip:
schema: str!
value:
target_directory:
schema: str!
value: /var/lib/astute/
key_name:
schema: str!
value: ceph
path:
schema: str!
value: /var/lib/astute/ceph/
tags: []

View File

@ -1,95 +0,0 @@
notice('MODULAR: ceph/mon.pp')
$storage_hash = hiera('storage', {})
$public_vip = hiera('public_vip')
$management_vip = hiera('management_vip')
$use_syslog = hiera('use_syslog', true)
$syslog_log_facility_ceph = hiera('syslog_log_facility_ceph','LOG_LOCAL0')
$keystone_hash = hiera('keystone', {})
$mon_address_map = get_node_to_ipaddr_map_by_network_role(hiera_hash('ceph_monitor_nodes'), 'ceph/public')
if ($storage_hash['images_ceph']) {
$glance_backend = 'ceph'
} elsif ($storage_hash['images_vcenter']) {
$glance_backend = 'vmware'
} else {
$glance_backend = 'swift'
}
if ($storage_hash['volumes_ceph'] or
$storage_hash['images_ceph'] or
$storage_hash['objects_ceph'] or
$storage_hash['ephemeral_ceph']
) {
$use_ceph = true
} else {
$use_ceph = false
}
if $use_ceph {
$ceph_primary_monitor_node = hiera('ceph_primary_monitor_node')
$primary_mons = keys($ceph_primary_monitor_node)
$primary_mon = $ceph_primary_monitor_node[$primary_mons[0]]['name']
prepare_network_config(hiera_hash('network_scheme'))
$ceph_cluster_network = get_network_role_property('ceph/replication', 'network')
$ceph_public_network = get_network_role_property('ceph/public', 'network')
$mon_addr = get_network_role_property('ceph/public', 'ipaddr')
class {'ceph':
primary_mon => $primary_mon,
mon_hosts => keys($mon_address_map),
mon_ip_addresses => values($mon_address_map),
mon_addr => $mon_addr,
cluster_node_address => $public_vip,
osd_pool_default_size => $storage_hash['osd_pool_size'],
osd_pool_default_pg_num => $storage_hash['pg_num'],
osd_pool_default_pgp_num => $storage_hash['pg_num'],
use_rgw => false,
glance_backend => $glance_backend,
rgw_pub_ip => $public_vip,
rgw_adm_ip => $management_vip,
rgw_int_ip => $management_vip,
cluster_network => $ceph_cluster_network,
public_network => $ceph_public_network,
use_syslog => $use_syslog,
syslog_log_level => hiera('syslog_log_level_ceph', 'info'),
syslog_log_facility => $syslog_log_facility_ceph,
rgw_keystone_admin_token => $keystone_hash['admin_token'],
ephemeral_ceph => $storage_hash['ephemeral_ceph']
}
if ($storage_hash['volumes_ceph']) {
include ::cinder::params
service { 'cinder-volume':
ensure => 'running',
name => $::cinder::params::volume_service,
hasstatus => true,
hasrestart => true,
}
service { 'cinder-backup':
ensure => 'running',
name => $::cinder::params::backup_service,
hasstatus => true,
hasrestart => true,
}
Class['ceph'] ~> Service['cinder-volume']
Class['ceph'] ~> Service['cinder-backup']
}
if ($storage_hash['images_ceph']) {
include ::glance::params
service { 'glance-api':
ensure => 'running',
name => $::glance::params::api_service_name,
hasstatus => true,
hasrestart => true,
}
Class['ceph'] ~> Service['glance-api']
}
}

View File

@ -1,4 +0,0 @@
prepare_network_config(hiera_hash('network_scheme'))
$ceph_cluster_network = get_network_role_property('ceph/replication', 'network')
notify{"The value is: ${ceph_cluster_network}": }

View File

@ -1,37 +0,0 @@
handler: puppetv2
version: 1.0.0
input:
ip:
schema: str!
value:
public_vip:
schema: str!
value:
management_vip:
schema: str!
value:
use_syslog:
schema: bool
value: true
keystone:
schema: {'admin_token': 'str'}
value: {}
ceph_monitor_nodes:
schema: []
value: []
ceph_primary_monitor_node:
schema: []
value: []
storage:
schema: {}
value: {}
network_scheme:
schema: {}
value: {}
role:
schema: str!
value:
puppet_modules:
schema: str!
value:
tags: []

View File

@ -1,98 +0,0 @@
# Cinder API resource for puppet handler
Sets up and configures the Cinder API endpoint.
## Parameters
source https://github.com/openstack/puppet-cinder/blob/5.1.0/manifests/api.pp
``keystone_password``
The password to use for authentication (keystone)
``keystone_enabled``
(optional) Use keystone for authentication
Defaults to true
``keystone_tenant``
(optional) The tenant of the auth user
Defaults to services
``keystone_user``
(optional) The name of the auth user
Defaults to cinder
``keystone_auth_host``
(optional) The keystone host
Defaults to localhost
``keystone_auth_port``
(optional) The keystone auth port
Defaults to 35357
``keystone_auth_protocol``
(optional) The protocol used to access the auth host
Defaults to http.
``os_region_name``
(optional) Some operations require cinder to make API requests
to Nova. This sets the keystone region to be used for these
requests. For example, boot-from-volume.
Defaults to undef.
``keystone_auth_admin_prefix``
(optional) The admin_prefix used for the admin endpoint of the auth host.
This allows admin auth URIs like http://auth_host:35357/keystone.
(where '/keystone' is the admin prefix)
Defaults to false for empty. If defined, should be a string with a
leading '/' and no trailing '/'.
``service_port``
(optional) The cinder api port
Defaults to 5000
``service_workers``
(optional) Number of cinder-api workers
Defaults to $::processorcount
``package_ensure``
(optional) The state of the package
Defaults to present
``bind_host``
(optional) The cinder api bind address
Defaults to 0.0.0.0
``ratelimits``
(optional) The state of the service
Defaults to undef. If undefined the default ratelimiting values are used.
``ratelimits_factory``
(optional) Factory to use for ratelimiting
Defaults to 'cinder.api.v1.limits:RateLimitingMiddleware.factory'
``default_volume_type``
(optional) default volume type to use.
This should contain the name of the default volume type to use.
If not configured, it produces an error when creating a volume
without specifying a type.
Defaults to 'false'.
``validate``
(optional) Whether to validate the service is working after any service refreshes
Defaults to false
``validation_options``
(optional) Service validation options
Should be a hash of options defined in openstacklib::service_validation
If empty, default values are taken from the openstacklib function.
Default command list volumes.
Require validate set at True.
Example:
glance::api::validation_options:
glance-api:
command: check_cinder-api.py
path: /usr/bin:/bin:/usr/sbin:/sbin
provider: shell
tries: 5
try_sleep: 10
Defaults to {}
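
A minimal composer sketch, assuming the repository path `resources/cinder_api_puppet` and pre-existing `node1` and `keystone1` resources; the keystone output names used in the mapping are illustrative only.

```python
from solar.core import resource
from solar.core.resource import composer as cr

node = resource.load('node1')
keystone = resource.load('keystone1')  # hypothetical keystone service resource
cinder_api = cr.create('cinder_api', 'resources/cinder_api_puppet',
                       {'keystone_password': 'secret'})[0]
node.connect(cinder_api)
# feed keystone connection details into the inputs documented above
keystone.connect(cinder_api, {'ip': 'keystone_auth_host',
                              'admin_port': 'keystone_auth_port'})
```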

View File

@ -1,12 +0,0 @@
class {'cinder::api':
enabled => false,
package_ensure => 'absent',
keystone_password => 'not important as removed',
}
include cinder::params
package { 'cinder':
ensure => 'absent',
name => $::cinder::params::package_name,
}

View File

@ -1,52 +0,0 @@
$resource = hiera($::resource_name)
$keystone_password = $resource['input']['keystone_password']
$keystone_enabled = $resource['input']['keystone_enabled']
$keystone_tenant = $resource['input']['keystone_tenant']
$keystone_user = $resource['input']['keystone_user']
$keystone_auth_host = $resource['input']['keystone_auth_host']
$keystone_auth_port = $resource['input']['keystone_auth_port']
$keystone_auth_protocol = $resource['input']['keystone_auth_protocol']
$keystone_auth_admin_prefix = $resource['input']['keystone_auth_admin_prefix']
$keystone_auth_uri = $resource['input']['keystone_auth_uri']
$os_region_name = $resource['input']['os_region_name']
$service_port = $resource['input']['service_port']
$service_workers = $resource['input']['service_workers']
$package_ensure = $resource['input']['package_ensure']
$bind_host = $resource['input']['bind_host']
$ratelimits = $resource['input']['ratelimits']
$default_volume_type = $resource['input']['default_volume_type']
$ratelimits_factory = $resource['input']['ratelimits_factory']
$validate = $resource['input']['validate']
$validation_options = $resource['input']['validation_options']
include cinder::params
package { 'cinder':
ensure => $package_ensure,
name => $::cinder::params::package_name,
} ->
class {'cinder::api':
keystone_password => $keystone_password,
keystone_enabled => $keystone_enabled,
keystone_tenant => $keystone_tenant,
keystone_user => $keystone_user,
keystone_auth_host => $keystone_auth_host,
keystone_auth_port => $keystone_auth_port,
keystone_auth_protocol => $keystone_auth_protocol,
keystone_auth_admin_prefix => $keystone_auth_admin_prefix,
keystone_auth_uri => $keystone_auth_uri,
os_region_name => $os_region_name,
service_port => $service_port,
service_workers => $service_workers,
package_ensure => $package_ensure,
bind_host => $bind_host,
enabled => true,
manage_service => true,
ratelimits => $ratelimits,
default_volume_type => $default_volume_type,
ratelimits_factory => $ratelimits_factory,
validate => $validate,
validation_options => $validation_options,
}

View File

@ -1,56 +0,0 @@
$resource = hiera($::resource_name)
$keystone_password = $resource['input']['keystone_password']
$keystone_enabled = $resource['input']['keystone_enabled']
$keystone_tenant = $resource['input']['keystone_tenant']
$keystone_user = $resource['input']['keystone_user']
$keystone_auth_host = $resource['input']['keystone_auth_host']
$keystone_auth_port = $resource['input']['keystone_auth_port']
$keystone_auth_protocol = $resource['input']['keystone_auth_protocol']
$keystone_auth_admin_prefix = $resource['input']['keystone_auth_admin_prefix']
$keystone_auth_uri = $resource['input']['keystone_auth_uri']
$os_region_name = $resource['input']['os_region_name']
$service_port = $resource['input']['service_port']
$service_workers = $resource['input']['service_workers']
$package_ensure = $resource['input']['package_ensure']
$bind_host = $resource['input']['bind_host']
$ratelimits = $resource['input']['ratelimits']
$default_volume_type = $resource['input']['default_volume_type']
$ratelimits_factory = $resource['input']['ratelimits_factory']
$validate = $resource['input']['validate']
$validation_options = $resource['input']['validation_options']
include cinder::params
package { 'cinder':
ensure => $package_ensure,
name => $::cinder::params::package_name,
} ->
class {'cinder::api':
keystone_password => $keystone_password,
keystone_enabled => $keystone_enabled,
keystone_tenant => $keystone_tenant,
keystone_user => $keystone_user,
keystone_auth_host => $keystone_auth_host,
keystone_auth_port => $keystone_auth_port,
keystone_auth_protocol => $keystone_auth_protocol,
keystone_auth_admin_prefix => $keystone_auth_admin_prefix,
keystone_auth_uri => $keystone_auth_uri,
os_region_name => $os_region_name,
service_port => $service_port,
service_workers => $service_workers,
package_ensure => $package_ensure,
bind_host => $bind_host,
enabled => true,
manage_service => true,
ratelimits => $ratelimits,
default_volume_type => $default_volume_type,
ratelimits_factory => $ratelimits_factory,
validate => $validate,
validation_options => $validation_options,
}
notify { "restart cinder api":
notify => Service["cinder-api"],
}

View File

@ -1,76 +0,0 @@
handler: puppet
version: 1.0.0
input:
keystone_password:
schema: str!
value: 'keystone'
keystone_enabled:
schema: bool
value: true
keystone_tenant:
schema: str
value: 'services'
keystone_user:
schema: str
value: 'cinder'
keystone_auth_host:
schema: str
value: 'localhost'
keystone_auth_port:
schema: int
value: 35357
keystone_auth_protocol:
schema: str
value: 'http'
keystone_auth_admin_prefix:
schema: bool
value: false
keystone_auth_uri:
schema: bool
value: false
os_region_name:
schema: str
value:
service_port:
schema: int
value: 5000
service_workers:
schema: int
value: 1
package_ensure:
schema: str
value: 'present'
bind_host:
schema: str
value: '0.0.0.0'
ratelimits:
schema: str
value:
default_volume_type:
schema: bool
value: false
ratelimits_factory:
schema: str
value: 'cinder.api.v1.limits:RateLimitingMiddleware.factory'
validate:
schema: bool
value: false
validation_options:
schema: {}
value: {}
git:
schema: {repository: str!, branch: str!}
value: {repository: 'https://github.com/openstack/puppet-cinder', branch: '5.1.0'}
ip:
schema: str!
value:
# ssh_key:
# schema: str!
# value:
# ssh_user:
# schema: str!
# value:
tags: [resource/cinder_api_service, resources/cinder_api, resources/cinder]

View File

@ -1,10 +0,0 @@
import requests
from solar.core.log import log
def test(resource):
log.debug('Testing cinder_api_puppet')
requests.get(
'http://%s:%s' % (resource.args['ip'], resource.args['service_port'])
)

View File

@ -1,39 +0,0 @@
# Cinder Glance resource for puppet handler
Configures the Glance settings that Cinder uses to store and fetch image data.
# Parameters
source https://github.com/openstack/puppet-cinder/blob/5.1.0/manifests/glance.pp
``glance_api_servers``
(optional) A list of the glance api servers available to cinder.
Should be an array with [hostname|ip]:port
Defaults to undef
Note: for this resource, it is decomposed to *_host and *_port due to
existing implementation limitations
``glance_api_version``
(optional) Glance API version.
Should be 1 or 2
Defaults to 2 (current version)
``glance_num_retries``
(optional) Number retries when downloading an image from glance.
Defaults to 0
``glance_api_insecure``
(optional) Allow to perform insecure SSL (https) requests to glance.
Defaults to false
``glance_api_ssl_compression``
(optional) Whether to attempt to negotiate SSL layer compression when
using SSL (https) requests. Set to False to disable SSL
layer compression. In some cases disabling this may improve
data throughput, eg when high network bandwidth is available
and you are using already compressed image formats such as qcow2.
Defaults to false
``glance_request_timeout``
(optional) http/https timeout value for glance operations.
Defaults to undef
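
Because `glance_api_servers` is decomposed into separate host and port inputs, wiring it from a glance resource is just two mappings. A sketch only: the `glance_api` resource and its `ip`/`bind_port` outputs are assumptions, while the cinder_glance input names come from `meta.yaml` in this directory.

```python
from solar.core import resource

cinder_glance = resource.load('cinder_glance')
glance_api = resource.load('glance_api')  # hypothetical glance service resource
glance_api.connect(cinder_glance, {'ip': 'glance_api_servers_host',
                                   'bind_port': 'glance_api_servers_port'})
```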

View File

@ -1 +0,0 @@
notify { 'Nothing to remove here': }

View File

@ -1,18 +0,0 @@
$resource = hiera($::resource_name)
$glance_api_version = $resource['input']['glance_api_version']
$glance_num_retries = $resource['input']['glance_num_retries']
$glance_api_insecure = $resource['input']['glance_api_insecure']
$glance_api_ssl_compression = $resource['input']['glance_api_ssl_compression']
$glance_request_timeout = $resource['input']['glance_request_timeout']
$glance_api_servers_host = $resource['input']['glance_api_servers_host']
$glance_api_servers_port = $resource['input']['glance_api_servers_port']
class {'cinder::glance':
glance_api_servers => "${glance_api_servers_host}:${glance_api_servers_port}",
glance_api_version => $glance_api_version,
glance_num_retries => $glance_num_retries,
glance_api_insecure => $glance_api_insecure,
glance_api_ssl_compression => $glance_api_ssl_compression,
glance_request_timeout => $glance_request_timeout,
}

View File

@ -1,41 +0,0 @@
handler: puppet
version: 1.0.0
input:
glance_api_version:
schema: int
value: 2
glance_num_retries:
schema: int
value: 0
glance_api_insecure:
schema: bool
value: false
glance_api_ssl_compression:
schema: bool
value: false
glance_request_timeout:
schema: str
value:
git:
schema: {repository: str!, branch: str!}
value: {repository: 'https://github.com/openstack/puppet-cinder', branch: '5.1.0'}
ip:
schema: str!
value:
# ssh_key:
# schema: str!
# value:
# ssh_user:
# schema: str!
# value:
glance_api_servers_port:
schema: int
value: 9292
glance_api_servers_host:
schema: 'str'
value: 'localhost'
tags: [resource/cinder_glance_service, resources/cinder_glance, resources/cinder]

View File

@ -1,112 +0,0 @@
# Cinder resource for puppet handler
Controls the life cycle of the Cinder entities:
the main puppet class, auth, DB, AMQP, packages,
keystone user, role and endpoint.
# Parameters
source https://github.com/openstack/puppet-cinder/blob/5.1.0/manifests/init.pp
``database_connection``
Url used to connect to database.
(Optional) Defaults to
'sqlite:////var/lib/cinder/cinder.sqlite'
``database_idle_timeout``
Timeout when db connections should be reaped.
(Optional) Defaults to 3600.
``database_min_pool_size``
Minimum number of SQL connections to keep open in a pool.
(Optional) Defaults to 1.
``database_max_pool_size``
Maximum number of SQL connections to keep open in a pool.
(Optional) Defaults to undef.
``database_max_retries``
Maximum db connection retries during startup.
Setting -1 implies an infinite retry count.
(Optional) Defaults to 10.
``database_retry_interval``
Interval between retries of opening a sql connection.
(Optional) Defaults to 10.
``database_max_overflow``
If set, use this value for max_overflow with sqlalchemy.
(Optional) Defaults to undef.
``rabbit_use_ssl``
(optional) Connect over SSL for RabbitMQ
Defaults to false
``kombu_ssl_ca_certs``
(optional) SSL certification authority file (valid only if SSL enabled).
Defaults to undef
``kombu_ssl_certfile``
(optional) SSL cert file (valid only if SSL enabled).
Defaults to undef
``kombu_ssl_keyfile``
(optional) SSL key file (valid only if SSL enabled).
Defaults to undef
``kombu_ssl_version``
(optional) SSL version to use (valid only if SSL enabled).
Valid values are TLSv1, SSLv23 and SSLv3. SSLv2 may be
available on some distributions.
Defaults to 'TLSv1'
``amqp_durable_queues``
Use durable queues in amqp.
(Optional) Defaults to false.
``use_syslog``
Use syslog for logging.
(Optional) Defaults to false.
``log_facility``
Syslog facility to receive log lines.
(Optional) Defaults to LOG_USER.
``log_dir``
(optional) Directory where logs should be stored.
If set to boolean false, it will not log to any directory.
Defaults to '/var/log/cinder'
``use_ssl``
(optional) Enable SSL on the API server
Defaults to false, not set
``cert_file``
(optional) Certificate file to use when starting API server securely
Defaults to false, not set
``key_file``
(optional) Private key file to use when starting API server securely
Defaults to false, not set
``ca_file``
(optional) CA certificate file to use to verify connecting clients
Defaults to false, not set
``mysql_module``
(optional) Deprecated. Does nothing.
``storage_availability_zone``
(optional) Availability zone of the node.
Defaults to 'nova'
``default_availability_zone``
(optional) Default availability zone for new volumes.
If not set, the storage_availability_zone option value is used as
the default for new volumes.
Defaults to false
``sql_connection``
DEPRECATED
``sql_idle_timeout``
DEPRECATED

View File

@ -1,4 +0,0 @@
class {'cinder':
package_ensure => 'absent',
rabbit_password => 'not important as removed',
}

View File

@ -1,116 +0,0 @@
$resource = hiera($::resource_name)
$ip = $resource['input']['ip']
$db_user = $resource['input']['db_user']
$db_password = $resource['input']['db_password']
$db_name = $resource['input']['db_name']
$db_host = $resource['input']['db_host']
$db_port = $resource['input']['db_port']
$database_connection = $resource['input']['database_connection']
$database_idle_timeout = $resource['input']['database_idle_timeout']
$database_min_pool_size = $resource['input']['database_min_pool_size']
$database_max_pool_size = $resource['input']['database_max_pool_size']
$database_max_retries = $resource['input']['database_max_retries']
$database_retry_interval = $resource['input']['database_retry_interval']
$database_max_overflow = $resource['input']['database_max_overflow']
$rpc_backend = $resource['input']['rpc_backend']
$control_exchange = $resource['input']['control_exchange']
$rabbit_host = $resource['input']['rabbit_host']
$rabbit_port = $resource['input']['rabbit_port']
$rabbit_hosts = $resource['input']['rabbit_hosts']
$rabbit_virtual_host = $resource['input']['rabbit_virtual_host']
$rabbit_userid = $resource['input']['rabbit_userid']
$rabbit_password = $resource['input']['rabbit_password']
$rabbit_use_ssl = $resource['input']['rabbit_use_ssl']
$kombu_ssl_ca_certs = $resource['input']['kombu_ssl_ca_certs']
$kombu_ssl_certfile = $resource['input']['kombu_ssl_certfile']
$kombu_ssl_keyfile = $resource['input']['kombu_ssl_keyfile']
$kombu_ssl_version = $resource['input']['kombu_ssl_version']
$amqp_durable_queues = $resource['input']['amqp_durable_queues']
$qpid_hostname = $resource['input']['qpid_hostname']
$qpid_port = $resource['input']['qpid_port']
$qpid_username = $resource['input']['qpid_username']
$qpid_password = $resource['input']['qpid_password']
$qpid_sasl_mechanisms = $resource['input']['qpid_sasl_mechanisms']
$qpid_reconnect = $resource['input']['qpid_reconnect']
$qpid_reconnect_timeout = $resource['input']['qpid_reconnect_timeout']
$qpid_reconnect_limit = $resource['input']['qpid_reconnect_limit']
$qpid_reconnect_interval_min = $resource['input']['qpid_reconnect_interval_min']
$qpid_reconnect_interval_max = $resource['input']['qpid_reconnect_interval_max']
$qpid_reconnect_interval = $resource['input']['qpid_reconnect_interval']
$qpid_heartbeat = $resource['input']['qpid_heartbeat']
$qpid_protocol = $resource['input']['qpid_protocol']
$qpid_tcp_nodelay = $resource['input']['qpid_tcp_nodelay']
$package_ensure = $resource['input']['package_ensure']
$use_ssl = $resource['input']['use_ssl']
$ca_file = $resource['input']['ca_file']
$cert_file = $resource['input']['cert_file']
$key_file = $resource['input']['key_file']
$api_paste_config = $resource['input']['api_paste_config']
$use_syslog = $resource['input']['use_syslog']
$log_facility = $resource['input']['log_facility']
$log_dir = $resource['input']['log_dir']
$verbose = $resource['input']['verbose']
$debug = $resource['input']['debug']
$storage_availability_zone = $resource['input']['storage_availability_zone']
$default_availability_zone = $resource['input']['default_availability_zone']
$mysql_module = $resource['input']['mysql_module']
# Do not apply the legacy stuff
#$sql_connection = $resource['input']['sql_connection']
$sql_idle_timeout = $resource['input']['sql_idle_timeout']
class {'cinder':
database_connection => "mysql://${db_user}:${db_password}@${db_host}:${db_port}/${db_name}",
database_idle_timeout => $database_idle_timeout,
database_min_pool_size => $database_min_pool_size,
database_max_pool_size => $database_max_pool_size,
database_max_retries => $database_max_retries,
database_retry_interval => $database_retry_interval,
database_max_overflow => $database_max_overflow,
rpc_backend => $rpc_backend,
control_exchange => $control_exchange,
rabbit_host => $rabbit_host,
rabbit_port => $rabbit_port,
rabbit_hosts => $rabbit_hosts,
rabbit_virtual_host => $rabbit_virtual_host,
rabbit_userid => $rabbit_userid,
rabbit_password => $rabbit_password,
rabbit_use_ssl => $rabbit_use_ssl,
kombu_ssl_ca_certs => $kombu_ssl_ca_certs,
kombu_ssl_certfile => $kombu_ssl_certfile,
kombu_ssl_keyfile => $kombu_ssl_keyfile,
kombu_ssl_version => $kombu_ssl_version,
amqp_durable_queues => $amqp_durable_queues,
qpid_hostname => $qpid_hostname,
qpid_port => $qpid_port,
qpid_username => $qpid_username,
qpid_password => $qpid_password,
qpid_sasl_mechanisms => $qpid_sasl_mechanisms,
qpid_reconnect => $qpid_reconnect,
qpid_reconnect_timeout => $qpid_reconnect_timeout,
qpid_reconnect_limit => $qpid_reconnect_limit,
qpid_reconnect_interval_min => $qpid_reconnect_interval_min,
qpid_reconnect_interval_max => $qpid_reconnect_interval_max,
qpid_reconnect_interval => $qpid_reconnect_interval,
qpid_heartbeat => $qpid_heartbeat,
qpid_protocol => $qpid_protocol,
qpid_tcp_nodelay => $qpid_tcp_nodelay,
package_ensure => $package_ensure,
use_ssl => $use_ssl,
ca_file => $ca_file,
cert_file => $cert_file,
key_file => $key_file,
api_paste_config => $api_paste_config,
use_syslog => $use_syslog,
log_facility => $log_facility,
log_dir => $log_dir,
verbose => $verbose,
debug => $debug,
storage_availability_zone => $storage_availability_zone,
default_availability_zone => $default_availability_zone,
mysql_module => $mysql_module,
  # sql_connection intentionally not passed (legacy, see comment above)
sql_idle_timeout => $sql_idle_timeout,
}

View File

@ -1,215 +0,0 @@
handler: puppet
actions:
  run: run.pp
  update: run.pp
version: 1.0.0
input:
  database_connection:
    schema: str
    value: 'sqlite:////var/lib/cinder/cinder.sqlite'
  database_idle_timeout:
    schema: int
    value: 3600
  database_min_pool_size:
    schema: int
    value: 1
  database_max_pool_size:
    schema: str
    value:
  database_max_retries:
    schema: int
    value: 10
  database_retry_interval:
    schema: int
    value: 10
  database_max_overflow:
    schema: str
    value:
  rpc_backend:
    schema: str
    value: 'cinder.openstack.common.rpc.impl_kombu'
  control_exchange:
    schema: str
    value: 'openstack'
  rabbit_host:
    schema: str
    value: '127.0.0.1'
  rabbit_port:
    schema: int
    value: 5672
  rabbit_hosts:
    schema: bool
    value: false
  rabbit_virtual_host:
    schema: str
    value: '/'
  rabbit_userid:
    schema: str
    value: 'guest'
  rabbit_password:
    schema: str!
    value: 'rabbit'
  rabbit_use_ssl:
    schema: bool
    value: false
  kombu_ssl_ca_certs:
    schema: str
    value:
  kombu_ssl_certfile:
    schema: str
    value:
  kombu_ssl_keyfile:
    schema: str
    value:
  kombu_ssl_version:
    schema: str
    value: 'TLSv1'
  amqp_durable_queues:
    schema: bool
    value: false
  qpid_hostname:
    schema: str
    value: 'localhost'
  qpid_port:
    schema: int
    value: 5672
  qpid_username:
    schema: str
    value: 'guest'
  qpid_password:
    schema: str!
    value: 'qpid'
  qpid_sasl_mechanisms:
    schema: bool
    value: false
  qpid_reconnect:
    schema: bool
    value: true
  qpid_reconnect_timeout:
    schema: int
    value: 0
  qpid_reconnect_limit:
    schema: int
    value: 0
  qpid_reconnect_interval_min:
    schema: int
    value: 0
  qpid_reconnect_interval_max:
    schema: int
    value: 0
  qpid_reconnect_interval:
    schema: int
    value: 0
  qpid_heartbeat:
    schema: int
    value: 60
  qpid_protocol:
    schema: str
    value: 'tcp'
  qpid_tcp_nodelay:
    schema: bool
    value: true
  package_ensure:
    schema: str
    value: 'present'
  use_ssl:
    schema: bool
    value: false
  ca_file:
    schema: bool
    value: false
  cert_file:
    schema: bool
    value: false
  key_file:
    schema: bool
    value: false
  api_paste_config:
    schema: str
    value: '/etc/cinder/api-paste.ini'
  use_syslog:
    schema: bool
    value: false
  log_facility:
    schema: str
    value: 'LOG_USER'
  log_dir:
    schema: str
    value: '/var/log/cinder'
  verbose:
    schema: bool
    value: false
  debug:
    schema: bool
    value: false
  storage_availability_zone:
    schema: str
    value: 'nova'
  default_availability_zone:
    schema: bool
    value: false
  mysql_module:
    schema: str
    value:
  sql_connection:
    schema: str
    value:
  sql_idle_timeout:
    schema: str
    value:
  db_user:
    schema: str!
    value: cinder
  db_password:
    schema: str!
    value: cinder
  db_name:
    schema: str!
    value: cinder
  db_host:
    schema: str!
    value:
  db_port:
    schema: int!
    value:
  port:
    schema: int!
    value: 8776
  module:
    schema: {name: str!, type: str, url: str, ref: str}
    value: {name: 'cinder', type: 'git', url: 'https://github.com/openstack/puppet-cinder', ref: '5.1.0'}
  keystone_host:
    schema: str!
    value:
  keystone_port:
    schema: int!
    value:
  keystone_user:
    schema: str!
    value:
  keystone_password:
    schema: str!
    value:
  keystone_tenant:
    schema: str!
    value:
  # forge:
  #   schema: str!
  #   value: 'stackforge-cinder'
  ip:
    schema: str!
    value:
  # ssh_key:
  #   schema: str!
  #   value:
  # ssh_user:
  #   schema: str!
  #   value:

tags: [resource/cinder_service, resources/cinder]
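
A trailing `!` in a schema (str!, int!) appears to mark an input that must be filled in before deployment; all of the db_*, keystone_*, port and ip inputs above use it. A small sketch that lists those required inputs with PyYAML (the file path is an assumption about where the meta.yaml sits in a solar-resources checkout):

import yaml

with open('resources/cinder_puppet/meta.yaml') as f:  # path assumed
    meta = yaml.safe_load(f)

# dict-valued schemas (e.g. the 'module' input) never end with '!', so they are skipped
required = sorted(name for name, spec in meta['input'].items()
                  if str(spec.get('schema', '')).endswith('!'))
print(required)  # ['db_host', 'db_name', 'db_password', 'db_port', 'db_user', 'ip', ...]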

View File

@ -1,10 +0,0 @@
import requests

from solar.core.log import log


def test(resource):
    log.debug('Testing cinder_puppet')
    requests.get(
        'http://%s:%s' % (resource.args['ip'], resource.args['port'])
    )

View File

@ -1,3 +0,0 @@
# Cinder Scheduler resource for puppet handler
Setup and configure the cinder scheduler service

View File

@ -1,4 +0,0 @@
class {'cinder::scheduler':
  enabled        => false,
  package_ensure => 'absent',
}

View File

@ -1,18 +0,0 @@
$resource = hiera($::resource_name)

$scheduler_driver = $resource['input']['scheduler_driver']
$package_ensure = $resource['input']['package_ensure']

include cinder::params

package { 'cinder':
  ensure => $package_ensure,
  name   => $::cinder::params::package_name,
} ->

class {'cinder::scheduler':
  scheduler_driver => $scheduler_driver,
  package_ensure   => $package_ensure,
  enabled          => true,
  manage_service   => true,
}

View File

@ -1,22 +0,0 @@
$resource = hiera($::resource_name)

$scheduler_driver = $resource['input']['scheduler_driver']
$package_ensure = $resource['input']['package_ensure']

include cinder::params

package { 'cinder':
  ensure => $package_ensure,
  name   => $::cinder::params::package_name,
} ->

class {'cinder::scheduler':
  scheduler_driver => $scheduler_driver,
  package_ensure   => $package_ensure,
  enabled          => true,
  manage_service   => true,
}

notify { "restart cinder scheduler":
  notify => Service["cinder-scheduler"],
}

View File

@ -1,25 +0,0 @@
handler: puppet
version: 1.0.0
input:
  scheduler_driver:
    schema: str
    value:
  package_ensure:
    schema: str
    value: 'present'
  git:
    schema: {repository: str!, branch: str!}
    value: {repository: 'https://github.com/openstack/puppet-cinder', branch: '5.1.0'}
  ip:
    schema: str!
    value:
  # ssh_key:
  #   schema: str!
  #   value:
  # ssh_user:
  #   schema: str!
  #   value:

tags: [resource/cinder_scheduler_service, resources/cinder_scheduler, resources/cinder]

View File

@ -1,12 +0,0 @@
import requests

from solar.core.log import log


def test(resource):
    log.debug('Testing cinder_scheduler_puppet')
    # requests.get(
    #     'http://%s:%s' % (resource.args['ip'], resource.args['port'])
    # TODO(bogdando) figure out how to test this
    # http://docs.openstack.org/developer/nova/devref/scheduler.html
    # )

View File

@ -1,8 +0,0 @@
# Cinder Volume resource for puppet handler

Setup and configure the cinder volume service.
Configure the ISCSI volume backend for cinder volume nodes, if specified
(default: true).

source https://github.com/openstack/puppet-cinder/blob/5.1.0/manifests/volume/iscsi.pp
source https://github.com/openstack/puppet-cinder/blob/5.1.0/manifests/volume.pp

View File

@ -1,4 +0,0 @@
class {'cinder::volume':
  enabled        => false,
  package_ensure => 'absent',
}

View File

@ -1,31 +0,0 @@
$resource = hiera($::resource_name)

$package_ensure = $resource['input']['package_ensure']
$use_iscsi_backend = $resource['input']['use_iscsi_backend']

$iscsi_ip_address = $resource['input']['iscsi_ip_address']
$volume_driver = $resource['input']['volume_driver']
$volume_group = $resource['input']['volume_group']
$iscsi_helper = $resource['input']['iscsi_helper']

include cinder::params

package { 'cinder':
  ensure => $package_ensure,
  name   => $::cinder::params::package_name,
} ->

class {'cinder::volume':
  package_ensure => $package_ensure,
  enabled        => true,
  manage_service => true,
}

if $use_iscsi_backend {
  class {'cinder::volume::iscsi':
    iscsi_ip_address => $iscsi_ip_address,
    volume_driver    => $volume_driver,
    volume_group     => $volume_group,
    iscsi_helper     => $iscsi_helper,
  }
}

View File

@ -1,26 +0,0 @@
$resource = hiera($::resource_name)

$package_ensure = $resource['input']['package_ensure']
$use_iscsi_backend = $resource['input']['use_iscsi_backend']

$iscsi_ip_address = $resource['input']['iscsi_ip_address']
$volume_driver = $resource['input']['volume_driver']
$volume_group = $resource['input']['volume_group']
$iscsi_helper = $resource['input']['iscsi_helper']

include cinder::params

package { 'cinder':
  ensure => $package_ensure,
  name   => $::cinder::params::package_name,
} ->

class {'cinder::volume':
  package_ensure => $package_ensure,
  enabled        => true,
  manage_service => true,
}

notify { "restart cinder volume":
  notify => Service["cinder-volume"],
}

View File

@ -1,38 +0,0 @@
handler: puppet
version: 1.0.0
input:
  package_ensure:
    schema: str
    value: 'present'
  iscsi_ip_address:
    schema: str
    value: '127.0.0.1'
  volume_driver:
    schema: str
    value: 'cinder.volume.drivers.lvm.LVMISCSIDriver'
  volume_group:
    schema: str
    value: 'cinder-volumes'
  iscsi_helper:
    schema: str
    value: 'tgtadm'
  use_iscsi_backend:
    schema: bool
    value: true
  git:
    schema: {repository: str!, branch: str!}
    value: {repository: 'https://github.com/openstack/puppet-cinder', branch: '5.1.0'}
  ip:
    schema: str!
    value:
  # ssh_key:
  #   schema: str!
  #   value:
  # ssh_user:
  #   schema: str!
  #   value:

tags: [resource/cinder_volume_service, resources/cinder_volume, resources/cinder]
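
The use_iscsi_backend, iscsi_ip_address, volume_driver, volume_group and iscsi_helper inputs above feed the `if $use_iscsi_backend` branch in run.pp. Purely as an illustration (placeholder values; the resource-creation call itself is out of scope here), an override for a dedicated storage node could look like:

cinder_volume_args = {               # keys match the meta.yaml inputs above
    'use_iscsi_backend': True,       # default; False skips cinder::volume::iscsi
    'iscsi_ip_address': '10.0.0.5',  # placeholder storage-node address
    'volume_driver': 'cinder.volume.drivers.lvm.LVMISCSIDriver',
    'volume_group': 'cinder-volumes',
    'iscsi_helper': 'tgtadm',
}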

View File

@ -1,12 +0,0 @@
import requests

from solar.core.log import log


def test(resource):
    log.debug('Testing cinder_volume_puppet')
    # requests.get(
    #     'http://%s:%s' % (resource.args['ip'], resource.args['port'])
    # TODO(bogdando) figure out how to test this
    # http://docs.openstack.org/developer/nova/devref/volume.html
    # )

View File

@ -1,22 +0,0 @@
- hosts: '*'
  sudo: yes
  gather_facts: false
  # these are default variables; they will be overwritten by the resource ones
  vars:
    networks:
      mgmt:
        address: 172.18.10.6
        bridge: br-test0
        bridge_address: 172.18.10.252/24
        interface: eth1
        netmask: 255.255.255.0
        type: veth
  tasks:
    - shell: ip l add {{item.value.bridge}} type bridge
      with_dict: networks
      ignore_errors: true
    - shell: ip l set {{item.value.bridge}} up
      with_dict: networks
    - shell: ip a add dev {{item.value.bridge}} {{item.value.bridge_address}}
      with_dict: networks
      ignore_errors: true

View File

@ -1,16 +0,0 @@
handler: ansible_playbook
version: 1.0.0
actions:
input:
  ip:
    schema: str!
    value:
  # ssh_key:
  #   schema: str!
  #   value:
  # ssh_user:
  #   schema: str!
  #   value:
  networks:
    schema: {}
    value:

View File

@ -1,5 +0,0 @@
- hosts: [{{host}}]
  sudo: yes
  tasks:
    - shell: echo `/sbin/ifconfig`

View File

@ -1,6 +0,0 @@
- hosts: [{{host}}]
  sudo: yes
  tasks:
    - shell: docker stop {{ resource_name }}
    - shell: docker rm {{ resource_name }}

View File

@ -1,24 +0,0 @@
- hosts: [{{host}}]
  sudo: yes
  tasks:
    - docker:
        name: {{ resource_name }}
        image: {{ image }}
        state: running
        net: host
{% if ports.value %}
        ports:
{% for port in ports.value %}
          - {{ port['value'] }}:{{ port['value'] }}
{% endfor %}
{% endif %}
{% if host_binds.value %}
        volumes:
        # TODO: host_binds might need more work
        # It is currently not trivial to pass a custom src: dst pair here
        # (when a config variable is passed in from another resource),
        # so we mount it to the same directory as on the host
{% for bind in host_binds.value %}
          - {{ bind['value']['src'] }}:{{ bind['value']['dst'] }}:{{ bind['value'].get('mode', 'ro') }}
{% endfor %}
{% endif %}
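
The volumes block above is what the TODO refers to: each host_binds entry is a nested {'value': {'src': ..., 'dst': ..., 'mode': ...}} mapping (see the container meta.yaml further down) and the template mounts src and dst at the same path. A quick jinja2 sketch of just that fragment, with a made-up bind value, shows what it renders to (this is only the template logic, not how Solar invokes the handler):

from jinja2 import Template

fragment = Template(
    "volumes:\n"
    "{% for bind in host_binds.value %}"
    "  - {{ bind['value']['src'] }}:{{ bind['value']['dst'] }}"
    ":{{ bind['value'].get('mode', 'ro') }}\n"
    "{% endfor %}")

# sample value shaped like the host_binds schema: [{value: {src, dst, mode}}]
host_binds = {'value': [{'value': {'src': '/etc/solar', 'dst': '/etc/solar'}}]}
print(fragment.render(host_binds=host_binds))
# volumes:
#   - /etc/solar:/etc/solar:ro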

View File

@ -1,12 +0,0 @@
handler: ansible
version: 1.0.0
input:
  ip:
    type: str!
    value:
  image:
    type: str!
    value:
  export_volumes:
    type: str!
    value:

View File

@ -1,6 +0,0 @@
- hosts: [{{host}}]
  sudo: yes
  tasks:
    - lineinfile: create=yes dest=/etc/dnsmasq.d/no_pxe_{{exclude_mac_pxe | replace(':', '_')}}.conf line="dhcp-host={{exclude_mac_pxe}},set:nopxe"
    - shell: service dnsmasq restart

View File

@ -1,2 +0,0 @@
- hosts: [{{host}}]
  sudo: yes

View File

@ -1,17 +0,0 @@
handler: ansible
version: 1.0.0
actions:
  exclude_mac_pxe: exclude_mac_pxe.yaml
  run: run.yaml
input:
  ip:
    schema: str!
    value:
  exclude_mac_pxe:
    schema: str!
    value:

tags: [resources=dnsmasq]

View File

@ -1,9 +0,0 @@
- hosts: [{{host}}]
  sudo: yes
  tasks:
    - shell: docker --version
      ignore_errors: true
      register: docker_version
    - shell: curl -sSL https://get.docker.com/ | sudo sh
      when: docker_version|failed

View File

@ -1,15 +0,0 @@
handler: ansible
version: 1.0.0
input:
  ip:
    schema: str!
    value:
  # ssh_user:
  #   schema: str!
  #   value:
  # ssh_key:
  #   schema: str!
  #   value:

tags: [resources/docker]

View File

@ -1,6 +0,0 @@
- hosts: [{{host}}]
  sudo: yes
  tasks:
    - shell: docker stop {{ resource_name }}
    - shell: docker rm {{ resource_name }}

View File

@ -1,37 +0,0 @@
- hosts: [{{host}}]
  sudo: yes
  tasks:
    - docker:
        name: {{ resource_name }}
        image: {{ image }}
        state: running
        net: host
{% if ports %}
        ports:
{% for port in ports %}
          - {{ port }}:{{ port }}
{% endfor %}
        expose:
{% for port in ports %}
          - {{ port }}
{% endfor %}
{% endif %}
{% if host_binds.value %}
        volumes:
        # TODO: host_binds might need more work
        # It is currently not trivial to pass a custom src: dst pair here
        # (when a config variable is passed in from another resource),
        # so we mount it to the same directory as on the host
{% for bind in host_binds.value %}
          - {{ bind['value']['src'] }}:{{ bind['value']['dst'] }}:{{ bind['value'].get('mode', 'ro') }}
{% endfor %}
{% endif %}
{% if env %}
        env:
{% for key, value in env.iteritems() %}
          {{ key }}: {{ value }}
{% endfor %}
{% endif %}

View File

@ -1,37 +0,0 @@
- hosts: [{{host}}]
  sudo: yes
  tasks:
    - docker:
        name: {{ resource_name }}
        image: {{ image }}
        state: reloaded
        net: host
{% if ports %}
        ports:
{% for port in ports %}
          - {{ port }}:{{ port }}
{% endfor %}
        expose:
{% for port in ports %}
          - {{ port }}
{% endfor %}
{% endif %}
{% if host_binds.value %}
        volumes:
        # TODO: host_binds might need more work
        # It is currently not trivial to pass a custom src: dst pair here
        # (when a config variable is passed in from another resource),
        # so we mount it to the same directory as on the host
{% for bind in host_binds.value %}
          - {{ bind['value']['src'] }}:{{ bind['value']['dst'] }}:{{ bind['value'].get('mode', 'ro') }}
{% endfor %}
{% endif %}
{% if env %}
        env:
{% for key, value in env.iteritems() %}
          {{ key }}: {{ value }}
{% endfor %}
{% endif %}

View File

@ -1,29 +0,0 @@
handler: ansible
version: 1.0.0
input:
  ip:
    schema: str!
    value:
  image:
    schema: str!
    value:
  ports:
    schema: [int]
    value: []
  host_binds:
    schema: [{value: {src: str, dst: str, mode: str}}]
    value: []
  volume_binds:
    schema: [{src: str, dst: str, mode: str}]
    value: []
  env:
    schema: {}
    value: {}
  # ssh_user:
  #   schema: str!
  #   value: []
  # ssh_key:
  #   schema: str!
  #   value: []

tags: [resource/container]

View File

@ -1,10 +0,0 @@
#!/usr/bin/env python
import sys
import json
data = json.loads(sys.stdin.read())
rst = {'val_x_val': int(data['val'])**2}
sys.stdout.write(json.dumps(rst))

View File

@ -1,11 +0,0 @@
handler: none
version: 1.0.0
managers:
  - managers/manager.py
input:
  val:
    schema: int!
    value: 2
  val_x_val:
    schema: int
    value:
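
Judging by the script above, a manager is a plain stdin/stdout filter: it is handed the current inputs as JSON and prints the computed inputs (here val_x_val = val**2) back as JSON. It can be exercised on its own by piping a payload through it; the relative path below is assumed:

import json
from subprocess import PIPE, Popen

# run the manager shown above and feed it a sample payload (path assumed)
proc = Popen(['python', 'managers/manager.py'], stdin=PIPE, stdout=PIPE)
out, _ = proc.communicate(json.dumps({'val': 3}).encode())
print(json.loads(out.decode()))  # {'val_x_val': 9}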

View File

@ -1,3 +0,0 @@
#!/bin/bash
rm {{ path }}

View File

@ -1,3 +0,0 @@
#!/bin/bash
touch {{ path }}

Some files were not shown because too many files have changed in this diff