Merge branch 'master' into dry-run

commit 2daf795172
@@ -61,7 +61,9 @@ solar connect mariadb_service keystone_db '{"root_password": "login_password", "
solar connect keystone_db keystone_db_user

solar changes stage
solar changes commit
solar changes process
<uid>
solar orch run-once <uid>
```

You can fiddle with the above configuration like this:
@@ -70,7 +72,9 @@ solar resource update keystone_db_user '{"user_password": "new_keystone_password
solar resource update keystone_db_user user_password=new_keystone_password  # another valid format

solar changes stage
solar changes commit
solar changes process
<uid>
solar orch run-once <uid>
```

* Show the connections/graph:
Vagrantfile (vendored): 4 changes

@@ -14,9 +14,11 @@ SCRIPT
slave_script = <<SCRIPT
apt-get update
apt-get upgrade
apt-get dist-upgrade
apt-get -y install python-pip python-dev
pip install ansible
ansible-playbook -i "localhost," -c local /vagrant/main.yml /vagrant/docker.yml /vagrant/slave.yml
ansible-playbook -i "localhost," -c local /vagrant/main.yml /vagrant/docker.yml /vagrant/slave.yml /vagrant/slave_cinder.yml
SCRIPT

master_celery = <<SCRIPT
@@ -10,9 +10,9 @@
- shell: celery multi kill 2
    chdir={{celery_dir}}
  tags: [stop]
- shell: celery multi start 2 -A solar.orchestration.tasks -Q:1 celery,scheduler -Q:2 celery,{{hostname.stdout}}
- shell: celery multi start 2 -A solar.orchestration.runner -Q:1 scheduler,system_log -Q:2 celery,{{hostname.stdout}}
    chdir={{celery_dir}}
  tags: [master]
- shell: celery multi start 1 -A solar.orchestration.tasks -Q:1 celery,{{hostname.stdout}}
- shell: celery multi start 1 -A solar.orchestration.runner -Q:1 {{hostname.stdout}}
    chdir={{celery_dir}}
  tags: [slave]
docs/orchestration.md (new file): 65 lines

@@ -0,0 +1,65 @@
# Overview of orchestration commands and system log integration

After the user has created all required resources, it is possible to automatically
detect which resources require changes with

```
solar changes stage
```

After changes are staged, they are used to populate the history, which can be viewed
with the following command (the *-n* option limits the number of items; -1 returns all changes):

```
solar changes history -n 5
```

The user can generate a deployment scenario based on the changes found by the system log:
```
solar changes process
```

This command prepares a deployment graph and returns the uid of that graph to
work with.

All commands that manipulate the deployment graph live in the
*orch* namespace.

The report prints all deployment tasks in topological order, with their status
and, if a task's status is *ERROR*, the error itself:
```
solar orch report <uid>
```

To see a picture of the deployment dependencies, use the following command:
```
solar orch dg <uid>
```
Keep in mind that this is not a representation of all edges kept in the graph;
we use transitive reduction to keep only the edges that matter for the
order of traversal.
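
As an illustration of what transitive reduction does, here is a minimal Python sketch using networkx (an assumption for illustration only; solar may implement the reduction differently). The redundant edge a -> c is dropped because it is already implied by a -> b -> c:

```
import networkx as nx

# A tiny DAG with one redundant edge: a -> c is implied by a -> b -> c.
g = nx.DiGraph([('a', 'b'), ('b', 'c'), ('a', 'c')])

reduced = nx.transitive_reduction(g)

print(sorted(g.edges()))        # [('a', 'b'), ('a', 'c'), ('b', 'c')]
print(sorted(reduced.edges()))  # [('a', 'b'), ('b', 'c')]
```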

Execute the deployment:
```
solar orch run-once <uid>
```

Gracefully stop the deployment after all already scheduled tasks have finished:
```
solar orch stop <uid>
```

Continue deployment execution for all tasks that are *SKIPPED*:
```
solar orch resume <uid>
```

Return all tasks to the *PENDING* state and restart the deployment:
```
solar orch restart <uid>
```

Retry tasks in the *ERROR* state and continue execution:
```
solar orch retry <uid>
```
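
Taken together, the commands above form a stage -> process -> run-once loop. A minimal Python sketch of driving that loop through the CLI (assumptions: `solar` is on PATH, and `solar changes process` prints only the uid, as in the README example above):

```
import subprocess


def run(*cmd):
    """Run a CLI command and return its stdout, stripped."""
    return subprocess.check_output(cmd).decode().strip()


run('solar', 'changes', 'stage')          # detect resources with changes
uid = run('solar', 'changes', 'process')  # build the deployment graph
run('solar', 'orch', 'run-once', uid)     # execute it
print(run('solar', 'orch', 'report', uid))
```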
@@ -7,6 +7,7 @@ from solar.core import resource
from solar.core import signals
from solar.core import validation
from solar.core.resource import virtual_resource as vr
from solar import errors

from solar.interfaces.db import get_db

@@ -69,13 +70,13 @@ def deploy():

    # KEYSTONE
    keystone_puppet = vr.create('keystone_puppet', 'resources/keystone_puppet', {})[0]
    keystone_db = vr.create('keystone_db', 'resources/mariadb_keystone_db/', {
    keystone_db = vr.create('keystone_db', 'resources/mariadb_db/', {
        'db_name': 'keystone_db',
        'login_user': 'root'
    })[0]
    keystone_db_user = vr.create('keystone_db_user', 'resources/mariadb_keystone_user/', {
        'new_user_name': 'keystone',
        'new_user_password': 'keystone',
    keystone_db_user = vr.create('keystone_db_user', 'resources/mariadb_user/', {
        'user_name': 'keystone',
        'user_password': 'keystone',
    })[0]
    keystone_service_endpoint = vr.create('keystone_service_endpoint', 'resources/keystone_service_endpoint', {
        'endpoint_name': 'keystone',
@@ -107,20 +108,22 @@ def deploy():
        'port': 'login_port',
        'root_user': 'login_user',
        'root_password': 'login_password',
        'ip': 'db_host',
    })
    signals.connect(keystone_db, keystone_db_user, {
        'db_name',
        'login_port',
        'login_user',
        'login_password'
        'login_password',
        'db_host'
    })

    signals.connect(node1, keystone_service_endpoint)
    signals.connect(keystone_puppet, keystone_service_endpoint, {
        'admin_token': 'admin_token',
        'admin_port': 'keystone_admin_port',
        'admin_port': ['admin_port', 'keystone_admin_port'],
        'ip': ['keystone_host', 'admin_ip', 'internal_ip', 'public_ip'],
        'port': ['admin_port', 'internal_port', 'public_port'],
        'port': ['internal_port', 'public_port'],
    })

    signals.connect(keystone_puppet, admin_tenant)

@@ -141,8 +144,9 @@ def deploy():
        'db_name',
    })
    signals.connect(keystone_db_user, keystone_puppet, {
        'new_user_name': 'db_user',
        'new_user_password': 'db_password'
        'user_name': 'db_user',
        'user_password': 'db_password',
        'db_host': 'db_host'
    })

    # OPENRC
@@ -154,17 +158,14 @@ def deploy():

    # NEUTRON
    # TODO: vhost cannot be specified in neutron Puppet manifests so this user has to be admin anyways
    neutron_puppet = vr.create('neutron_puppet', 'resources/neutron_puppet', {
        'rabbitmq_user': 'guest',
        'rabbitmq_password': 'guest'
    })[0]
    neutron_puppet = vr.create('neutron_puppet', 'resources/neutron_puppet', {})[0]

    neutron_keystone_user = vr.create('neutron_keystone_user', 'resources/keystone_user', {
        'user_name': 'neutron',
        'user_password': 'neutron'
    })[0]
    neutron_keystone_role = vr.create('neutron_keystone_role', 'resources/keystone_role', {
        'role_name': 'neutron'
        'role_name': 'admin'
    })[0]
    neutron_keystone_service_endpoint = vr.create('neutron_keystone_service_endpoint', 'resources/keystone_service_endpoint', {
        'endpoint_name': 'neutron',

@@ -180,6 +181,11 @@ def deploy():
        'ip': 'rabbitmq_host',
        'port': 'rabbitmq_port'
    })
    signals.connect(openstack_rabbitmq_user, neutron_puppet, {
        'user_name': 'rabbitmq_user',
        'password': 'rabbitmq_password'})
    signals.connect(openstack_vhost, neutron_puppet, {
        'vhost_name': 'rabbitmq_virtual_host'})
    signals.connect(admin_user, neutron_puppet, {
        'user_name': 'keystone_user',
        'user_password': 'keystone_password',
@@ -203,60 +209,147 @@ def deploy():
        'port': ['admin_port', 'internal_port', 'public_port'],
    })

    # # CINDER
    # cinder_puppet = vr.create('cinder_puppet', 'resources/cinder_puppet', {
    #     'rabbit_userid': 'guest', 'rabbit_password': 'guest'})[0]
    # cinder_db = vr.create('cinder_db', 'resources/mariadb_db/', {
    #     'db_name': 'cinder_db', 'login_user': 'root'})[0]
    # cinder_db_user = vr.create('cinder_db_user', 'resources/mariadb_user/', {
    #     'user_name': 'cinder', 'user_password': 'cinder', 'login_user': 'root'})[0]
    # cinder_keystone_user = vr.create('cinder_keystone_user', 'resources/keystone_user', {
    #     'user_name': 'cinder', 'user_password': 'cinder'})[0]
    # cinder_keystone_role = vr.create('cinder_keystone_role', 'resources/keystone_role', {
    #     'role_name': 'cinder'})[0]
    # cinder_keystone_service_endpoint = vr.create(
    #     'cinder_keystone_service_endpoint', 'resources/keystone_service_endpoint', {
    #         'adminurl': 'http://{{admin_ip}}:{{admin_port}}',
    #         'internalurl': 'http://{{internal_ip}}:{{internal_port}}',
    #         'publicurl': 'http://{{public_ip}}:{{public_port}}',
    #         'description': 'OpenStack Network Service', 'type': 'network'})[0]
    # CINDER
    cinder_puppet = vr.create('cinder_puppet', 'resources/cinder_puppet', {})[0]
    cinder_db = vr.create('cinder_db', 'resources/mariadb_db/', {
        'db_name': 'cinder_db', 'login_user': 'root'})[0]
    cinder_db_user = vr.create('cinder_db_user', 'resources/mariadb_user/', {
        'user_name': 'cinder', 'user_password': 'cinder', 'login_user': 'root'})[0]
    cinder_keystone_user = vr.create('cinder_keystone_user', 'resources/keystone_user', {
        'user_name': 'cinder', 'user_password': 'cinder'})[0]
    cinder_keystone_role = vr.create('cinder_keystone_role', 'resources/keystone_role', {
        'role_name': 'admin'})[0]
    cinder_keystone_service_endpoint = vr.create(
        'cinder_keystone_service_endpoint',
        'resources/keystone_service_endpoint', {
            'endpoint_name': 'cinder',
            'adminurl': 'http://{{admin_ip}}:{{admin_port}}/v2/%(tenant_id)s',
            'internalurl': 'http://{{internal_ip}}:{{internal_port}}/v2/%(tenant_id)s',
            'publicurl': 'http://{{public_ip}}:{{public_port}}/v2/%(tenant_id)s',
            'description': 'OpenStack Block Storage Service', 'type': 'volume'})[0]

    signals.connect(node1, cinder_puppet)
    signals.connect(node1, cinder_db)
    signals.connect(node1, cinder_db_user)
    signals.connect(rabbitmq_service1, cinder_puppet, {'ip': 'rabbit_host', 'port': 'rabbit_port'})
    signals.connect(admin_user, cinder_puppet, {'user_name': 'keystone_user', 'user_password': 'keystone_password', 'tenant_name': 'keystone_tenant'})  # ?
    signals.connect(openstack_vhost, cinder_puppet, {'vhost_name': 'rabbit_virtual_host'})
    signals.connect(openstack_rabbitmq_user, cinder_puppet, {'user_name': 'rabbit_userid', 'password': 'rabbit_password'})
    signals.connect(mariadb_service1, cinder_db, {
        'port': 'login_port',
        'root_password': 'login_password',
        'root_user': 'login_user',
        'ip': 'db_host'})
    signals.connect(mariadb_service1, cinder_db_user, {'port': 'login_port', 'root_password': 'login_password'})
    signals.connect(cinder_db, cinder_db_user, {'db_name', 'db_host'})
    signals.connect(cinder_db_user, cinder_puppet, {
        'user_name': 'db_user',
        'db_name': 'db_name',
        'user_password': 'db_password',
        'db_host': 'db_host'})
    signals.connect(keystone_puppet, cinder_puppet, {'ip': 'keystone_host', 'admin_port': 'keystone_port'})  # or non-admin port?
    signals.connect(services_tenant, cinder_keystone_user)
    signals.connect(cinder_keystone_user, cinder_keystone_role)
    signals.connect(cinder_keystone_user, cinder_puppet, {'user_name': 'keystone_user', 'tenant_name': 'keystone_tenant', 'user_password': 'keystone_password'})
    signals.connect(mariadb_service1, cinder_puppet, {'ip': 'ip'})
    signals.connect(cinder_puppet, cinder_keystone_service_endpoint, {
        'ssh_key': 'ssh_key', 'ssh_user': 'ssh_user',
        'ip': ['ip', 'keystone_host', 'admin_ip', 'internal_ip', 'public_ip'],
        'port': ['admin_port', 'internal_port', 'public_port']})
    signals.connect(keystone_puppet, cinder_keystone_service_endpoint, {
        'admin_port': 'keystone_admin_port', 'admin_token': 'admin_token'})

    # CINDER API
    cinder_api_puppet = vr.create('cinder_api_puppet', 'resources/cinder_api_puppet', {})[0]
    signals.connect(node1, cinder_api_puppet)
    signals.connect(cinder_puppet, cinder_api_puppet, {
        'keystone_password', 'keystone_tenant', 'keystone_user'})
    signals.connect(cinder_puppet, cinder_api_puppet, {
        'keystone_host': 'keystone_auth_host',
        'keystone_port': 'keystone_auth_port'})

    # signals.connect(node1, cinder_db)
    # signals.connect(node1, cinder_db_user)
    # signals.connect(node1, cinder_puppet)
    # signals.connect(rabbitmq_service1, cinder_puppet, {'ip': 'rabbit_host', 'port': 'rabbit_port'})
    # signals.connect(openstack_vhost, cinder_puppet, {'vhost_name': 'rabbit_virtual_host'})
    # signals.connect(openstack_rabbitmq_user, cinder_puppet, {'user_name': 'rabbit_userid', 'password': 'rabbit_password'})
    # signals.connect(mariadb_service1, cinder_db, {
    #     'port': 'login_port', 'root_password': 'login_password'})
    # signals.connect(mariadb_service1, cinder_db_user, {
    #     'port': 'login_port', 'root_password': 'login_password'})
    # signals.connect(cinder_db, cinder_db_user, {'db_name': 'db_name'})

    # signals.connect(services_tenant, cinder_keystone_user)
    # signals.connect(cinder_keystone_user, cinder_keystone_role)
    # CINDER SCHEDULER
    cinder_scheduler_puppet = vr.create('cinder_scheduler_puppet', 'resources/cinder_scheduler_puppet', {})[0]
    signals.connect(node1, cinder_scheduler_puppet)
    signals.connect(cinder_puppet, cinder_scheduler_puppet)

    # CINDER VOLUME
    cinder_volume_puppet = vr.create('cinder_volume_puppet', 'resources/cinder_volume_puppet', {})[0]
    signals.connect(node1, cinder_volume_puppet)
    signals.connect(cinder_puppet, cinder_volume_puppet)

    # NOVA
    # #nova_network_puppet = vr.create('nova_network_puppet', GitProvider(GIT_PUPPET_LIBS_URL, 'nova_network'), {'rabbitmq_user': 'guest', 'rabbitmq_password': 'guest'})[0]
    # # TODO: fix rabbitmq user/password
    # nova_network_puppet = vr.create('nova_network_puppet', 'resources/nova_network_puppet', {'rabbitmq_user': 'guest', 'rabbitmq_password': 'guest'})[0]
    nova_api = vr.create('nova_api', 'resources/nova_api_puppet', {})[0]
    nova_db = vr.create('nova_db', 'resources/mariadb_db/', {
        'db_name': 'nova_db',
        'login_user': 'root'})[0]
    nova_db_user = vr.create('nova_db_user', 'resources/mariadb_user/', {
        'user_name': 'nova',
        'user_password': 'nova',
        'login_user': 'root'})[0]
    nova_keystone_user = vr.create('nova_keystone_user', 'resources/keystone_user', {
        'user_name': 'nova',
        'user_password': 'nova'})[0]
    nova_keystone_role = vr.create('nova_keystone_role', 'resources/keystone_role', {
        'role_name': 'admin'})[0]
    nova_keystone_service_endpoint = vr.create('nova_keystone_service_endpoint', 'resources/keystone_service_endpoint', {
        'endpoint_name': 'nova',
        'adminurl': 'http://{{admin_ip}}:{{admin_port}}/v2/%(tenant_id)s',
        'internalurl': 'http://{{internal_ip}}:{{internal_port}}/v2/%(tenant_id)s',
        'publicurl': 'http://{{public_ip}}:{{public_port}}/v2/%(tenant_id)s',
        'description': 'OpenStack Compute Service',
        'type': 'compute',
        'public_port': 8774,
        'internal_port': 8774,
        'admin_port': 8774})[0]

    # nova_keystone_user = vr.create('nova_keystone_user', 'resources/keystone_user', {'user_name': 'nova', 'user_password': 'nova'})[0]
    # nova_keystone_role = vr.create('nova_keystone_role', 'resources/keystone_role', {'role_name': 'nova'})[0]
    signals.connect(node1, nova_api)
    signals.connect(node1, nova_db)
    signals.connect(node1, nova_db_user)
    signals.connect(mariadb_service1, nova_db, {
        'port': 'login_port',
        'root_password': 'login_password',
        'root_user': 'login_user',
        'ip': 'db_host'})
    signals.connect(mariadb_service1, nova_db_user, {
        'port': 'login_port',
        'root_password': 'login_password'})
    signals.connect(nova_db, nova_db_user, {'db_name', 'db_host'})
    signals.connect(services_tenant, nova_keystone_user)
    signals.connect(nova_keystone_user, nova_keystone_role)
    signals.connect(keystone_puppet, nova_api, {
        'ip': 'keystone_host',
        'admin_port': 'keystone_port'})
    signals.connect(nova_keystone_user, nova_api, {
        'user_name': 'keystone_user_name',
        'tenant_name': 'keystone_tenant_name',
        'user_password': 'keystone_password'})
    signals.connect(rabbitmq_service1, nova_api, {
        'ip': 'rabbitmq_host'})
    signals.connect(openstack_rabbitmq_user, nova_api, {
        'user_name': 'rabbitmq_user',
        'password': 'rabbitmq_password'})
    signals.connect(keystone_puppet, nova_keystone_service_endpoint, {
        'ip': 'keystone_host',
        'admin_port': 'keystone_admin_port',
        'admin_token': 'admin_token'})
    signals.connect(mariadb_service1, nova_api, {
        'ip': 'db_host'})
    signals.connect(nova_db_user, nova_api, {
        'user_name': 'db_user',
        'db_name': 'db_name',
        'user_password': 'db_password',
        'db_host': 'db_host'})
    signals.connect(nova_api, nova_keystone_service_endpoint, {
        'ip': ['ip', 'public_ip', 'internal_ip', 'admin_ip'],
        'ssh_key': 'ssh_key',
        'ssh_user': 'ssh_user'})
    signals.connect(nova_api, nova_keystone_service_endpoint, {
        'ip': 'ip',
        'ssh_key': 'ssh_key',
        'ssh_user': 'ssh_user'})

    # TODO: 'services' tenant-id is hardcoded
    # nova_keystone_service_endpoint = vr.create('nova_keystone_service_endpoint', 'resources/keystone_service_endpoint', {'adminurl': 'http://{{ip}}:{{admin_port}}/v2/services', 'internalurl': 'http://{{ip}}:{{public_port}}/v2/services', 'publicurl': 'http://{{ip}}:{{port}}/v2/services', 'description': 'OpenStack Compute Service', 'type': 'compute', 'port': 8776, 'admin_port': 8776})[0]

    # signals.connect(node1, nova_network_puppet)

    # signals.connect(services_tenant, nova_keystone_user)
    # signals.connect(neutron_keystone_user, nova_keystone_role)

    # signals.connect(nova_keystone_user, nova_network_puppet, {'user_name': 'keystone_user', 'user_password': 'keystone_password', 'tenant_name': 'keystone_tenant'})
    # signals.connect(keystone_puppet, nova_network_puppet, {'ip': 'keystone_host', 'port': 'keystone_port'})

    # signals.connect(nova_network_puppet, nova_keystone_service_endpoint, {'ip': 'ip', 'ssh_key': 'ssh_key', 'ssh_user': 'ssh_user'})
    # signals.connect(keystone_puppet, nova_keystone_service_endpoint, {'ip': 'keystone_host', 'admin_port': 'keystone_port', 'admin_token': 'admin_token'})
    # signals.connect(rabbitmq_service1, nova_network_puppet, {'ip': 'rabbitmq_host', 'port': 'rabbitmq_port'})

@@ -293,26 +386,28 @@ def deploy():
    actions.resource_action(admin_role, 'run')

    actions.resource_action(keystone_service_endpoint, 'run')

    actions.resource_action(services_tenant, 'run')

    actions.resource_action(neutron_keystone_user, 'run')
    actions.resource_action(neutron_keystone_role, 'run')

    actions.resource_action(neutron_puppet, 'run')
    actions.resource_action(neutron_keystone_service_endpoint, 'run')

    # actions.resource_action(cinder_db, 'run')
    # actions.resource_action(cinder_db_user, 'run')
    # actions.resource_action(cinder_keystone_user, 'run')
    # actions.resource_action(cinder_keystone_role, 'run')

    # actions.resource_action(cinder_puppet, 'run')

    # actions.resource_action(nova_keystone_user, 'run')
    # actions.resource_action(nova_keystone_role, 'run')

    # actions.resource_action(nova_network_puppet, 'run')
    # actions.resource_action(nova_keystone_service_endpoint, 'run')
    actions.resource_action(cinder_db, 'run')
    actions.resource_action(cinder_db_user, 'run')
    actions.resource_action(cinder_keystone_user, 'run')
    actions.resource_action(cinder_keystone_role, 'run')
    actions.resource_action(cinder_puppet, 'run')
    actions.resource_action(cinder_keystone_service_endpoint, 'run')
    actions.resource_action(cinder_api_puppet, 'run')
    actions.resource_action(cinder_scheduler_puppet, 'run')
    actions.resource_action(cinder_volume_puppet, 'run')
    actions.resource_action(nova_db, 'run')
    actions.resource_action(nova_db_user, 'run')
    actions.resource_action(nova_keystone_user, 'run')
    actions.resource_action(nova_keystone_role, 'run')
    actions.resource_action(nova_api, 'run')
    actions.resource_action(nova_keystone_service_endpoint, 'run')

    time.sleep(10)

@@ -322,12 +417,25 @@ def undeploy():
    db = get_db()

    to_remove = [
        'nova_db',
        'nova_db_user',
        'nova_keystone_service_endpoint',
        'nova_api',
        'cinder_volume_puppet',
        'cinder_scheduler_puppet',
        'cinder_api_puppet',
        'cinder_keystone_service_endpoint',
        'cinder_puppet',
        'cinder_keystone_role',
        'cinder_keystone_user',
        'cinder_db_user',
        'cinder_db',
        'neutron_keystone_service_endpoint',
        'neutron_puppet',
        'neutron_keystone_role',
        'neutron_keystone_user',
        'services_tenant',
        # 'keystone_service_endpoint',
        'keystone_service_endpoint',
        'admin_role',
        'admin_user',
        'admin_tenant',
@@ -338,14 +446,17 @@ def undeploy():
        'mariadb_service1',
        'openstack_rabbitmq_user',
        'openstack_vhost',
        'rabbitmq1',
        'rabbitmq_service1',
    ]

    resources = map(resource.wrap_resource, db.get_list(collection=db.COLLECTIONS.resource))
    resources = {r.name: r for r in resources}

    for name in to_remove:
        actions.resource_action(resources[name], 'remove')
        try:
            actions.resource_action(resources[name], 'remove')
        except errors.SolarError as e:
            print 'WARNING: %s' % str(e)

    # actions.resource_action(resources['nova_keystone_service_endpoint'], 'remove')
    # actions.resource_action(resources['nova_network_puppet'], 'remove')
@@ -376,7 +487,7 @@ def undeploy():

    # actions.resource_action(resources['openstack_rabbitmq_user'], 'remove')
    # actions.resource_action(resources['openstack_vhost'], 'remove')
    # actions.resource_action(resources['rabbitmq1'], 'remove')
    # actions.resource_action(resources['rabbitmq_service1'], 'remove')

    db.clear()

@@ -1,8 +1,6 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copied from: https://github.com/openstack-ansible/openstack-ansible-modules/blob/master/keystone_service

DOCUMENTATION = '''
---
module: keystone_service

@@ -124,116 +122,116 @@ def get_endpoint(keystone, name):
    return endpoints[0]

def ensure_service_present(keystone, name, service_type, description,
                           check_mode):
    """ Ensure the service is present and has the right values
def ensure_present(keystone, name, service_type, description, public_url,
                   internal_url, admin_url, region, check_mode):
    """ Ensure the service and its endpoint are present and have the right values.

    Returns a pair, where the first element is a boolean that indicates
    a state change, and the second element is the service uuid, or None
    if running in check mode"""
    Returns a tuple, where the first element is a boolean that indicates
    a state change, the second element is the service uuid (or None in
    check mode), and the third element is the endpoint uuid (or None in
    check mode)."""
    # Fetch service and endpoint, if they exist.
    service = None
    try:
        service = get_service(keystone, name)
    except:
        # Service doesn't exist yet, we'll need to create one
        pass
    else:
        # See if it matches exactly
        if service.name == name and \
           service.type == service_type and \
           service.description == description:

            # Same, no changes needed
            return (False, service.id)

    # At this point, we know we will need to make a change
    if check_mode:
        return (True, None)

    if service is None:
        service = keystone.services.create(name=name,
                                           service_type=service_type,
                                           description=description)
        return (True, service.id)
    else:
        msg = "keystone v2 API doesn't support updating services"
        raise ValueError(msg)


def ensure_endpoint_present(keystone, name, public_url, internal_url,
                            admin_url, region, check_mode):
    """ Ensure the service endpoint is present and has the right values

    Assumes the service object has already been created at this point"""

    service = get_service(keystone, name)
    endpoint = None
    try:
        endpoint = get_endpoint(keystone, name)
    except:
        # Endpoint doesn't exist yet, we'll need to create one
        pass
    else:
        # See if it matches
        if endpoint.publicurl == public_url and \
           endpoint.adminurl == admin_url and \
           endpoint.internalurl == internal_url and \
           endpoint.region == region:
    try: service = get_service(keystone, name)
    except: pass
    try: endpoint = get_endpoint(keystone, name)
    except: pass

            # Same, no changes needed
            return (False, endpoint.id)
    changed = False

    # At this point, we know we will need to make a change
    if check_mode:
        return (True, None)
    # Delete endpoint if it exists and doesn't match.
    if endpoint is not None:
        identical = endpoint.publicurl == public_url and \
            endpoint.adminurl == admin_url and \
            endpoint.internalurl == internal_url and \
            endpoint.region == region
        if not identical:
            changed = True
            ensure_endpoint_absent(keystone, name, check_mode)
            endpoint = None

    # Delete service and its endpoint if the service exists and doesn't match.
    if service is not None:
        identical = service.name == name and \
            service.type == service_type and \
            service.description == description
        if not identical:
            changed = True
            ensure_endpoint_absent(keystone, name, check_mode)
            endpoint = None
            ensure_service_absent(keystone, name, check_mode)
            service = None

    # Recreate service, if necessary.
    if service is None:
        if not check_mode:
            service = keystone.services.create(
                name=name,
                service_type=service_type,
                description=description,
            )
        changed = True

    # Recreate endpoint, if necessary.
    if endpoint is None:
        endpoint = keystone.endpoints.create(region=region,
                                             service_id=service.id,
                                             publicurl=public_url,
                                             adminurl=admin_url,
                                             internalurl=internal_url)
        return (True, endpoint.id)
    else:
        msg = "keystone v2 API doesn't support updating endpoints"
        raise ValueError(msg)
        if not check_mode:
            endpoint = keystone.endpoints.create(
                region=region,
                service_id=service.id,
                publicurl=public_url,
                adminurl=admin_url,
                internalurl=internal_url,
            )
        changed = True

    if check_mode:
        # In check mode, the service/endpoint uuids will be the old uuids,
        # so omit them.
        return changed, None, None
    return changed, service.id, endpoint.id

def ensure_service_absent(keystone, name, check_mode):
    """ Ensure the service is absent"""
    try:
        service = get_service(keystone, name)
        if not check_mode:
            keystone.services.delete(service.id)
        return True
    except KeyError:
        # Service doesn't exist, so we're done.
        return False
    service = get_service(keystone, name)
    keystone.services.delete(service.id)
    return True


def ensure_endpoint_absent(keystone, name, check_mode):
    """ Ensure the service endpoint is absent"""
    endpoint = get_endpoint(keystone, name)
    keystone.endpoints.delete(endpoint.id)
    return True
    try:
        endpoint = get_endpoint(keystone, name)
        if not check_mode:
            keystone.endpoints.delete(endpoint.id)
        return True
    except KeyError:
        # Endpoint doesn't exist, so we're done.
        return False

def dispatch(keystone, name, service_type, description, public_url,
             internal_url, admin_url, region, state, check_mode):

    if state == 'present':
        (service_changed, service_id) = ensure_service_present(keystone,
                                                               name,
                                                               service_type,
                                                               description,
                                                               check_mode)

        (endpoint_changed, endpoint_id) = ensure_endpoint_present(
        (changed, service_id, endpoint_id) = ensure_present(
            keystone,
            name,
            service_type,
            description,
            public_url,
            internal_url,
            admin_url,
            region,
            check_mode)
        return dict(changed=service_changed or endpoint_changed,
                    service_id=service_id,
                    endpoint_id=endpoint_id)
            check_mode,
        )
        return dict(changed=changed, service_id=service_id, endpoint_id=endpoint_id)
    elif state == 'absent':
        endpoint_changed = ensure_endpoint_absent(keystone, name, check_mode)
        service_changed = ensure_service_absent(keystone, name, check_mode)
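
The refactored `ensure_present` above replaces the old two-phase `ensure_service_present`/`ensure_endpoint_present` flow with a delete-and-recreate convergence pass, because the keystone v2 API cannot update services or endpoints in place. A simplified, generic sketch of that pattern (the helper names here are illustrative, not the module's API):

```
def converge(fetch, matches, delete, create, check_mode):
    """Delete-and-recreate convergence; returns (changed, obj).

    Useful when the backing API cannot update objects in place,
    as with keystone v2 services and endpoints.
    """
    changed = False
    try:
        obj = fetch()
    except KeyError:  # assumption: fetch raises KeyError when absent
        obj = None
    if obj is not None and not matches(obj):
        changed = True
        if not check_mode:
            delete(obj)
        obj = None
    if obj is None:
        changed = True
        obj = None if check_mode else create()
    return changed, obj
```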
resources/cinder_api_puppet/README.md (new file): 98 lines

@@ -0,0 +1,98 @@
# Cinder API resource for puppet handler

Set up and configure the cinder API endpoint

## Parameters

source https://github.com/openstack/puppet-cinder/blob/5.1.0/manifests/api.pp

``keystone_password``
The password to use for authentication (keystone)

``keystone_enabled``
(optional) Use keystone for authentication
Defaults to true

``keystone_tenant``
(optional) The tenant of the auth user
Defaults to services

``keystone_user``
(optional) The name of the auth user
Defaults to cinder

``keystone_auth_host``
(optional) The keystone host
Defaults to localhost

``keystone_auth_port``
(optional) The keystone auth port
Defaults to 35357

``keystone_auth_protocol``
(optional) The protocol used to access the auth host
Defaults to http.

``os_region_name``
(optional) Some operations require cinder to make API requests
to Nova. This sets the keystone region to be used for these
requests. For example, boot-from-volume.
Defaults to undef.

``keystone_auth_admin_prefix``
(optional) The admin_prefix used for the admin endpoint of the auth host
This allows admin auth URIs like http://auth_host:35357/keystone.
(where '/keystone' is the admin prefix)
Defaults to false for empty. If defined, should be a string with a
leading '/' and no trailing '/'.

``service_port``
(optional) The cinder api port
Defaults to 5000

``service_workers``
(optional) Number of cinder-api workers
Defaults to $::processorcount

``package_ensure``
(optional) The state of the package
Defaults to present

``bind_host``
(optional) The cinder api bind address
Defaults to 0.0.0.0

``ratelimits``
(optional) The state of the service
Defaults to undef. If undefined the default ratelimiting values are used.

``ratelimits_factory``
(optional) Factory to use for ratelimiting
Defaults to 'cinder.api.v1.limits:RateLimitingMiddleware.factory'

``default_volume_type``
(optional) default volume type to use.
This should contain the name of the default volume type to use.
If not configured, it produces an error when creating a volume
without specifying a type.
Defaults to 'false'.

``validate``
(optional) Whether to validate the service is working after any service refreshes
Defaults to false

``validation_options``
(optional) Service validation options
Should be a hash of options defined in openstacklib::service_validation
If empty, default values are taken from the openstacklib function.
The default command lists volumes.
Requires validate set to true.
Example:
  glance::api::validation_options:
    glance-api:
      command: check_cinder-api.py
      path: /usr/bin:/bin:/usr/sbin:/sbin
      provider: shell
      tries: 5
      try_sleep: 10
Defaults to {}
resources/cinder_api_puppet/actions/remove.pp (new file): 12 lines

@@ -0,0 +1,12 @@
class {'cinder::api':
  enabled           => false,
  package_ensure    => 'absent',
  keystone_password => 'not important as removed',
}

include cinder::params

package { 'cinder':
  ensure => 'absent',
  name   => $::cinder::params::package_name,
}
resources/cinder_api_puppet/actions/run.pp (new file): 52 lines

@@ -0,0 +1,52 @@
$resource = hiera($::resource_name)

$keystone_password = $resource['input']['keystone_password']['value']
$keystone_enabled = $resource['input']['keystone_enabled']['value']
$keystone_tenant = $resource['input']['keystone_tenant']['value']
$keystone_user = $resource['input']['keystone_user']['value']
$keystone_auth_host = $resource['input']['keystone_auth_host']['value']
$keystone_auth_port = $resource['input']['keystone_auth_port']['value']
$keystone_auth_protocol = $resource['input']['keystone_auth_protocol']['value']
$keystone_auth_admin_prefix = $resource['input']['keystone_auth_admin_prefix']['value']
$keystone_auth_uri = $resource['input']['keystone_auth_uri']['value']
$os_region_name = $resource['input']['os_region_name']['value']
$service_port = $resource['input']['service_port']['value']
$service_workers = $resource['input']['service_workers']['value']
$package_ensure = $resource['input']['package_ensure']['value']
$bind_host = $resource['input']['bind_host']['value']
$ratelimits = $resource['input']['ratelimits']['value']
$default_volume_type = $resource['input']['default_volume_type']['value']
$ratelimits_factory = $resource['input']['ratelimits_factory']['value']
$validate = $resource['input']['validate']['value']
$validation_options = $resource['input']['validation_options']['value']

include cinder::params

package { 'cinder':
  ensure => $package_ensure,
  name   => $::cinder::params::package_name,
} ->

class {'cinder::api':
  keystone_password          => $keystone_password,
  keystone_enabled           => $keystone_enabled,
  keystone_tenant            => $keystone_tenant,
  keystone_user              => $keystone_user,
  keystone_auth_host         => $keystone_auth_host,
  keystone_auth_port         => $keystone_auth_port,
  keystone_auth_protocol     => $keystone_auth_protocol,
  keystone_auth_admin_prefix => $keystone_auth_admin_prefix,
  keystone_auth_uri          => $keystone_auth_uri,
  os_region_name             => $os_region_name,
  service_port               => $service_port,
  service_workers            => $service_workers,
  package_ensure             => $package_ensure,
  bind_host                  => $bind_host,
  enabled                    => true,
  manage_service             => true,
  ratelimits                 => $ratelimits,
  default_volume_type        => $default_volume_type,
  ratelimits_factory         => $ratelimits_factory,
  validate                   => $validate,
  validation_options         => $validation_options,
}
resources/cinder_api_puppet/meta.yaml (new file): 78 lines

@@ -0,0 +1,78 @@
id: cinder_api_puppet
handler: puppet
puppet_module: cinder_api
version: 1.0.0
input:
  keystone_password:
    schema: str!
    value: 'keystone'
  keystone_enabled:
    schema: bool
    value: true
  keystone_tenant:
    schema: str
    value: 'services'
  keystone_user:
    schema: str
    value: 'cinder'
  keystone_auth_host:
    schema: str
    value: 'localhost'
  keystone_auth_port:
    schema: int
    value: 35357
  keystone_auth_protocol:
    schema: str
    value: 'http'
  keystone_auth_admin_prefix:
    schema: bool
    value: false
  keystone_auth_uri:
    schema: bool
    value: false
  os_region_name:
    schema: str
    value: ''
  service_port:
    schema: int
    value: 5000
  service_workers:
    schema: int
    value: 1
  package_ensure:
    schema: str
    value: 'present'
  bind_host:
    schema: str
    value: '0.0.0.0'
  ratelimits:
    schema: str
    value: ''
  default_volume_type:
    schema: bool
    value: false
  ratelimits_factory:
    schema: str
    value: 'cinder.api.v1.limits:RateLimitingMiddleware.factory'
  validate:
    schema: bool
    value: false
  validation_options:
    schema: {}
    value: {}

  git:
    schema: {repository: str!, branch: str!}
    value: {repository: 'https://github.com/openstack/puppet-cinder', branch: '5.1.0'}

  ip:
    schema: str!
    value:
  ssh_key:
    schema: str!
    value:
  ssh_user:
    schema: str!
    value:

tags: [resource/cinder_api_service, resources/cinder_api, resources/cinder]
resources/cinder_api_puppet/test.py (new file): 10 lines

@@ -0,0 +1,10 @@
import requests

from solar.core.log import log


def test(resource):
    log.debug('Testing cinder_api_puppet')
    requests.get(
        'http://%s:%s' % (resource.args['ip'].value, resource.args['port'].value)
    )
resources/cinder_puppet/README.md (new file): 112 lines

@@ -0,0 +1,112 @@
# Cinder resource for puppet handler

Controls the life cycle of the cinder entities,
like the main puppet class, auth, DB, AMQP, packages,
keystone user, role and endpoint. The deploy example below shows how it is wired up.

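For context, the deploy example elsewhere in this change wires this resource up roughly like this (an excerpt of that Python, with the database inputs fed from other resources):

```
cinder_puppet = vr.create('cinder_puppet', 'resources/cinder_puppet', {})[0]
signals.connect(node1, cinder_puppet)
signals.connect(cinder_db_user, cinder_puppet, {
    'user_name': 'db_user',
    'db_name': 'db_name',
    'user_password': 'db_password',
    'db_host': 'db_host'})
```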
# Parameters

source https://github.com/openstack/puppet-cinder/blob/5.1.0/manifests/init.pp

``database_connection``
Url used to connect to database.
(Optional) Defaults to
'sqlite:////var/lib/cinder/cinder.sqlite'

``database_idle_timeout``
Timeout when db connections should be reaped.
(Optional) Defaults to 3600.

``database_min_pool_size``
Minimum number of SQL connections to keep open in a pool.
(Optional) Defaults to 1.

``database_max_pool_size``
Maximum number of SQL connections to keep open in a pool.
(Optional) Defaults to undef.

``database_max_retries``
Maximum db connection retries during startup.
Setting -1 implies an infinite retry count.
(Optional) Defaults to 10.

``database_retry_interval``
Interval between retries of opening a sql connection.
(Optional) Defaults to 10.

``database_max_overflow``
If set, use this value for max_overflow with sqlalchemy.
(Optional) Defaults to undef.

``rabbit_use_ssl``
(optional) Connect over SSL for RabbitMQ
Defaults to false

``kombu_ssl_ca_certs``
(optional) SSL certification authority file (valid only if SSL enabled).
Defaults to undef

``kombu_ssl_certfile``
(optional) SSL cert file (valid only if SSL enabled).
Defaults to undef

``kombu_ssl_keyfile``
(optional) SSL key file (valid only if SSL enabled).
Defaults to undef

``kombu_ssl_version``
(optional) SSL version to use (valid only if SSL enabled).
Valid values are TLSv1, SSLv23 and SSLv3. SSLv2 may be
available on some distributions.
Defaults to 'TLSv1'

``amqp_durable_queues``
Use durable queues in amqp.
(Optional) Defaults to false.

``use_syslog``
Use syslog for logging.
(Optional) Defaults to false.

``log_facility``
Syslog facility to receive log lines.
(Optional) Defaults to LOG_USER.

``log_dir``
(optional) Directory where logs should be stored.
If set to boolean false, it will not log to any directory.
Defaults to '/var/log/cinder'

``use_ssl``
(optional) Enable SSL on the API server
Defaults to false, not set

``cert_file``
(optional) Certificate file to use when starting API server securely
Defaults to false, not set

``key_file``
(optional) Private key file to use when starting API server securely
Defaults to false, not set

``ca_file``
(optional) CA certificate file to use to verify connecting clients
Defaults to false, not set

``mysql_module``
(optional) Deprecated. Does nothing.

``storage_availability_zone``
(optional) Availability zone of the node.
Defaults to 'nova'

``default_availability_zone``
(optional) Default availability zone for new volumes.
If not set, the storage_availability_zone option value is used as
the default for new volumes.
Defaults to false

``sql_connection``
DEPRECATED

``sql_idle_timeout``
DEPRECATED
resources/cinder_puppet/actions/remove.pp (new file): 4 lines

@@ -0,0 +1,4 @@
class {'cinder':
  package_ensure  => 'absent',
  rabbit_password => 'not important as removed',
}
resources/cinder_puppet/actions/run.pp (new file): 114 lines

@@ -0,0 +1,114 @@
$resource = hiera($::resource_name)

$ip = $resource['input']['ip']['value']

$db_user = $resource['input']['db_user']['value']
$db_password = $resource['input']['db_password']['value']
$db_name = $resource['input']['db_name']['value']

$database_connection = $resource['input']['database_connection']['value']
$database_idle_timeout = $resource['input']['database_idle_timeout']['value']
$database_min_pool_size = $resource['input']['database_min_pool_size']['value']
$database_max_pool_size = $resource['input']['database_max_pool_size']['value']
$database_max_retries = $resource['input']['database_max_retries']['value']
$database_retry_interval = $resource['input']['database_retry_interval']['value']
$database_max_overflow = $resource['input']['database_max_overflow']['value']
$rpc_backend = $resource['input']['rpc_backend']['value']
$control_exchange = $resource['input']['control_exchange']['value']
$rabbit_host = $resource['input']['rabbit_host']['value']
$rabbit_port = $resource['input']['rabbit_port']['value']
$rabbit_hosts = $resource['input']['rabbit_hosts']['value']
$rabbit_virtual_host = $resource['input']['rabbit_virtual_host']['value']
$rabbit_userid = $resource['input']['rabbit_userid']['value']
$rabbit_password = $resource['input']['rabbit_password']['value']
$rabbit_use_ssl = $resource['input']['rabbit_use_ssl']['value']
$kombu_ssl_ca_certs = $resource['input']['kombu_ssl_ca_certs']['value']
$kombu_ssl_certfile = $resource['input']['kombu_ssl_certfile']['value']
$kombu_ssl_keyfile = $resource['input']['kombu_ssl_keyfile']['value']
$kombu_ssl_version = $resource['input']['kombu_ssl_version']['value']
$amqp_durable_queues = $resource['input']['amqp_durable_queues']['value']
$qpid_hostname = $resource['input']['qpid_hostname']['value']
$qpid_port = $resource['input']['qpid_port']['value']
$qpid_username = $resource['input']['qpid_username']['value']
$qpid_password = $resource['input']['qpid_password']['value']
$qpid_sasl_mechanisms = $resource['input']['qpid_sasl_mechanisms']['value']
$qpid_reconnect = $resource['input']['qpid_reconnect']['value']
$qpid_reconnect_timeout = $resource['input']['qpid_reconnect_timeout']['value']
$qpid_reconnect_limit = $resource['input']['qpid_reconnect_limit']['value']
$qpid_reconnect_interval_min = $resource['input']['qpid_reconnect_interval_min']['value']
$qpid_reconnect_interval_max = $resource['input']['qpid_reconnect_interval_max']['value']
$qpid_reconnect_interval = $resource['input']['qpid_reconnect_interval']['value']
$qpid_heartbeat = $resource['input']['qpid_heartbeat']['value']
$qpid_protocol = $resource['input']['qpid_protocol']['value']
$qpid_tcp_nodelay = $resource['input']['qpid_tcp_nodelay']['value']
$package_ensure = $resource['input']['package_ensure']['value']
$use_ssl = $resource['input']['use_ssl']['value']
$ca_file = $resource['input']['ca_file']['value']
$cert_file = $resource['input']['cert_file']['value']
$key_file = $resource['input']['key_file']['value']
$api_paste_config = $resource['input']['api_paste_config']['value']
$use_syslog = $resource['input']['use_syslog']['value']
$log_facility = $resource['input']['log_facility']['value']
$log_dir = $resource['input']['log_dir']['value']
$verbose = $resource['input']['verbose']['value']
$debug = $resource['input']['debug']['value']
$storage_availability_zone = $resource['input']['storage_availability_zone']['value']
$default_availability_zone = $resource['input']['default_availability_zone']['value']
$mysql_module = $resource['input']['mysql_module']['value']
# Do not apply the legacy stuff
#$sql_connection = $resource['input']['sql_connection']['value']
$sql_idle_timeout = $resource['input']['sql_idle_timeout']['value']

class {'cinder':
  database_connection         => "mysql://${db_user}:${db_password}@${ip}/${db_name}",
  database_idle_timeout       => $database_idle_timeout,
  database_min_pool_size      => $database_min_pool_size,
  database_max_pool_size      => $database_max_pool_size,
  database_max_retries        => $database_max_retries,
  database_retry_interval     => $database_retry_interval,
  database_max_overflow       => $database_max_overflow,
  rpc_backend                 => $rpc_backend,
  control_exchange            => $control_exchange,
  rabbit_host                 => $rabbit_host,
  rabbit_port                 => $rabbit_port,
  rabbit_hosts                => $rabbit_hosts,
  rabbit_virtual_host         => $rabbit_virtual_host,
  rabbit_userid               => $rabbit_userid,
  rabbit_password             => $rabbit_password,
  rabbit_use_ssl              => $rabbit_use_ssl,
  kombu_ssl_ca_certs          => $kombu_ssl_ca_certs,
  kombu_ssl_certfile          => $kombu_ssl_certfile,
  kombu_ssl_keyfile           => $kombu_ssl_keyfile,
  kombu_ssl_version           => $kombu_ssl_version,
  amqp_durable_queues         => $amqp_durable_queues,
  qpid_hostname               => $qpid_hostname,
  qpid_port                   => $qpid_port,
  qpid_username               => $qpid_username,
  qpid_password               => $qpid_password,
  qpid_sasl_mechanisms        => $qpid_sasl_mechanisms,
  qpid_reconnect              => $qpid_reconnect,
  qpid_reconnect_timeout      => $qpid_reconnect_timeout,
  qpid_reconnect_limit        => $qpid_reconnect_limit,
  qpid_reconnect_interval_min => $qpid_reconnect_interval_min,
  qpid_reconnect_interval_max => $qpid_reconnect_interval_max,
  qpid_reconnect_interval     => $qpid_reconnect_interval,
  qpid_heartbeat              => $qpid_heartbeat,
  qpid_protocol               => $qpid_protocol,
  qpid_tcp_nodelay            => $qpid_tcp_nodelay,
  package_ensure              => $package_ensure,
  use_ssl                     => $use_ssl,
  ca_file                     => $ca_file,
  cert_file                   => $cert_file,
  key_file                    => $key_file,
  api_paste_config            => $api_paste_config,
  use_syslog                  => $use_syslog,
  log_facility                => $log_facility,
  log_dir                     => $log_dir,
  verbose                     => $verbose,
  debug                       => $debug,
  storage_availability_zone   => $storage_availability_zone,
  default_availability_zone   => $default_availability_zone,
  mysql_module                => $mysql_module,
  # sql_connection            => $sql_connection,  # legacy value, intentionally not applied (see above)
  sql_idle_timeout            => $sql_idle_timeout,
}
resources/cinder_puppet/meta.yaml (new file): 211 lines

@@ -0,0 +1,211 @@
id: cinder_puppet
handler: puppet
puppet_module: cinder
version: 1.0.0
input:
  database_connection:
    schema: str
    value: 'sqlite:////var/lib/cinder/cinder.sqlite'
  database_idle_timeout:
    schema: int
    value: 3600
  database_min_pool_size:
    schema: int
    value: 1
  database_max_pool_size:
    schema: str
    value: ''
  database_max_retries:
    schema: int
    value: 10
  database_retry_interval:
    schema: int
    value: 10
  database_max_overflow:
    schema: str
    value: ''
  rpc_backend:
    schema: str
    value: 'cinder.openstack.common.rpc.impl_kombu'
  control_exchange:
    schema: str
    value: 'openstack'
  rabbit_host:
    schema: str
    value: '127.0.0.1'
  rabbit_port:
    schema: int
    value: 5672
  rabbit_hosts:
    schema: bool
    value: false
  rabbit_virtual_host:
    schema: str
    value: '/'
  rabbit_userid:
    schema: str
    value: 'guest'
  rabbit_password:
    schema: str!
    value: 'rabbit'
  rabbit_use_ssl:
    schema: bool
    value: false
  kombu_ssl_ca_certs:
    schema: str
    value: ''
  kombu_ssl_certfile:
    schema: str
    value: ''
  kombu_ssl_keyfile:
    schema: str
    value: ''
  kombu_ssl_version:
    schema: str
    value: 'TLSv1'
  amqp_durable_queues:
    schema: bool
    value: false
  qpid_hostname:
    schema: str
    value: 'localhost'
  qpid_port:
    schema: int
    value: 5672
  qpid_username:
    schema: str
    value: 'guest'
  qpid_password:
    schema: str!
    value: 'qpid'
  qpid_sasl_mechanisms:
    schema: bool
    value: false
  qpid_reconnect:
    schema: bool
    value: true
  qpid_reconnect_timeout:
    schema: int
    value: 0
  qpid_reconnect_limit:
    schema: int
    value: 0
  qpid_reconnect_interval_min:
    schema: int
    value: 0
  qpid_reconnect_interval_max:
    schema: int
    value: 0
  qpid_reconnect_interval:
    schema: int
    value: 0
  qpid_heartbeat:
    schema: int
    value: 60
  qpid_protocol:
    schema: str
    value: 'tcp'
  qpid_tcp_nodelay:
    schema: bool
    value: true
  package_ensure:
    schema: str
    value: 'present'
  use_ssl:
    schema: bool
    value: false
  ca_file:
    schema: bool
    value: false
  cert_file:
    schema: bool
    value: false
  key_file:
    schema: bool
    value: false
  api_paste_config:
    schema: str
    value: '/etc/cinder/api-paste.ini'
  use_syslog:
    schema: bool
    value: false
  log_facility:
    schema: str
    value: 'LOG_USER'
  log_dir:
    schema: str
    value: '/var/log/cinder'
  verbose:
    schema: bool
    value: false
  debug:
    schema: bool
    value: false
  storage_availability_zone:
    schema: str
    value: 'nova'
  default_availability_zone:
    schema: bool
    value: false
  mysql_module:
    schema: str
    value: ''
  sql_connection:
    schema: str
    value: ''
  sql_idle_timeout:
    schema: str
    value: ''

  db_user:
    schema: str!
    value: cinder
  db_password:
    schema: str!
    value: cinder
  db_name:
    schema: str!
    value: cinder
  db_host:
    schema: str!
    value:

  port:
    schema: int!
    value: 8776

  git:
    schema: {repository: str!, branch: str!}
    value: {repository: 'https://github.com/openstack/puppet-cinder', branch: '5.1.0'}

  keystone_host:
    schema: str!
    value: ''
  keystone_port:
    schema: int!
    value: ''
  keystone_user:
    schema: str!
    value: ''
  keystone_password:
    schema: str!
    value: ''
  keystone_tenant:
    schema: str!
    value: ''

  # forge:
  #   schema: str!
  #   value: 'stackforge-cinder'

  ip:
    schema: str!
    value:
  ssh_key:
    schema: str!
    value:
  ssh_user:
    schema: str!
    value:

tags: [resource/cinder_service, resources/cinder]
resources/cinder_puppet/test.py (new file): 10 lines

@@ -0,0 +1,10 @@
import requests

from solar.core.log import log


def test(resource):
    log.debug('Testing cinder_puppet')
    requests.get(
        'http://%s:%s' % (resource.args['ip'].value, resource.args['port'].value)
    )
resources/cinder_scheduler_puppet/README.md (new file): 3 lines

@@ -0,0 +1,3 @@
# Cinder Scheduler resource for puppet handler

Set up and configure the cinder scheduler service
resources/cinder_scheduler_puppet/actions/remove.pp (new file): 4 lines

@@ -0,0 +1,4 @@
class {'cinder::scheduler':
  enabled        => false,
  package_ensure => 'absent',
}
resources/cinder_scheduler_puppet/actions/run.pp (new file): 18 lines

@@ -0,0 +1,18 @@
$resource = hiera($::resource_name)

$scheduler_driver = $resource['input']['scheduler_driver']['value']
$package_ensure = $resource['input']['package_ensure']['value']

include cinder::params

package { 'cinder':
  ensure => $package_ensure,
  name   => $::cinder::params::package_name,
} ->

class {'cinder::scheduler':
  scheduler_driver => $scheduler_driver,
  package_ensure   => $package_ensure,
  enabled          => true,
  manage_service   => true,
}
resources/cinder_scheduler_puppet/meta.yaml (new file): 27 lines

@@ -0,0 +1,27 @@
id: cinder_scheduler_puppet
handler: puppet
puppet_module: cinder_scheduler
version: 1.0.0
input:
  scheduler_driver:
    schema: str
    value: ''
  package_ensure:
    schema: str
    value: 'present'

  git:
    schema: {repository: str!, branch: str!}
    value: {repository: 'https://github.com/openstack/puppet-cinder', branch: '5.1.0'}

  ip:
    schema: str!
    value:
  ssh_key:
    schema: str!
    value:
  ssh_user:
    schema: str!
    value:

tags: [resource/cinder_scheduler_service, resources/cinder_scheduler, resources/cinder]
12 resources/cinder_scheduler_puppet/test.py Normal file
@@ -0,0 +1,12 @@
import requests

from solar.core.log import log


def test(resource):
    log.debug('Testing cinder_scheduler_puppet')
    # requests.get(
    #     'http://%s:%s' % (resource.args['ip'].value, resource.args['port'].value)
    # TODO(bogdando) figure out how to test this
    # http://docs.openstack.org/developer/nova/devref/scheduler.html
    # )
3 resources/cinder_volume_puppet/README.md Normal file
@@ -0,0 +1,3 @@
# Cinder Volume resource for puppet handler

Sets up and configures the cinder volume service
4 resources/cinder_volume_puppet/actions/remove.pp Normal file
@@ -0,0 +1,4 @@
class {'cinder::volume':
  enabled        => false,
  package_ensure => 'absent',
}
16 resources/cinder_volume_puppet/actions/run.pp Normal file
@@ -0,0 +1,16 @@
$resource = hiera($::resource_name)

$package_ensure = $resource['input']['package_ensure']['value']

include cinder::params

package { 'cinder':
  ensure => $package_ensure,
  name   => $::cinder::params::package_name,
} ->

class {'cinder::volume':
  package_ensure => $package_ensure,
  enabled        => true,
  manage_service => true,
}
24 resources/cinder_volume_puppet/meta.yaml Normal file
@@ -0,0 +1,24 @@
id: cinder_volume_puppet
handler: puppet
puppet_module: cinder_volume
version: 1.0.0
input:
  package_ensure:
    schema: str
    value: 'present'

  git:
    schema: {repository: str!, branch: str!}
    value: {repository: 'https://github.com/openstack/puppet-cinder', branch: '5.1.0'}

  ip:
    schema: str!
    value:
  ssh_key:
    schema: str!
    value:
  ssh_user:
    schema: str!
    value:

tags: [resource/cinder_volume_service, resources/cinder_volume, resources/cinder]
12 resources/cinder_volume_puppet/test.py Normal file
@@ -0,0 +1,12 @@
import requests

from solar.core.log import log


def test(resource):
    log.debug('Testing cinder_volume_puppet')
    # requests.get(
    #     'http://%s:%s' % (resource.args['ip'].value, resource.args['port'].value)
    # TODO(bogdando) figure out how to test this
    # http://docs.openstack.org/developer/nova/devref/volume.html
    # )
@@ -3,6 +3,7 @@ $resource = hiera($::resource_name)
$ip = $resource['input']['ip']['value']
$admin_token = $resource['input']['admin_token']['value']
$db_user = $resource['input']['db_user']['value']
$db_host = $resource['input']['db_host']['value']
$db_password = $resource['input']['db_password']['value']
$db_name = $resource['input']['db_name']['value']
$admin_port = $resource['input']['admin_port']['value']
@@ -13,8 +14,9 @@ class {'keystone':
  verbose => true,
  catalog_type => 'sql',
  admin_token => $admin_token,
  database_connection => "mysql://$db_user:$db_password@$ip/$db_name",
  database_connection => "mysql://$db_user:$db_password@$db_host/$db_name",
  public_port => "$port",
  admin_port => "$admin_port",
  token_driver => 'keystone.token.backends.kvs.Token'
}

@@ -8,13 +8,16 @@ input:
    value: admin_token
  db_user:
    schema: str!
    value: keystone
    value:
  db_password:
    schema: str!
    value: keystone
    value:
  db_name:
    schema: str!
    value: keystone
    value:
  db_host:
    schema: str!
    value:

  admin_port:
    schema: int!
@@ -8,4 +8,4 @@
        login_user: root
        login_password: {{login_password}}
        login_port: {{login_port}}
        login_host: 127.0.0.1
        login_host: {{db_host}}
@@ -8,4 +8,4 @@
        login_user: root
        login_password: {{ login_password }}
        login_port: {{ login_port }}
        login_host: 127.0.0.1
        login_host: {{db_host}}
@@ -8,6 +8,9 @@ input:
  db_name:
    schema: str!
    value:
  db_host:
    schema: str!
    value:

  login_user:
    schema: str!
@@ -1,11 +0,0 @@
- hosts: [{{ ip }}]
  sudo: yes
  tasks:
    - name: mariadb db
      mysql_db:
        name: {{db_name}}
        state: absent
        login_user: root
        login_password: {{login_password}}
        login_port: {{login_port}}
        login_host: 127.0.0.1
@@ -1,13 +0,0 @@
- hosts: [{{ ip }}]
  sudo: yes
  tasks:
    - name: mariadb db
      mysql_db:
        name: {{ db_name }}
        #collation: utf8_encode_ci
        encoding: utf8
        state: present
        login_user: root
        login_password: {{ login_password }}
        login_port: {{ login_port }}
        login_host: 127.0.0.1
@@ -1,30 +0,0 @@
id: mariadb_keystone_db
handler: ansible
version: 1.0.0
actions:
  run: run.yml
  remove: remove.yml
input:
  db_name:
    schema: str!
    value: keystone_db
  login_user:
    schema: str!
    value: root
  login_password:
    schema: str!
    value:
  login_port:
    schema: int!
    value:
  ip:
    schema: str!
    value:
  ssh_key:
    schema: str!
    value:
  ssh_user:
    schema: str!
    value:

tags: [resource/mariadb_keystone_db, resources/mariadb]
@@ -1,11 +0,0 @@
- hosts: [{{ ip }}]
  sudo: yes
  tasks:
    - name: mariadb user
      mysql_user:
        name: {{new_user_name}}
        state: absent
        login_user: root
        login_password: {{login_password}}
        login_port: {{login_port}}
        login_host: 127.0.0.1
@@ -1,14 +0,0 @@
- hosts: [{{ ip }}]
  sudo: yes
  tasks:
    - name: mariadb user
      mysql_user:
        name: {{ new_user_name }}
        password: {{ new_user_password }}
        priv: {{ db_name }}.*:ALL
        host: '%'
        state: present
        login_user: root
        login_password: {{ login_password }}
        login_port: {{ login_port }}
        login_host: 127.0.0.1
@@ -1,37 +0,0 @@
id: mariadb_keystone_user
handler: ansible
version: 1.0.0
actions:
  run: run.yml
  remove: remove.yml
input:
  new_user_password:
    schema: str!
    value: keystone
  new_user_name:
    schema: str!
    value: keystone
  db_name:
    schema: str!
    value:
  login_password:
    schema: str!
    value:
  login_port:
    schema: int!
    value:
  login_user:
    schema: str!
    value:
  ip:
    schema: str!
    value:
  ssh_key:
    schema: str!
    value:
  ssh_user:
    schema: str!
    value:

tags: [resource/mariadb_keystone_user, resources/mariadb]

@@ -8,4 +8,4 @@
        login_user: root
        login_password: {{login_password}}
        login_port: {{login_port}}
        login_host: 127.0.0.1
        login_host: {{db_host}}
@@ -11,4 +11,4 @@
        login_user: root
        login_password: {{ login_password }}
        login_port: {{ login_port }}
        login_host: 127.0.0.1
        login_host: {{db_host}}
@@ -15,6 +15,9 @@ input:
  db_name:
    schema: str!
    value:
  db_host:
    schema: str!
    value:

  login_password:
    schema: str!
@@ -1,4 +1,4 @@
$resource = hiera('{{ resource_name }}')
$resource = hiera($::resource_name)

$ip = $resource['input']['ip']['value']

@@ -6,6 +6,7 @@ $rabbitmq_user = $resource['input']['rabbitmq_user']['value']
$rabbitmq_password = $resource['input']['rabbitmq_password']['value']
$rabbitmq_host = $resource['input']['rabbitmq_host']['value']
$rabbitmq_port = $resource['input']['rabbitmq_port']['value']
$rabbitmq_virtual_host = $resource['input']['rabbitmq_virtual_host']['value']

$keystone_host = $resource['input']['keystone_host']['value']
$keystone_port = $resource['input']['keystone_port']['value']
@@ -23,6 +24,7 @@ class { 'neutron':
  rabbit_password => $rabbitmq_password,
  rabbit_host => $rabbitmq_host,
  rabbit_port => $rabbitmq_port,
  rabbit_virtual_host => $rabbitmq_virtual_host,
  service_plugins => ['metering']
}

@@ -24,6 +24,9 @@ input:
  rabbitmq_password:
    schema: str!
    value: ''
  rabbitmq_virtual_host:
    schema: str!
    value: ''

  git:
    schema: {repository: str!, branch: str!}
@@ -1,6 +0,0 @@
# TODO
- hosts: [{{ ip }}]
  sudo: yes
  tasks:
    - shell: docker stop {{ name }}
    - shell: docker rm {{ name }}
@@ -1,6 +0,0 @@
# TODO
- hosts: [{{ ip }}]
  sudo: yes
  tasks:
    - shell: docker run -d --net="host" --privileged \
        --name {{ name }} {{ image }}
@@ -1,13 +0,0 @@
id: nova
handler: ansible
version: 1.0.0
input:
  ip:
    schema: str!
    value:
  port:
    schema: int!
    value: 8774
  image: # TODO
    schema: str!
    value:
29 resources/nova_api_puppet/actions/remove.pp Normal file
@@ -0,0 +1,29 @@
$resource = hiera($::resource_name)

$rabbitmq_user = $resource['input']['rabbitmq_user']['value']
$rabbitmq_password = $resource['input']['rabbitmq_password']['value']
$rabbitmq_host = $resource['input']['rabbitmq_host']['value']
$db_user = $resource['input']['db_user']['value']
$db_password = $resource['input']['db_password']['value']
$db_name = $resource['input']['db_name']['value']
$db_host = $resource['input']['db_host']['value']
$keystone_password = $resource['input']['keystone_password']['value']
$keystone_host = $resource['input']['keystone_host']['value']
$keystone_port = $resource['input']['keystone_port']['value']
$keystone_tenant_name = $resource['input']['keystone_tenant_name']['value']
$keystone_user = $resource['input']['keystone_user_name']['value']

class { 'nova':
  database_connection => "mysql://${db_user}:${db_password}@${db_host}/${db_name}?charset=utf8",
  rabbit_userid => $rabbitmq_user,
  rabbit_password => $rabbitmq_password,
  rabbit_host => $rabbitmq_host,
  image_service => 'nova.image.glance.GlanceImageService',
  glance_api_servers => 'localhost:9292',
  verbose => false,
}

class { 'nova::api':
  admin_password => $keystone_password,
  ensure_package => 'absent'
}
33 resources/nova_api_puppet/actions/run.pp Normal file
@@ -0,0 +1,33 @@
$resource = hiera($::resource_name)

$rabbitmq_user = $resource['input']['rabbitmq_user']['value']
$rabbitmq_password = $resource['input']['rabbitmq_password']['value']
$rabbitmq_host = $resource['input']['rabbitmq_host']['value']
$db_user = $resource['input']['db_user']['value']
$db_password = $resource['input']['db_password']['value']
$db_name = $resource['input']['db_name']['value']
$db_host = $resource['input']['db_host']['value']
$keystone_password = $resource['input']['keystone_password']['value']
$keystone_host = $resource['input']['keystone_host']['value']
$keystone_port = $resource['input']['keystone_port']['value']
$keystone_tenant_name = $resource['input']['keystone_tenant_name']['value']
$keystone_user = $resource['input']['keystone_user_name']['value']

class { 'nova':
  database_connection => "mysql://${db_user}:${db_password}@${db_host}/${db_name}?charset=utf8",
  rabbit_userid => $rabbitmq_user,
  rabbit_password => $rabbitmq_password,
  rabbit_host => $rabbitmq_host,
  image_service => 'nova.image.glance.GlanceImageService',
  glance_api_servers => 'localhost:9292',
  verbose => false,
}

class { 'nova::api':
  enabled => true,
  admin_user => $keystone_user,
  admin_password => $keystone_password,
  auth_host => $keystone_host,
  auth_port => $keystone_port,
  admin_tenant_name => $keystone_tenant_name,
}
59 resources/nova_api_puppet/meta.yaml Normal file
@@ -0,0 +1,59 @@
id: nova
handler: puppet
puppet_module: nova
version: 1.0.0
input:
  db_user:
    schema: str!
    value: nova
  db_password:
    schema: str!
    value:
  db_name:
    schema: str!
    value:
  db_host:
    schema: str!
    value:

  rabbitmq_user:
    schema: str!
    value:
  rabbitmq_password:
    schema: str!
    value:
  rabbitmq_host:
    schema: str!
    value:

  keystone_password:
    schema: str!
    value:
  keystone_port:
    schema: int!
    value:
  keystone_host:
    schema: str!
    value:
  keystone_tenant_name:
    schema: str!
    value:
  keystone_user_name:
    schema: str!
    value:

  git:
    schema: {repository: str!, branch: str!}
    value: {repository: 'https://github.com/openstack/puppet-nova', branch: 'stable/juno'}

  ip:
    schema: str!
    value:
  ssh_key:
    schema: str!
    value:
  ssh_user:
    schema: str!
    value:

tags: [resource/nova_service, resources/nova, resource/nova-api]
@@ -7,3 +7,4 @@ export OS_PASSWORD={{password}}
export OS_AUTH_URL=http://{{keystone_host}}:{{keystone_port}}/v2.0
export OS_AUTH_STRATEGY=keystone
export OS_REGION_NAME='RegionOne'
export OS_VOLUME_API_VERSION='2'
@@ -1,11 +1,8 @@
$resource = hiera($::resource_name)

$node_name = $resource['input']['node_name']['value']

class { '::rabbitmq':
  package_ensure => 'absent',
  environment_variables => {
    'RABBITMQ_NODENAME' => $node_name,
    'RABBITMQ_SERVICENAME' => 'RabbitMQ'
  }
}

@@ -2,7 +2,6 @@ $resource = hiera($::resource_name)

$port = "${resource['input']['port']['value']}"
$management_port = "${resource['input']['management_port']['value']}"
$node_name = $resource['input']['node_name']['value']

class { '::rabbitmq':
  service_manage => true,
@@ -11,15 +11,12 @@ input:
    schema: str!
    value: ''

  node_name:
    schema: str!
    value: 'node1'
  port:
    schema: int!
    value: ''
    value: 5672
  management_port:
    schema: int!
    value: ''
    value: 15672
  git:
    schema: {repository: str!, branch: str!}
    value: {repository: 'https://github.com/puppetlabs/puppetlabs-rabbitmq.git', branch: '5.1.0'}
@@ -16,7 +16,7 @@ fi

. $VENV/bin/activate

pip install -r solar/requirements.txt --download-cache=/tmp/$JOB_NAME
pip install -r solar/test-requirements.txt --download-cache=/tmp/$JOB_NAME

pushd solar/solar

14 slave_cinder.yml Normal file
@@ -0,0 +1,14 @@
---

- hosts: all
  sudo: yes
  tasks:
    - apt: name=lvm2 state=present
    - command: sudo truncate -s 10G /root/cinder.img creates=/root/cinder.img
    - shell: sudo losetup -a|grep cinder
      register: loop_created
      ignore_errors: True
    - command: sudo losetup /dev/loop0 /root/cinder.img
      when: loop_created|failed
    - lvg: vg=cinder-volumes pvs=/dev/loop0
      when: loop_created|failed
@@ -6,7 +6,6 @@ networkx==1.9.1
PyYAML==3.11
jsonschema==2.4.0
requests==2.7.0
#mock
dictdiffer==0.4.0
enum34==1.0.4
redis==2.10.3
@@ -15,3 +14,5 @@ fakeredis
inflection
Fabric==1.10.2
tabulate==0.7.5
ansible
celery
@@ -29,8 +29,6 @@ import tabulate
import yaml

from solar import utils
from solar import operations
from solar import state
from solar.core import actions
from solar.core import resource as sresource
from solar.core.resource import assign_resources_to_nodes
@@ -39,8 +37,11 @@ from solar.core.tags_set_parser import Expression
from solar.core import testing
from solar.core.resource import virtual_resource as vr
from solar.interfaces.db import get_db
from solar import errors
from solar.core.log import log

from solar.cli.orch import orchestration
from solar.cli.system_log import changes

# NOTE: these are extensions, they shouldn't be imported here
# Maybe each extension can also extend the CLI with parsers
@@ -214,54 +215,6 @@ def init_actions():
    ))


def init_changes():
    @main.group()
    def changes():
        pass

    @changes.command()
    def validate():
        errors = vr.validate_resources()
        if errors:
            for r, error in errors:
                print 'ERROR: %s: %s' % (r.name, error)
            sys.exit(1)

    @changes.command()
    def stage():
        log = operations.stage_changes()
        click.echo(log.show())

    @changes.command()
    @click.option('--one', is_flag=True, default=False)
    def commit(one):
        if one:
            operations.commit_one()
        else:
            operations.commit_changes()

    @changes.command()
    @click.option('--limit', default=5)
    def history(limit):
        click.echo(state.CL().show())

    @changes.command()
    @click.option('--last', is_flag=True, default=False)
    @click.option('--all', is_flag=True, default=False)
    @click.option('--uid', default=None)
    def rollback(last, all, uid):
        if last:
            click.echo(operations.rollback_last())
        elif all:
            click.echo(operations.rollback_all())
        elif uid:
            click.echo(operations.rollback_uid(uid))

    @changes.command()
    def test():
        testing.test_all()


def init_cli_connect():
    @main.command()
    @click.argument('emitter')
@@ -362,8 +315,13 @@ def init_cli_resource():
        click.echo(
            'action {} for resource {}'.format(action, resource)
        )
        actions.resource_action(sresource.load(resource), action)

        r = sresource.load(resource)
        try:
            actions.resource_action(r, action)
        except errors.SolarError as e:
            log.debug(e)
            sys.exit(1)

        if dry_run:
            click.echo('EXECUTED:')
@@ -395,7 +353,8 @@ def init_cli_resource():

    @resource.command()
    @click.argument('name')
    @click.argument('base_path', type=click.Path(exists=True, file_okay=True))
    @click.argument(
        'base_path', type=click.Path(exists=True, resolve_path=True))
    @click.argument('args', nargs=-1)
    def create(args, base_path, name):
        args_parsed = {}
@@ -506,13 +465,13 @@ def init_cli_resource():

def run():
    init_actions()
    init_changes()
    init_cli_connect()
    init_cli_connections()
    init_cli_deployment_config()
    init_cli_resource()

    main.add_command(orchestration)
    main.add_command(changes)
    main()

@@ -21,25 +21,29 @@ def orchestration():
        restart <id> --reset
    """


@orchestration.command()
@click.argument('plan', type=click.File('rb'))
def create(plan):
    click.echo(graph.create_plan(plan.read()))


@orchestration.command()
@click.argument('uid')
@click.argument('plan', type=click.File('rb'))
def update(uid, plan):
    graph.update_plan(uid, plan.read())


@orchestration.command()
@click.argument('uid')
def report(uid):
    colors = {
        'PENDING': 'blue',
        'PENDING': 'cyan',
        'ERROR': 'red',
        'SUCCESS': 'green',
        'INPROGRESS': 'yellow'}
        'INPROGRESS': 'yellow',
        'SKIPPED': 'blue'}

    report = graph.report_topo(uid)
    for item in report:
@@ -78,7 +82,14 @@ def stop(uid):
    # using revoke(terminate=True) will lead to inability to restart execution
    # research possibility of customizations of
    # app.control and Panel.register in celery
    graph.soft_stop(uid)
    tasks.soft_stop.apply_async(args=[uid], queue='scheduler')


@orchestration.command()
@click.argument('uid')
def resume(uid):
    graph.reset(uid, ['SKIPPED'])
    tasks.schedule_start.apply_async(args=[uid], queue='scheduler')


@orchestration.command()
@@ -94,13 +105,23 @@ def dg(uid):
    plan = graph.get_graph(uid)

    colors = {
        'PENDING': 'blue',
        'PENDING': 'cyan',
        'ERROR': 'red',
        'SUCCESS': 'green',
        'INPROGRESS': 'yellow'}
        'INPROGRESS': 'yellow',
        'SKIPPED': 'blue'}

    for n in plan:
        color = colors[plan.node[n]['status']]
        plan.node[n]['color'] = color
    nx.write_dot(plan, 'graph.dot')
    subprocess.call(['dot', '-Tpng', 'graph.dot', '-o', 'graph.png'])
    nx.write_dot(plan, '{name}.dot'.format(name=plan.graph['name']))
    subprocess.call(
        'tred {name}.dot | dot -Tpng -o {name}.png'.format(name=plan.graph['name']),
        shell=True)
    click.echo('Created {name}.png'.format(name=plan.graph['name']))


@orchestration.command()
@click.argument('uid')
def show(uid):
    click.echo(graph.show(uid))
66 solar/solar/cli/system_log.py Normal file
@@ -0,0 +1,66 @@

import sys

import click

from solar.core import testing
from solar.core import resource
from solar.system_log import change
from solar.system_log import operations
from solar.system_log import data


@click.group()
def changes():
    pass


@changes.command()
def validate():
    errors = resource.validate_resources()
    if errors:
        for r, error in errors:
            print 'ERROR: %s: %s' % (r.name, error)
        sys.exit(1)


@changes.command()
def stage():
    log = change.stage_changes()
    staged = list(log.reverse())
    if not staged:
        click.echo('No changes')
    click.echo(staged)


@changes.command()
def process():
    click.echo(change.send_to_orchestration())


@changes.command()
@click.argument('uid')
def commit(uid):
    operations.commit(uid)


@changes.command()
@click.option('-n', default=5)
def history(n):
    commited = list(data.CL().collection(n))
    if not commited:
        click.echo('No history.')
        return
    commited.reverse()
    click.echo(commited)


@changes.command()
def test():
    testing.test_all()


@changes.command(name='clean-history')
def clean_history():
    data.CL().clean()
    data.CD().clean()
@@ -1,11 +1,16 @@
# -*- coding: utf-8 -*-
from fabric import api as fabric_api
from fabric.state import env
import os

from solar.core.log import log
from solar.core.handlers.base import TempFileHandler
from solar import errors


# otherwise fabric will sys.exit(1) in case of errors
env.warn_only = True

class AnsibleTemplate(TempFileHandler):
    def action(self, resource, action_name):
        inventory_file = self._create_inventory(resource)
@@ -15,12 +20,10 @@ class AnsibleTemplate(TempFileHandler):
        call_args = ['ansible-playbook', '--module-path', '/vagrant/library', '-i', inventory_file, playbook_file]
        log.debug('EXECUTING: %s', ' '.join(call_args))

        try:
            fabric_api.local(' '.join(call_args))
        except Exception as e:
            log.error(e.output)
            log.exception(e)
            raise
        out = fabric_api.local(' '.join(call_args), capture=True)
        if out.failed:
            raise errors.SolarError(out)


    def _create_inventory(self, r):
        directory = self.dirs[r.name]

@@ -9,20 +9,7 @@ import os
from solar.core.log import log
from solar.core.handlers.base import TempFileHandler
from solar.core.provider import GitProvider


# TODO:
# puppet wont always return 0 on error, example:
# http://unix.stackexchange.com/questions/165333/how-to-get-non-zero-exit-code-from-puppet-when-configuration-cannot-be-applied

# in fuel there is a special handler based on puppet summary, but i think we can also use --detailed-exitcodes
# https://docs.puppetlabs.com/references/3.6.2/man/agent.html
# --detailed-exitcodes
# Provide transaction information via exit codes. If this is enabled, an exit
# code of '2' means there were changes, an exit code of '4' means there were
# failures during the transaction, and an exit code of '6' means there were
# both changes and failures.

from solar import errors


class ResourceSSHMixin(object):
@@ -48,6 +35,10 @@ class ResourceSSHMixin(object):
                fabric_api.shell_env(**kwargs['env'])
            )

        if 'warn_only' in kwargs:
            managers.append(
                fabric_api.warn_only())

        with nested(*managers):
            return executor(' '.join(args))

@@ -162,14 +153,21 @@ class Puppet(ResourceSSHMixin, TempFileHandler):

        self._scp_command(resource, action_file, '/tmp/action.pp')

        self._ssh_command(
        cmd = self._ssh_command(
            resource,
            'puppet', 'apply', '-vd', '/tmp/action.pp',
            'puppet', 'apply', '-vd', '/tmp/action.pp', '--detailed-exitcodes',
            env={
                'FACTER_resource_name': resource.name,
            },
            use_sudo=True
            use_sudo=True,
            warn_only=True,
        )
        # 0 - no changes, 2 - successful changes
        if cmd.return_code not in [0, 2]:
            raise errors.SolarError(
                'Puppet for {} failed with {}'.format(
                    resource.name, cmd.return_code))
        return cmd

    def clone_manifests(self, resource):
        git = resource.args['git'].value

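A minimal sketch of the exit-code convention the handler now enforces (pure illustration, no fabric involved; `return_code` stands in for the value fabric reports for the remote command):

```
def puppet_succeeded(return_code):
    # puppet apply --detailed-exitcodes:
    # 0 - no changes, 2 - changes applied successfully,
    # 4 - failures, 6 - both changes and failures
    return return_code in (0, 2)

assert puppet_succeeded(0) and puppet_succeeded(2)
assert not puppet_succeeded(4) and not puppet_succeeded(6)
```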
@@ -7,6 +7,7 @@ __all__ = [
    'load_all',
    'prepare_meta',
    'wrap_resource',
    'validate_resources',
]


@@ -18,3 +19,4 @@ from solar.core.resource.resource import load_all
from solar.core.resource.resource import wrap_resource
from solar.core.resource.virtual_resource import create
from solar.core.resource.virtual_resource import prepare_meta
from solar.core.resource.virtual_resource import validate_resources
@@ -41,9 +41,11 @@ def create_virtual_resource(vr_name, template):
    resources = template['resources']
    connections = []
    created_resources = []

    cwd = os.getcwd()
    for resource in resources:
        name = resource['id']
        base_path = resource['from']
        base_path = os.path.join(cwd, resource['from'])
        args = resource['values']
        new_resources = create(name, base_path, args, vr_name)
        created_resources += new_resources
@@ -67,6 +67,9 @@ class RedisDB(object):
    def clear(self):
        self._r.flushdb()

    def get_set(self, collection):
        return OrderedSet(self._r, collection)

    def clear_collection(self, collection=COLLECTIONS.resource):
        key_glob = self._make_key(collection, '*')

@@ -83,6 +86,57 @@ class RedisDB(object):
        return '{0}:{1}'.format(collection, _id)


class OrderedSet(object):

    def __init__(self, client, collection):
        self.r = client
        self.collection = collection
        self.order_counter = '{}:incr'.format(collection)
        self.order = '{}:order'.format(collection)

    def add(self, items):
        pipe = self.r.pipeline()
        for key, value in items:
            count = self.r.incr(self.order_counter)
            pipe.zadd(self.order, count, key)
            pipe.hset(self.collection, key, json.dumps(value))
        pipe.execute()

    def rem(self, keys):
        pipe = self.r.pipeline()
        for key in keys:
            pipe.zrem(self.order, key)
            pipe.hdel(self.collection, key)
        pipe.execute()

    def get(self, key):
        value = self.r.hget(self.collection, key)
        if value:
            return json.loads(value)
        return None

    def update(self, key, value):
        self.r.hset(self.collection, key, json.dumps(value))

    def clean(self):
        self.rem(self.r.zrange(self.order, 0, -1))

    def rem_left(self, n=1):
        self.rem(self.r.zrevrange(self.order, 0, n-1))

    def reverse(self, n=1):
        result = []
        for key in self.r.zrevrange(self.order, 0, n-1):
            result.append(self.get(key))
        return result

    def list(self, n=0):
        result = []
        for key in self.r.zrange(self.order, 0, n-1):
            result.append(self.get(key))
        return result


class FakeRedisDB(RedisDB):

    REDIS_CLIENT = fakeredis.FakeStrictRedis
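A quick sketch of how `OrderedSet` behaves, runnable against fakeredis (which the repo already uses for `FakeRedisDB`); the keys and payloads here are made up:

```
import fakeredis

r = fakeredis.FakeStrictRedis()
s = OrderedSet(r, 'stage_log')
s.add([('res1', {'action': 'run'}), ('res2', {'action': 'update'})])

print s.list()       # insertion order: [{'action': 'run'}, {'action': 'update'}]
print s.reverse(1)   # newest first: [{'action': 'update'}]
s.rem(['res1'])
print s.get('res1')  # None
```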
@@ -1,197 +0,0 @@


from solar import state
from solar.core.log import log
from solar.core import signals
from solar.core import resource
from solar import utils
from solar.interfaces.db import get_db
from solar.core import actions

db = get_db()

from dictdiffer import diff, patch, revert
from fabric import api as fabric_api
import networkx as nx


def guess_action(from_, to):
    # TODO(dshulyak) it should be more flexible
    if not from_:
        return 'run'
    elif not to:
        return 'remove'
    else:
        # it should be update
        return 'update'


def connections(res, graph):
    result = []
    for pred in graph.predecessors(res.name):
        for num, edge in graph.get_edge_data(pred, res.name).items():
            if 'label' in edge:
                if ':' in edge['label']:
                    parent, child = edge['label'].split(':')
                    mapping = [parent, child]
                else:
                    mapping = [edge['label'], edge['label']]
            else:
                mapping = None
            result.append([pred, res.name, mapping])
    return result


def to_dict(resource, graph):
    res = resource.to_dict()
    res['connections'] = connections(resource, graph)
    return res


def create_diff(staged, commited):
    if 'connections' in commited:
        commited['connections'].sort()
        staged['connections'].sort()
    if 'tags' in commited:
        commited['tags'].sort()
        staged['tags'].sort()

    return list(diff(commited, staged))


def _stage_changes(staged_resources, conn_graph,
                   commited_resources, staged_log):

    try:
        srt = nx.topological_sort(conn_graph)
    except:
        for cycle in nx.simple_cycles(conn_graph):
            log.debug('CYCLE: %s', cycle)
        raise

    for res_uid in srt:
        commited_data = commited_resources.get(res_uid, {})
        staged_data = staged_resources.get(res_uid, {})

        df = create_diff(staged_data, commited_data)

        if df:

            log_item = state.LogItem(
                utils.generate_uuid(),
                res_uid,
                df,
                guess_action(commited_data, staged_data))
            staged_log.append(log_item)
    return staged_log


def stage_changes():
    conn_graph = signals.detailed_connection_graph()
    staged = {r.name: to_dict(r, conn_graph) for r in resource.load_all().values()}
    commited = state.CD()
    log = state.SL()
    log.delete()
    return _stage_changes(staged, conn_graph, commited, log)


def execute(res, action):
    try:
        actions.resource_action(res, action)
        return state.STATES.success
    except Exception as e:
        return state.STATES.error


def commit(li, resources, commited, history):

    staged_res = resources[li.res]
    staged_data = patch(li.diff, commited.get(li.res, {}))

    # TODO(dshulyak) think about this hack for update
    if li.action == 'update':
        commited_res = resource.wrap_resource(
            commited[li.res]['metadata'])
        result_state = execute(commited_res, 'remove')

        staged_res.set_args_from_dict(staged_data['input'])

        if result_state is state.STATES.success:
            result_state = execute(staged_res, 'run')
    else:
        result_state = execute(staged_res, li.action)

    # resource_action return None in case there is no actions
    result_state = result_state or state.STATES.success

    commited[li.res] = staged_data
    li.state = result_state

    history.append(li)

    if result_state is state.STATES.error:
        raise Exception('Failed')


def commit_one():
    commited = state.CD()
    history = state.CL()
    staged = state.SL()

    resources = resource.load_all()
    commit(staged.popleft(), resources, commited, history)


def commit_changes():
    # just shortcut to test stuff
    commited = state.CD()
    history = state.CL()
    staged = state.SL()
    resources = resource.load_all()

    while staged:
        commit(staged.popleft(), resources, commited, history)


def rollback(log_item):
    log = state.SL()

    resources = resource.load_all()
    commited = state.CD()[log_item.res]

    staged = revert(log_item.diff, commited)

    for e, r, mapping in commited.get('connections', ()):
        signals.disconnect(resources[e], resources[r])

    for e, r, mapping in staged.get('connections', ()):
        signals.connect(resources[e], resources[r], dict([mapping]))

    df = create_diff(staged, commited)

    log_item = state.LogItem(
        utils.generate_uuid(),
        log_item.res, df, guess_action(commited, staged))
    log.append(log_item)

    res = resource.load(log_item.res)
    res.set_args_from_dict(staged['input'])

    return log_item


def rollback_uid(uid):
    item = next(l for l in state.CL() if l.uid == uid)
    return rollback(item)


def rollback_last():
    l = state.CL().items[-1]
    return rollback(l)


def rollback_all():
    cl = state.CL()

    while cl:
        rollback(cl.pop())
37 solar/solar/orchestration/executor.py Normal file
@@ -0,0 +1,37 @@

from solar.orchestration.runner import app
from celery import group


def celery_executor(dg, tasks, control_tasks=()):
    to_execute = []

    for task_name in tasks:

        # task_id needs to be unique, so for each plan we will use
        # generated uid of this plan and task_name
        task_id = '{}:{}'.format(dg.graph['uid'], task_name)
        task = app.tasks[dg.node[task_name]['type']]

        if all_success(dg, dg.predecessors(task_name)) or task_name in control_tasks:
            dg.node[task_name]['status'] = 'INPROGRESS'
            for t in generate_task(task, dg.node[task_name], task_id):
                to_execute.append(t)
    return group(to_execute)


def generate_task(task, data, task_id):

    subtask = task.subtask(
        data['args'], task_id=task_id,
        time_limit=data.get('time_limit', None),
        soft_time_limit=data.get('soft_time_limit', None))

    if data.get('target', None):
        subtask.set(queue=data['target'])

    yield subtask


def all_success(dg, nodes):
    return all((dg.node[n]['status'] == 'SUCCESS' for n in nodes))
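The readiness test `celery_executor` applies is easiest to see on a toy graph; a sketch with plain networkx, no celery involved:

```
import networkx as nx

dg = nx.DiGraph()
dg.add_node('a', status='SUCCESS')
dg.add_node('b', status='PENDING')
dg.add_edge('a', 'b')

# same predicate as above: a task becomes schedulable once every
# predecessor reports SUCCESS
def all_success(dg, nodes):
    return all(dg.node[n]['status'] == 'SUCCESS' for n in nodes)

print all_success(dg, dg.predecessors('b'))  # True: 'b' is ready
```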
@@ -7,6 +7,8 @@ import networkx as nx
import redis
import yaml

from solar import utils


r = redis.StrictRedis(host='10.0.0.2', port=6379, db=1)

@@ -47,13 +49,35 @@ def parse_plan(plan_data):
    return dg


def create_plan_from_graph(dg):
    dg.graph['uid'] = "{0}:{1}".format(dg.graph['name'], str(uuid.uuid4()))
    save_graph(dg.graph['uid'], dg)
    return dg.graph['uid']


def show(uid):
    dg = get_graph(uid)
    result = {}
    tasks = []
    result['uid'] = dg.graph['uid']
    result['name'] = dg.graph['name']
    for n in nx.topological_sort(dg):
        data = dg.node[n]
        tasks.append(
            {'uid': n,
             'parameters': data,
             'before': dg.successors(n),
             'after': dg.predecessors(n)
             })
    result['tasks'] = tasks
    return utils.yaml_dump(result)


def create_plan(plan_data):
    """
    """
    dg = parse_plan(plan_data)
    dg.graph['uid'] = "{0}:{1}".format(dg.graph['name'], str(uuid.uuid4()))
    save_graph(dg.graph['uid'], dg)
    return dg.graph['uid']
    return create_plan_from_graph(dg)


def update_plan(uid, plan_data):
@@ -78,14 +102,6 @@ def reset(uid, states=None):
    save_graph(uid, dg)


def soft_stop(uid):
    """Graph will stop when all currently inprogress tasks will be finished
    """
    dg = get_graph(uid)
    dg.graph['stop'] = True
    save_graph(uid, dg)


def report_topo(uid):

    dg = get_graph(uid)
63 solar/solar/orchestration/limits.py Normal file
@@ -0,0 +1,63 @@


class Chain(object):

    def __init__(self, dg, inprogress, added):
        self.dg = dg
        self.inprogress = inprogress
        self.added = added
        self.rules = []

    def add_rule(self, rule):
        self.rules.append(rule)

    @property
    def filtered(self):
        for item in self.added:
            for rule in self.rules:
                if not rule(self.dg, self.inprogress, item):
                    break
            else:
                self.inprogress.append(item)
                yield item

    def __iter__(self):
        return iter(self.filtered)


def get_default_chain(dg, inprogress, added):
    chain = Chain(dg, inprogress, added)
    chain.add_rule(items_rule)
    chain.add_rule(target_based_rule)
    chain.add_rule(type_based_rule)
    return chain


def type_based_rule(dg, inprogress, item):
    """condition will be specified like:
    type_limit: 2
    """
    _type = dg.node[item].get('resource_type')
    if not 'type_limit' in dg.node[item]: return True
    if not _type: return True

    type_count = 0
    for n in inprogress:
        if dg.node[n].get('resource_type') == _type:
            type_count += 1
    return dg.node[item]['type_limit'] > type_count


def target_based_rule(dg, inprogress, item, limit=1):
    target = dg.node[item].get('target')
    if not target: return True

    target_count = 0
    for n in inprogress:
        if dg.node[n].get('target') == target:
            target_count += 1
    return limit > target_count


def items_rule(dg, inprogress, item, limit=100):
    return len(inprogress) < limit
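A toy run of the rule chain: two tasks target the same node, and `target_based_rule` (one in-flight task per target) lets only the first one through. A sketch:

```
import networkx as nx

dg = nx.DiGraph()
dg.add_node('t1', target='node1', status='PENDING')
dg.add_node('t2', target='node1', status='PENDING')

chain = get_default_chain(dg, [], ['t1', 't2'])
print list(chain)  # ['t1'] -- 't2' waits until 't1' leaves INPROGRESS
```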
11 solar/solar/orchestration/runner.py Normal file
@@ -0,0 +1,11 @@


from celery import Celery

app = Celery(
    include=['solar.system_log.tasks', 'solar.orchestration.tasks'],
    backend='redis://10.0.0.2:6379/1',
    broker='redis://10.0.0.2:6379/1')
app.conf.update(CELERY_ACCEPT_CONTENT = ['json'])
app.conf.update(CELERY_TASK_SERIALIZER = 'json')
@@ -1,54 +1,54 @@


from functools import partial, wraps
from itertools import islice
import subprocess
import time

from celery import Celery
from celery.app import task
from celery import group
from celery.exceptions import Ignore
import redis

from solar.orchestration import graph
from solar.core import actions
from solar.core import resource
from solar.system_log.tasks import commit_logitem, error_logitem
from solar.orchestration.runner import app
from solar.orchestration.traversal import traverse
from solar.orchestration import limits
from solar.orchestration import executor


app = Celery(
    'tasks',
    backend='redis://10.0.0.2:6379/1',
    broker='redis://10.0.0.2:6379/1')
app.conf.update(CELERY_ACCEPT_CONTENT = ['json'])
app.conf.update(CELERY_TASK_SERIALIZER = 'json')

r = redis.StrictRedis(host='10.0.0.2', port=6379, db=1)


__all__ = ['solar_resource', 'cmd', 'sleep',
           'error', 'fault_tolerance', 'schedule_start', 'schedule_next']

# NOTE(dshulyak) I am not using celery.signals because it is not possible
# to extract task_id from *task_success* signal
class ReportTask(task.Task):

    def on_success(self, retval, task_id, args, kwargs):
        schedule_next.apply_async(args=[task_id, 'SUCCESS'], queue='scheduler')
        commit_logitem.apply_async(args=[task_id], queue='system_log')

    def on_failure(self, exc, task_id, args, kwargs, einfo):
        schedule_next.apply_async(
            args=[task_id, 'ERROR'],
            kwargs={'errmsg': str(einfo.exception)},
            queue='scheduler')
        error_logitem.apply_async(args=[task_id], queue='system_log')


report_task = partial(app.task, base=ReportTask, bind=True)


@report_task
@report_task(name='solar_resource')
def solar_resource(ctxt, resource_name, action):
    res = resource.load(resource_name)
    return actions.resource_action(res, action)


@report_task
@report_task(name='cmd')
def cmd(ctxt, cmd):
    popen = subprocess.Popen(
        cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
@@ -59,17 +59,17 @@ def cmd(ctxt, cmd):
    return popen.returncode, out, err


@report_task
@report_task(name='sleep')
def sleep(ctxt, seconds):
    time.sleep(seconds)


@report_task
@report_task(name='error')
def error(ctxt, message):
    raise Exception(message)


@report_task
@report_task(name='fault_tolerance')
def fault_tolerance(ctxt, percent):
    task_id = ctxt.request.id
    plan_uid, task_name = task_id.rsplit(':', 1)
@@ -89,12 +89,12 @@ def fault_tolerance(ctxt, percent):
            succes_percent, percent))


@report_task
@report_task(name='echo')
def echo(ctxt, message):
    return message


@report_task
@report_task(name='anchor')
def anchor(ctxt, *args):
    # such tasks should be walked when at least 1/3/exact number of resources visited
    dg = graph.get_graph('current')
@@ -104,13 +104,18 @@ def anchor(ctxt, *args):


def schedule(plan_uid, dg):
    next_tasks = list(traverse(dg))
    tasks = traverse(dg)
    limit_chain = limits.get_default_chain(
        dg,
        [t for t in dg if dg.node[t]['status'] == 'INPROGRESS'],
        tasks)
    execution = executor.celery_executor(
        dg, limit_chain, control_tasks=('fault_tolerance',))
    graph.save_graph(plan_uid, dg)
    print 'GRAPH {0}\n NEXT TASKS {1}'.format(dg.node, next_tasks)
    group(next_tasks)()
    execution()


@app.task
@app.task(name='schedule_start')
def schedule_start(plan_uid, start=None, end=None):
    """On receive finished task should update storage with task result:

@@ -118,11 +123,19 @@ def schedule_start(plan_uid, start=None, end=None):
    - apply different policies to tasks
    """
    dg = graph.get_graph(plan_uid)
    dg.graph['stop'] = False
    schedule(plan_uid, dg)


@app.task
@app.task(name='soft_stop')
def soft_stop(plan_uid):
    dg = graph.get_graph(plan_uid)
    for n in dg:
        if dg.node[n]['status'] == 'PENDING':
            dg.node[n]['status'] = 'SKIPPED'
    graph.save_graph(plan_uid, dg)


@app.task(name='schedule_next')
def schedule_next(task_id, status, errmsg=None):
    plan_uid, task_name = task_id.rsplit(':', 1)
    dg = graph.get_graph(plan_uid)
@@ -130,62 +143,3 @@ def schedule_next(task_id, status, errmsg=None):
    dg.node[task_name]['errmsg'] = errmsg

    schedule(plan_uid, dg)

# TODO(dshulyak) some tasks should be evaluated even if not all predecessors
# succeeded, how to identify this?
# - add ignore_error on edge
# - add ignore_predecessor_errors on task in consideration
# - make fault_tolerance not a task but a policy for all tasks
control_tasks = [fault_tolerance, anchor]


def traverse(dg):
    """
    1. Node should be visited only when all predecessors already visited
    2. Visited nodes should have any state except PENDING, INPROGRESS; for now
       it is SUCCESS or ERROR, but it can be extended
    3. If node is INPROGRESS it should not be visited once again
    """
    visited = set()
    for node in dg:
        data = dg.node[node]
        if data['status'] not in ('PENDING', 'INPROGRESS'):
            visited.add(node)

    for node in dg:
        data = dg.node[node]

        if node in visited:
            continue
        elif data['status'] == 'INPROGRESS':
            continue

        predecessors = set(dg.predecessors(node))

        if predecessors <= visited:
            task_id = '{}:{}'.format(dg.graph['uid'], node)

            task_name = '{}.{}'.format(__name__, data['type'])
            task = app.tasks[task_name]

            if all_success(dg, predecessors) or task in control_tasks:
                dg.node[node]['status'] = 'INPROGRESS'
                for t in generate_task(task, dg, data, task_id):
                    yield t


def generate_task(task, dg, data, task_id):

    subtask = task.subtask(
        data['args'], task_id=task_id,
        time_limit=data.get('time_limit', None),
        soft_time_limit=data.get('soft_time_limit', None))

    if data.get('target', None):
        subtask.set(queue=data['target'])

    yield subtask


def all_success(dg, nodes):
    return all((dg.node[n]['status'] == 'SUCCESS' for n in nodes))

36 solar/solar/orchestration/traversal.py Normal file
@@ -0,0 +1,36 @@
"""

task should be visited only when predecessors are visited,
visited node could be only in SUCCESS or ERROR

task can be scheduled for execution if it is not yet visited, and state
not in SKIPPED, INPROGRESS

PENDING - task that is scheduled to be executed
ERROR - visited node, but failed, can be failed by timeout
SUCCESS - visited node, successful
INPROGRESS - task already scheduled, can be moved to ERROR or SUCCESS
SKIPPED - not visited, and should be skipped from execution
"""


VISITED = ('SUCCESS', 'ERROR', 'NOOP')
BLOCKED = ('INPROGRESS', 'SKIPPED')


def traverse(dg):

    visited = set()
    for node in dg:
        data = dg.node[node]
        if data['status'] in VISITED:
            visited.add(node)

    for node in dg:
        data = dg.node[node]

        if node in visited or data['status'] in BLOCKED:
            continue

        if set(dg.predecessors(node)) <= visited:
            yield node
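A small walk-through of `traverse` (a sketch): only tasks whose predecessors are all in a visited state are yielded.

```
import networkx as nx

dg = nx.DiGraph()
dg.add_node('a', status='SUCCESS')
dg.add_node('b', status='PENDING')
dg.add_node('c', status='PENDING')
dg.add_edge('a', 'b')
dg.add_edge('b', 'c')

print list(traverse(dg))  # ['b'] -- 'c' still waits on 'b'
```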
@@ -1,152 +0,0 @@
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os
import collections
from collections import deque
from functools import partial

from solar import utils

from enum import Enum

from solar.interfaces.db import get_db

db = get_db()


STATES = Enum('States', 'error inprogress pending success')


def state_file(name):
    if 'log' in name:
        return Log(name)
    elif 'data' in name:
        return Data(name)


CD = partial(state_file, 'commited_data')
SD = partial(state_file, 'staged_data')
SL = partial(state_file, 'stage_log')
IL = partial(state_file, 'inprogress_log')
CL = partial(state_file, 'commit_log')


class LogItem(object):

    def __init__(self, uid, res_uid, diff, action, state=None):
        self.uid = uid
        self.res = res_uid
        self.diff = diff
        self.state = state or STATES.pending
        self.action = action

    def to_yaml(self):
        return utils.yaml_dump(self.to_dict())

    def to_dict(self):
        return {'uid': self.uid,
                'res': self.res,
                'diff': self.diff,
                'state': self.state.name,
                'action': self.action}

    def __str__(self):
        return self.to_yaml()

    def __repr__(self):
        return self.to_yaml()


class Log(object):

    def __init__(self, path):
        self.path = path
        items = []
        r = db.read(path, collection=db.COLLECTIONS.state_log)
        if r:
            items = r or items

        self.items = deque([LogItem(
            l['uid'], l['res'],
            l['diff'], l['action'],
            getattr(STATES, l['state'])) for l in items])

    def delete(self):
        self.items = deque()
        db.delete(self.path, db.COLLECTIONS.state_log)

    def sync(self):
        db.save(
            self.path,
            [i.to_dict() for i in self.items],
            collection=db.COLLECTIONS.state_log
        )

    def append(self, logitem):
        self.items.append(logitem)
        self.sync()

    def popleft(self):
        item = self.items.popleft()
        self.sync()
        return item

    def pop(self):
        item = self.items.pop()
        self.sync()
        return item

    def show(self, verbose=False):
        return ['L(uuid={0}, res={1}, action={2})'.format(
            l.uid, l.res, l.action) for l in self.items]

    def __len__(self):
        return len(self.items)

    def __repr__(self):
        return 'Log({0})'.format(self.path)

    def __iter__(self):
        return iter(self.items)

    def __nonzero__(self):
        return bool(self.items)


class Data(collections.MutableMapping):

    def __init__(self, path):
        self.path = path
        self.store = {}
        r = db.read(path, collection=db.COLLECTIONS.state_data)
        if r:
            self.store = r or self.store

    def __getitem__(self, key):
        return self.store[key]

    def __setitem__(self, key, value):
        self.store[key] = value
        db.save(self.path, self.store, collection=db.COLLECTIONS.state_data)

    def __delitem__(self, key):
        self.store.pop(key)
        db.save(self.path, self.store, collection=db.COLLECTIONS.state_data)

    def __iter__(self):
        return iter(self.store)

    def __len__(self):
        return len(self.store)
0 solar/solar/system_log/__init__.py Normal file
114
solar/solar/system_log/change.py
Normal file
114
solar/solar/system_log/change.py
Normal file
@ -0,0 +1,114 @@

from dictdiffer import diff, patch, revert
import networkx as nx

from solar.core.log import log
from solar.core import signals
from solar.core import resource
from solar import utils
from solar.interfaces.db import get_db
from solar.core import actions

from solar.system_log import data
from solar.orchestration import graph

db = get_db()


def guess_action(from_, to):
    # NOTE(dshulyak) imo the way to solve this is a DSL for orchestration,
    # something where this action will be explicitly specified
    if not from_:
        return 'run'
    elif not to:
        return 'remove'
    else:
        return 'update'


def connections(res, graph):
    result = []
    for pred in graph.predecessors(res.name):
        for num, edge in graph.get_edge_data(pred, res.name).items():
            if 'label' in edge:
                if ':' in edge['label']:
                    parent, child = edge['label'].split(':')
                    mapping = [parent, child]
                else:
                    mapping = [edge['label'], edge['label']]
            else:
                mapping = None
            result.append([pred, res.name, mapping])
    return result


def create_diff(staged, commited):
    return list(diff(commited, staged))


def _stage_changes(staged_resources, conn_graph,
                   commited_resources, staged_log):

    try:
        srt = nx.topological_sort(conn_graph)
    except nx.NetworkXUnfeasible:
        # ordering is impossible when the connection graph has cycles;
        # log the offending cycles before re-raising
        for cycle in nx.simple_cycles(conn_graph):
            log.debug('CYCLE: %s', cycle)
        raise

    for res_uid in srt:
        commited_data = commited_resources.get(res_uid, {})
        staged_data = staged_resources.get(res_uid, {})

        df = create_diff(staged_data, commited_data)

        if df:
            log_item = data.LogItem(
                utils.generate_uuid(),
                res_uid,
                df,
                guess_action(commited_data, staged_data))
            staged_log.append(log_item)
    return staged_log


def stage_changes():
    log = data.SL()
    log.clean()
    conn_graph = signals.detailed_connection_graph()
    staged = {r.name: r.args_show()
              for r in resource.load_all().values()}
    commited = data.CD()
    return _stage_changes(staged, conn_graph, commited, log)


def send_to_orchestration():
    conn_graph = signals.detailed_connection_graph()
    dg = nx.DiGraph()
    staged = {r.name: r.args_show()
              for r in resource.load_all().values()}
    commited = data.CD()

    for res_uid in nx.topological_sort(conn_graph):
        commited_data = commited.get(res_uid, {})
        staged_data = staged.get(res_uid, {})

        df = create_diff(staged_data, commited_data)

        # only resources whose staged data differs become plan nodes
        if df:
            dg.add_node(
                res_uid, status='PENDING',
                errmsg=None,
                **parameters(res_uid, guess_action(commited_data, staged_data)))
            for pred in conn_graph.predecessors(res_uid):
                if pred in dg:
                    dg.add_edge(pred, res_uid)

    # TODO: decide what the plan name should be
    dg.graph['name'] = 'system_log'
    return graph.create_plan_from_graph(dg)


def parameters(res, action):
    return {'args': [res, action],
            'type': 'solar_resource'}
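Taken as a whole, change.py gives the system log two entry points: stage_changes() records per-resource diffs against the commited state, and send_to_orchestration() turns the same diffs into an executable plan. A minimal usage sketch (assuming a configured database and some already-connected resources; variable names are illustrative):

```
# Hypothetical driver for the staging flow above -- illustration only.
from solar.system_log import change

# Record which resources drifted from the commited state.
staged_log = change.stage_changes()
for item in staged_log:
    print(item.res, item.action)

# Build a deployment plan from the same diffs; each changed resource
# becomes a node tagged with its guessed action (run/update/remove).
plan = change.send_to_orchestration()
```

The returned plan is what the orchestration commands then operate on.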
129
solar/solar/system_log/data.py
Normal file
@ -0,0 +1,129 @@

import os
import collections
from collections import deque
from functools import partial

from solar import utils
from solar.interfaces.db import get_db

from enum import Enum


db = get_db()


STATES = Enum('States', 'error inprogress pending success')


def state_file(name):
    if 'log' in name:
        return Log(name)
    elif 'data' in name:
        return Data(name)


# accessors for the commited data, staged log and commit log
CD = partial(state_file, 'commited_data')
SL = partial(state_file, 'stage_log')
CL = partial(state_file, 'commit_log')


class LogItem(object):

    def __init__(self, uid, res, diff, action, state=None):
        self.uid = uid
        self.res = res
        self.diff = diff
        self.state = state or STATES.pending
        self.action = action

    def to_yaml(self):
        return utils.yaml_dump(self.to_dict())

    def to_dict(self):
        return {'uid': self.uid,
                'res': self.res,
                'diff': self.diff,
                'state': self.state.name,
                'action': self.action}

    @classmethod
    def from_dict(cls, **kwargs):
        state = getattr(STATES, kwargs.get('state', ''), STATES.pending)
        kwargs['state'] = state
        return cls(**kwargs)

    def __str__(self):
        return self.to_yaml()

    def __repr__(self):
        return self.to_yaml()


class Log(object):

    def __init__(self, path):
        self.ordered_log = db.get_set(path)

    def append(self, logitem):
        self.ordered_log.add([(logitem.res, logitem.to_dict())])

    def pop(self, uid):
        item = self.get(uid)
        if not item:
            return None
        self.ordered_log.rem([uid])
        return item

    def update(self, logitem):
        self.ordered_log.update(logitem.res, logitem.to_dict())

    def clean(self):
        self.ordered_log.clean()

    def get(self, key):
        item = self.ordered_log.get(key)
        if item:
            return LogItem.from_dict(**item)
        return None

    def collection(self, n=0):
        for item in self.ordered_log.reverse(n=n):
            yield LogItem.from_dict(**item)

    def reverse(self, n=0):
        for item in self.ordered_log.list(n=n):
            yield LogItem.from_dict(**item)

    def __iter__(self):
        return iter(self.collection())


class Data(collections.MutableMapping):

    def __init__(self, path):
        self.path = path
        self.store = {}
        r = db.read(path, collection=db.COLLECTIONS.state_data)
        if r:
            self.store = r or self.store

    def __getitem__(self, key):
        return self.store[key]

    def __setitem__(self, key, value):
        self.store[key] = value
        db.save(self.path, self.store, collection=db.COLLECTIONS.state_data)

    def __delitem__(self, key):
        self.store.pop(key)
        db.save(self.path, self.store, collection=db.COLLECTIONS.state_data)

    def __iter__(self):
        return iter(self.store)

    def __len__(self):
        return len(self.store)

    def clean(self):
        db.save(self.path, {}, collection=db.COLLECTIONS.state_data)
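LogItem round-trips through a plain dict for storage, with from_dict falling back to STATES.pending for a missing or unrecognized state name. A quick sketch of that round-trip (hypothetical values; assumes the db backend is configured so the module imports):

```
# Hypothetical LogItem round-trip -- illustration only.
from solar.system_log.data import LogItem, STATES

item = LogItem('some-uuid', 'keystone_db',
               [('change', 'input.port.value', (3306, 4400))],
               'update')
serialized = item.to_dict()          # state is stored by name: 'pending'
restored = LogItem.from_dict(**serialized)

assert restored.state is STATES.pending
assert restored.action == 'update'
```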
24
solar/solar/system_log/operations.py
Normal file
@ -0,0 +1,24 @@

from solar.system_log import data
from dictdiffer import patch


def set_error(task_uuid, *args, **kwargs):
    sl = data.SL()
    item = sl.get(task_uuid)
    if item:
        item.state = data.STATES.error
        sl.update(item)


def move_to_commited(task_uuid, *args, **kwargs):
    sl = data.SL()
    item = sl.pop(task_uuid)
    if item:
        commited = data.CD()
        # replay the staged diff on top of the commited snapshot
        staged_data = patch(item.diff, commited.get(item.res, {}))
        cl = data.CL()
        item.state = data.STATES.success
        cl.append(item)
        commited[item.res] = staged_data
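move_to_commited leans on a dictdiffer invariant: patching the commited snapshot with the diff recorded at staging time yields exactly the staged data, so the commited store can be advanced one log item at a time. A minimal check of that invariant (hypothetical data):

```
# The dictdiffer property move_to_commited relies on -- illustration only.
from dictdiffer import diff, patch

commited = {'input': {'port': {'value': 3306}}}
staged = {'input': {'port': {'value': 4400}}}

df = list(diff(commited, staged))     # same orientation as create_diff()
assert patch(df, commited) == staged  # replaying the diff recovers staged
```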
16
solar/solar/system_log/tasks.py
Normal file
@ -0,0 +1,16 @@

from solar.orchestration.runner import app
from solar.system_log.operations import set_error, move_to_commited

__all__ = ['error_logitem', 'commit_logitem']


@app.task
def error_logitem(task_uuid):
    return set_error(task_uuid.rsplit(':', 1)[-1])


@app.task
def commit_logitem(task_uuid):
    return move_to_commited(task_uuid.rsplit(':', 1)[-1])
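The rsplit in both tasks strips everything up to the last colon, which suggests task ids of the form `<plan_uid>:<log_item_uid>`; that format is an inference from this code, not something the commit states:

```
# Inferred task id format -- only the trailing segment reaches the log.
task_uuid = 'some-plan-uid:some-logitem-uid'  # hypothetical id
assert task_uuid.rsplit(':', 1)[-1] == 'some-logitem-uid'
```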
@ -4,7 +4,7 @@ import tempfile
import unittest
import yaml

from solar.core import virtual_resource as vr
from solar.core.resource import virtual_resource as vr
from solar.core import signals as xs
from solar.interfaces.db import get_db

22
solar/solar/test/test_celery_executor.py
Normal file
@ -0,0 +1,22 @@

import networkx as nx
from pytest import fixture
from mock import patch

from solar.orchestration import executor


@fixture
def dg():
    ex = nx.DiGraph()
    ex.add_node('t1', args=['t'], status='PENDING', type='echo')
    ex.graph['uid'] = 'some_string'
    return ex


@patch.object(executor, 'app')
def test_celery_executor(mapp, dg):
    """Just check that it doesn't fail for now."""
    assert executor.celery_executor(dg, ['t1'])
    assert dg.node['t1']['status'] == 'INPROGRESS'
@ -3,7 +3,7 @@ from pytest import fixture
from dictdiffer import revert, patch
import networkx as nx

from solar import operations
from solar.system_log import change
from solar.core.resource import wrap_resource


@ -32,12 +32,12 @@ def commited():

@fixture
def full_diff(staged):
    return operations.create_diff(staged, {})
    return change.create_diff(staged, {})


@fixture
def diff_for_update(staged, commited):
    return operations.create_diff(staged, commited)
    return change.create_diff(staged, commited)


def test_create_diff_with_empty_commited(full_diff):
@ -98,7 +98,7 @@ def conn_graph():

def test_stage_changes(resources, conn_graph):
    commited = {}
    log = operations._stage_changes(resources, conn_graph, commited, [])
    log = change._stage_changes(resources, conn_graph, commited, [])

    assert len(log) == 3
    assert [l.res for l in log] == ['n.1', 'r.1', 'h.1']
50
solar/solar/test/test_limits.py
Normal file
@ -0,0 +1,50 @@

from pytest import fixture
import networkx as nx

from solar.orchestration import limits


@fixture
def dg():
    ex = nx.DiGraph()
    ex.add_node('t1', status='PENDING', target='1',
                resource_type='node', type_limit=2)
    ex.add_node('t2', status='PENDING', target='1',
                resource_type='node', type_limit=2)
    ex.add_node('t3', status='PENDING', target='1',
                resource_type='node', type_limit=2)
    return ex


def test_target_rule(dg):

    assert limits.target_based_rule(dg, [], 't1') == True
    assert limits.target_based_rule(dg, ['t1'], 't2') == False


def test_type_limit_rule(dg):
    assert limits.type_based_rule(dg, ['t1'], 't2') == True
    assert limits.type_based_rule(dg, ['t1', 't2'], 't3') == False


def test_items_rule(dg):

    assert limits.items_rule(dg, ['1']*99, '2')
    assert limits.items_rule(dg, ['1']*99, '2', limit=10) == False


@fixture
def target_dg():
    ex = nx.DiGraph()
    ex.add_node('t1', status='PENDING', target='1')
    ex.add_node('t2', status='PENDING', target='1')

    return ex


def test_filtering_chain(target_dg):

    chain = limits.get_default_chain(target_dg, [], ['t1', 't2'])
    assert list(chain) == ['t1']
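These tests pin down the rule semantics: target_based_rule admits a task only while no already-picked task shares its target, type_based_rule caps concurrent tasks of one resource_type at that node's type_limit, and items_rule caps overall concurrency (apparently 100 by default, judging by the ['1']*99 fixtures). A sketch of rules consistent with the tests (the shipped solar.orchestration.limits may differ):

```
# Rules reconstructed from test_limits.py -- illustrative, not the shipped code.
def target_based_rule(dg, to_execute, task_name):
    # one in-flight task per deployment target
    target = dg.node[task_name].get('target')
    return all(dg.node[t].get('target') != target for t in to_execute)


def type_based_rule(dg, to_execute, task_name):
    # at most `type_limit` concurrent tasks of the same resource_type
    node = dg.node[task_name]
    same_type = [t for t in to_execute
                 if dg.node[t].get('resource_type') == node.get('resource_type')]
    return len(same_type) < node.get('type_limit', float('inf'))


def items_rule(dg, to_execute, task_name, limit=100):
    # cap overall concurrency
    return len(to_execute) < limit
```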
@ -1,74 +0,0 @@

import pytest

from solar.core import resource
from solar import operations
from solar import state


@pytest.fixture
def default_resources():
    from solar.core import signals
    from solar.core import resource

    node1 = resource.wrap_resource(
        {'id': 'node1',
         'input': {'ip': {'value': '10.0.0.3'}}})
    rabbitmq_service1 = resource.wrap_resource(
        {'id': 'rabbitmq',
         'input': {
             'ip': {'value': ''},
             'image': {'value': 'rabbitmq:3-management'}}})
    signals.connect(node1, rabbitmq_service1)
    return resource.load_all()


@pytest.mark.usefixtures("default_resources")
def test_changes_on_update_image():
    log = operations.stage_changes()

    assert len(log) == 2

    operations.commit_changes()

    rabbitmq = resource.load('rabbitmq')
    rabbitmq.update({'image': 'different'})
    log = operations.stage_changes()

    assert len(log) == 1

    item = log.items[0]

    assert item.diff == [
        ('change', u'input.image.value',
         (u'rabbitmq:3-management', u'different')),
        ('change', u'metadata.input.image.value',
         (u'rabbitmq:3-management', u'different'))]

    assert item.action == 'update'

    operations.commit_changes()

    commited = state.CD()

    assert commited['rabbitmq']['input']['image'] == {
        u'emitter': None, u'value': u'different'}

    reverse = operations.rollback(state.CL().items[-1])

    assert reverse.diff == [
        ('change', u'input.image.value',
         (u'different', u'rabbitmq:3-management')),
        ('change', u'metadata.input.image.value',
         (u'different', u'rabbitmq:3-management'))]

    operations.commit_changes()

    commited = state.CD()

    assert commited['rabbitmq']['input']['image'] == {
        u'emitter': None, u'value': u'rabbitmq:3-management'}
56
solar/solar/test/test_traversal.py
Normal file
@ -0,0 +1,56 @@

import networkx as nx
from pytest import fixture

from solar.orchestration.traversal import traverse


@fixture
def tasks():
    return [
        {'id': 't1', 'status': 'PENDING'},
        {'id': 't2', 'status': 'PENDING'},
        {'id': 't3', 'status': 'PENDING'},
        {'id': 't4', 'status': 'PENDING'},
        {'id': 't5', 'status': 'PENDING'}]


@fixture
def dg(tasks):
    ex = nx.DiGraph()
    for t in tasks:
        ex.add_node(t['id'], status=t['status'])
    return ex


def test_parallel(dg):
    dg.add_path(['t1', 't3', 't4', 't5'])
    dg.add_path(['t2', 't3'])

    assert set(traverse(dg)) == {'t1', 't2'}


def test_walked_only_when_all_predecessors_visited(dg):
    dg.add_path(['t1', 't3', 't4', 't5'])
    dg.add_path(['t2', 't3'])

    dg.node['t1']['status'] = 'SUCCESS'
    dg.node['t2']['status'] = 'INPROGRESS'

    assert set(traverse(dg)) == set()

    dg.node['t2']['status'] = 'SUCCESS'

    assert set(traverse(dg)) == {'t3'}


def test_nothing_will_be_walked_if_parent_is_skipped(dg):
    dg.add_path(['t1', 't2', 't3', 't4', 't5'])
    dg.node['t1']['status'] = 'SKIPPED'

    assert set(traverse(dg)) == set()


def test_node_will_be_walked_if_parent_is_noop(dg):
    dg.add_path(['t1', 't2', 't3', 't4', 't5'])
    dg.node['t1']['status'] = 'NOOP'

    assert set(traverse(dg)) == {'t2'}
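Together these tests specify traverse(): yield each PENDING task whose predecessors have all been visited, where SUCCESS and NOOP count as visited but SKIPPED and INPROGRESS do not. One implementation consistent with the tests (the shipped solar.orchestration.traversal may differ):

```
# traverse() reconstructed from test_traversal.py -- illustrative only.
VISITED = ('SUCCESS', 'NOOP')


def traverse(dg):
    for node in dg:
        if dg.node[node]['status'] != 'PENDING':
            continue
        # a task is ready only once every predecessor reached a visited state
        if all(dg.node[p]['status'] in VISITED
               for p in dg.predecessors(node)):
            yield node
```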
@ -1,169 +0,0 @@
import pytest

from solar.core import signals
from solar.core import resource
from solar import operations


@pytest.fixture
def resources():

    node1 = resource.wrap_resource(
        {'id': 'node1',
         'input': {'ip': {'value': '10.0.0.3'}}})
    mariadb_service1 = resource.wrap_resource(
        {'id': 'mariadb',
         'input': {
             'port': {'value': 3306},
             'ip': {'value': ''}}})
    keystone_db = resource.wrap_resource(
        {'id': 'keystone_db',
         'input': {
             'login_port': {'value': ''},
             'ip': {'value': ''}}})
    signals.connect(node1, mariadb_service1)
    signals.connect(node1, keystone_db)
    signals.connect(mariadb_service1, keystone_db, {'port': 'login_port'})
    return resource.load_all()


def test_update_port_on_mariadb(resources):
    operations.stage_changes()
    operations.commit_changes()

    mariadb = resources['mariadb']

    mariadb.update({'port': 4400})

    log = operations.stage_changes()

    assert len(log) == 2

    mariadb_log = log.items[0]

    assert mariadb_log.diff == [
        ('change', u'input.port.value', (3306, 4400)),
        ('change', u'metadata.input.port.value', (3306, 4400))]

    keystone_db_log = log.items[1]

    assert keystone_db_log.diff == [
        ('change', u'input.login_port.value', (3306, 4400)),
        ('change', u'metadata.input.login_port.value', (3306, 4400))]


@pytest.fixture
def simple_input():
    res1 = resource.wrap_resource(
        {'id': 'res1',
         'input': {'ip': {'value': '10.10.0.2'}}})
    res2 = resource.wrap_resource(
        {'id': 'res2',
         'input': {'ip': {'value': '10.10.0.3'}}})

    signals.connect(res1, res2)
    return resource.load_all()


def test_update_simple_resource(simple_input):
    operations.stage_changes()
    operations.commit_changes()

    res1 = simple_input['res1']
    res1.update({'ip': '10.0.0.3'})

    log = operations.stage_changes()

    assert len(log) == 2

    assert log.items[0].diff == [
        ('change', u'input.ip.value', ('10.10.0.2', '10.0.0.3')),
        ('change', 'metadata.input.ip.value', ('10.10.0.2', '10.0.0.3')),
    ]
    assert log.items[1].diff == [
        ('change', u'input.ip.value', ('10.10.0.2', '10.0.0.3')),
        ('change', 'metadata.input.ip.value', ('10.10.0.2', '10.0.0.3')),
    ]

    operations.commit_changes()
    assert simple_input['res1'].args_dict() == {
        'ip': '10.0.0.3',
    }
    assert simple_input['res2'].args_dict() == {
        'ip': '10.0.0.3',
    }

    log_item = operations.rollback_last()
    assert log_item.diff == [
        ('change', u'input.ip.value', (u'10.0.0.3', u'10.10.0.2')),
        ('change', 'metadata.input.ip.value', ('10.0.0.3', '10.10.0.2')),
    ]

    res2 = resource.load('res2')
    assert res2.args_dict() == {
        'ip': '10.10.0.2',
    }


@pytest.fixture
def list_input():
    res1 = resource.wrap_resource(
        {'id': 'res1',
         'input': {'ip': {'value': '10.10.0.2'}}})
    res2 = resource.wrap_resource(
        {'id': 'res2',
         'input': {'ip': {'value': '10.10.0.3'}}})
    consumer = resource.wrap_resource(
        {'id': 'consumer',
         'input':
             {'ips': {'value': [],
                      'schema': ['str']}}})

    signals.connect(res1, consumer, {'ip': 'ips'})
    signals.connect(res2, consumer, {'ip': 'ips'})
    return resource.load_all()


def test_update_list_resource(list_input):
    operations.stage_changes()
    operations.commit_changes()

    res3 = resource.wrap_resource(
        {'id': 'res3',
         'input': {'ip': {'value': '10.10.0.4'}}})
    signals.connect(res3, list_input['consumer'], {'ip': 'ips'})

    log = operations.stage_changes()

    assert len(log) == 2

    assert log.items[0].res == res3.name
    assert log.items[1].diff == [
        ('add', u'connections', [(2, ['res3', u'consumer', ['ip', 'ips']])]),
        ('add', u'input.ips', [
            (2, {u'emitter_attached_to': u'res3', u'emitter': u'ip', u'value': u'10.10.0.4'})]),
        ('add', u'metadata.input.ips.value',
         [(2, {u'emitter_attached_to': u'res3', u'emitter': u'ip', u'value': u'10.10.0.4'})])]

    operations.commit_changes()
    assert list_input['consumer'].args_dict() == {
        u'ips': [
            {u'emitter_attached_to': u'res1', u'emitter': u'ip', u'value': u'10.10.0.2'},
            {u'emitter_attached_to': u'res2', u'emitter': u'ip', u'value': u'10.10.0.3'},
            {u'emitter_attached_to': u'res3', u'emitter': u'ip', u'value': u'10.10.0.4'}]}

    log_item = operations.rollback_last()
    assert log_item.diff == [
        ('remove', u'connections', [(2, ['res3', u'consumer', ['ip', 'ips']])]),
        ('remove', u'input.ips', [
            (2, {u'emitter_attached_to': u'res3', u'emitter': u'ip', u'value': u'10.10.0.4'})]),
        ('remove', u'metadata.input.ips.value',
         [(2, {u'emitter_attached_to': u'res3', u'emitter': u'ip', u'value': u'10.10.0.4'})])]

    consumer = resource.load('consumer')
    assert consumer.args_dict() == {
        u'ips': [{u'emitter': u'ip',
                  u'emitter_attached_to': u'res1',
                  u'value': u'10.10.0.2'},
                 {u'emitter': u'ip',
                  u'emitter_attached_to': u'res2',
                  u'value': u'10.10.0.3'}]}
2
solar/test-requirements.txt
Normal file
@ -0,0 +1,2 @@
-r requirements.txt
mock
@ -25,6 +25,7 @@ resources:
      login_password: 'mariadb_service::root_password'
      login_port: 'mariadb_service::port'
      db_name: 'keystone'
      db_host: 'mariadb_service::ip'
      user_password: 'keystone'
      user_name: 'keystone'
      ip: '{{ip}}'
@ -35,19 +36,18 @@ resources:
    from: templates/keystone_api.yml
    values:
      idx: 1
      image: 'kollaglue/centos-rdo-k-keystone'
      config_dir: '/etc/solar/keystone_config_1'
      db_password: 'keystone_db_user::user_password'
      db_user: 'keystone_db_user::user_name'
      db_port: 'keystone_db_user::login_port'
      db_name: 'keystone_db_user::db_name'
      db_host: 'mariadb_service::ip'
      admin_token: 132fdsfwqee
      admin_port: 35357
      port: 5000
      ip: '{{ip}}'
      ssh_user: '{{ssh_user}}'
      ssh_key: '{{ssh_key}}'

  # TODO: HAproxy

  - id: openstack_base
    from: templates/openstack_base.yml
    values:
@ -57,34 +57,20 @@ resources:
      keystone_ip: 'keystone_service_1::ip'
      keystone_admin_port: 'keystone_service_1::admin_port'
      keystone_port: 'keystone_service_1::port'
      admin_token: 'keystone_config_1::admin_token'
      admin_token: 'keystone_service_1::admin_token'

  - id: glance_base
    from: templates/glance_base.yml
  - id: openrc_file
    from: resources/openrc_file
    values:
      login_user: root
      login_password: 'mariadb_service::root_password'
      login_port: 'mariadb_service::port'
      db_name: 'glance'
      user_password: 'glance'
      user_name: 'glance'
      keystone_host: 'keystone_service_1::ip'
      keystone_port: 'keystone_service_1::admin_port'
      tenant: 'admin_user::tenant_name'
      user_name: 'admin_user::user_name'
      password: 'admin_user::user_password'

      ip: '{{ip}}'
      ssh_user: '{{ssh_user}}'
      ssh_key: '{{ssh_key}}'

  - id: glance_registry_1
    from: templates/glance_registry.yml
    values:
      idx: 1
      keystone_admin_port: 'keystone_service_1::admin_port'
      keystone_ip: 'keystone_service_1::ip'
      mysql_password: 'glance_db_user::user_password'
      mysql_user: 'keystone_db_user::user_name'
      mysql_db: 'keystone_db_user::db_name'
      mysql_ip: 'mariadb_service::ip'
      ip: '{{ip}}'
      ssh_user: '{{ssh_user}}'
      ssh_key: '{{ssh_key}}'

tags: ['resources/controller', 'resource/primary_controller']

@ -1,27 +1,20 @@
id: keystone_api_{{idx}}

resources:
  - id: keystone_config_{{idx}}
    from: resources/keystone_config
  - id: keystone_service_{{idx}}
    from: resources/keystone_puppet
    values:
      config_dir: '/etc/solar/keystone_{{idx}}'
      admin_token: '{{admin_token}}'
      db_host: '{{db_host}}'
      db_port: '{{db_port}}'
      db_name: '{{db_name}}'
      db_user: '{{db_user}}'
      db_password: '{{db_password}}'

      admin_port: {{admin_port}}
      port: {{port}}
      ip: '{{ip}}'
      ssh_user: '{{ssh_user}}'
      ssh_key: '{{ssh_key}}'


  - id: keystone_service_{{idx}}
    from: resources/keystone_service
    values:
      image: 'kollaglue/centos-rdo-j-keystone'
      config_dir: 'keystone_config_{{idx}}::config_dir'
      ip: 'keystone_config_{{idx}}::ip'
      ssh_user: 'keystone_config_{{idx}}::ssh_user'
      ssh_key: 'keystone_config_{{idx}}::ssh_key'

tags: ['resources/keystone', 'resource/keystone_api']
@ -4,7 +4,8 @@ resources:
  - id: keystone_db
    from: resources/mariadb_db
    values:
      db_name: {{db_name}}
      db_name: '{{db_name}}'
      db_host: '{{db_host}}'
      login_user: '{{login_user}}'
      login_password: '{{login_password}}'
      login_port: '{{login_port}}'
@ -18,6 +19,7 @@ resources:
      user_password: '{{user_password}}'
      user_name: '{{user_name}}'
      db_name: 'keystone_db::db_name'
      db_host: '{{db_host}}'
      login_user: 'keystone_db::login_user'
      login_password: 'keystone_db::login_password'
      login_port: 'keystone_db::login_port'
@ -25,6 +25,19 @@ resources:
      ssh_user: '{{ssh_user}}'
      ssh_key: '{{ssh_key}}'

  - id: admin_role
    from: resources/keystone_role
    values:
      role_name: 'admin'
      user_name: 'admin_user::user_name'
      tenant_name: 'admin_user::tenant_name'
      keystone_port: '{{keystone_admin_port}}'
      keystone_host: '{{keystone_ip}}'
      admin_token: '{{admin_token}}'
      ip: '{{ip}}'
      ssh_user: '{{ssh_user}}'
      ssh_key: '{{ssh_key}}'

  - id: keystone_service_endpoint
    from: resources/keystone_service_endpoint
    values:
@ -35,7 +48,7 @@ resources:
      {% endraw %}
      description: 'OpenStack Identity Service'
      type: 'identity'
      name: 'keystone'
      endpoint_name: 'keystone'
      admin_port: '{{keystone_admin_port}}'
      public_port: '{{keystone_port}}'
      internal_port: '{{keystone_port}}'