Merge branch 'master' into nova-api
Signed-off-by: Bogdan Dobrelya <bdobrelia@mirantis.com>

Conflicts:
	example-puppet.py

commit 4470c7d200
@@ -61,7 +61,9 @@ solar connect mariadb_service keystone_db '{"root_password": "login_password", "
 solar connect keystone_db keystone_db_user

 solar changes stage
-solar changes commit
+solar changes process
+<uid>
+solar orch run-once <uid>
 ```

You can fiddle with the above configuration like this:

@@ -70,7 +72,9 @@ solar resource update keystone_db_user '{"user_password": "new_keystone_password
 solar resource update keystone_db_user user_password=new_keystone_password # another valid format

 solar changes stage
-solar changes commit
+solar changes process
+<uid>
+solar orch run-once <uid>
 ```

* Show the connections/graph:
Vagrantfile (vendored, 4 changes)
@@ -14,9 +14,11 @@ SCRIPT

 slave_script = <<SCRIPT
 apt-get update
 apt-get upgrade
 apt-get dist-upgrade
 apt-get -y install python-pip python-dev
 pip install ansible
-ansible-playbook -i "localhost," -c local /vagrant/main.yml /vagrant/docker.yml /vagrant/slave.yml
+ansible-playbook -i "localhost," -c local /vagrant/main.yml /vagrant/docker.yml /vagrant/slave.yml /vagrant/slave_cinder.yml
 SCRIPT

 master_celery = <<SCRIPT
docs/orchestration.md (new file, 65 lines)
@@ -0,0 +1,65 @@

# Overview of orchestration commands and system log integration

After the user has created all required resources, it is possible to automatically
detect which resources require changes with

```
solar changes stage
```

After changes are staged, they are used to populate the history, which can be seen
with the following command (the *-n* option limits the number of items; -1 returns all changes):

```
solar changes history -n 5
```

The user can generate a deployment scenario based on the changes found by the system log:

```
solar changes process
```

This command prepares the deployment graph and returns the uid of the deployment graph to
work with.

All commands that manipulate the deployment graph live in the
*orch* namespace.

The report prints all deployment tasks in topological order, with their status,
and the error if a task's status is *ERROR*:

```
solar orch report <uid>
```

To see a picture of the deployment dependencies, use the following command:

```
solar orch dg <uid>
```

Keep in mind that this is not a representation of all edges kept in the graph;
transitive reduction is used to leave only the edges that matter for the
order of traversal.

Execute the deployment:

```
solar orch run-once <uid>
```

Gracefully stop the deployment after all already scheduled tasks have finished:

```
solar orch stop <uid>
```

Continue deployment execution for all tasks that are SKIPPED:

```
solar orch resume <uid>
```

Return all tasks to the PENDING state and restart the deployment:

```
solar orch restart <uid>
```

Retry tasks in the ERROR state and continue execution:

```
solar orch retry <uid>
```
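Taken together, a typical cycle through these commands looks like the sketch below; the `<uid>` placeholder stands for the identifier that `solar changes process` prints:

```
solar changes stage            # detect resources that require changes
solar changes history -n 5     # inspect the staged changes
solar changes process          # build the deployment graph, prints <uid>
solar orch run-once <uid>      # execute the deployment
solar orch report <uid>        # follow task statuses
solar orch retry <uid>         # if tasks ended in ERROR, retry and continue
```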
@@ -7,6 +7,7 @@ from solar.core import resource
 from solar.core import signals
 from solar.core import validation
 from solar.core.resource import virtual_resource as vr
+from solar import errors

 from solar.interfaces.db import get_db

@@ -69,13 +70,13 @@ def deploy():
     # KEYSTONE
     keystone_puppet = vr.create('keystone_puppet', 'resources/keystone_puppet', {})[0]
-    keystone_db = vr.create('keystone_db', 'resources/mariadb_keystone_db/', {
+    keystone_db = vr.create('keystone_db', 'resources/mariadb_db/', {
         'db_name': 'keystone_db',
         'login_user': 'root'
     })[0]
-    keystone_db_user = vr.create('keystone_db_user', 'resources/mariadb_keystone_user/', {
-        'new_user_name': 'keystone',
-        'new_user_password': 'keystone',
+    keystone_db_user = vr.create('keystone_db_user', 'resources/mariadb_user/', {
+        'user_name': 'keystone',
+        'user_password': 'keystone',
     })[0]
     keystone_service_endpoint = vr.create('keystone_service_endpoint', 'resources/keystone_service_endpoint', {
         'endpoint_name': 'keystone',

@@ -107,12 +108,14 @@ def deploy():
         'port': 'login_port',
         'root_user': 'login_user',
         'root_password': 'login_password',
+        'ip' : 'db_host',
     })
     signals.connect(keystone_db, keystone_db_user, {
         'db_name',
         'login_port',
         'login_user',
-        'login_password'
+        'login_password',
+        'db_host'
     })

     signals.connect(node1, keystone_service_endpoint)

@@ -141,8 +144,9 @@ def deploy():
         'db_name',
     })
     signals.connect(keystone_db_user, keystone_puppet, {
-        'new_user_name': 'db_user',
-        'new_user_password': 'db_password'
+        'user_name': 'db_user',
+        'user_password': 'db_password',
+        'db_host' : 'db_host'
     })

     # OPENRC

@@ -154,17 +158,14 @@ def deploy():
     # NEUTRON
     # TODO: vhost cannot be specified in neutron Puppet manifests so this user has to be admin anyways
-    neutron_puppet = vr.create('neutron_puppet', 'resources/neutron_puppet', {
-        'rabbitmq_user': 'guest',
-        'rabbitmq_password': 'guest'
-    })[0]
+    neutron_puppet = vr.create('neutron_puppet', 'resources/neutron_puppet', {})[0]

     neutron_keystone_user = vr.create('neutron_keystone_user', 'resources/keystone_user', {
         'user_name': 'neutron',
         'user_password': 'neutron'
     })[0]
     neutron_keystone_role = vr.create('neutron_keystone_role', 'resources/keystone_role', {
-        'role_name': 'neutron'
+        'role_name': 'admin'
     })[0]
     neutron_keystone_service_endpoint = vr.create('neutron_keystone_service_endpoint', 'resources/keystone_service_endpoint', {
         'endpoint_name': 'neutron',

@@ -180,6 +181,11 @@ def deploy():
         'ip': 'rabbitmq_host',
         'port': 'rabbitmq_port'
     })
+    signals.connect(openstack_rabbitmq_user, neutron_puppet, {
+        'user_name': 'rabbitmq_user',
+        'password': 'rabbitmq_password'})
+    signals.connect(openstack_vhost, neutron_puppet, {
+        'vhost_name': 'rabbitmq_virtual_host'})
     signals.connect(admin_user, neutron_puppet, {
         'user_name': 'keystone_user',
         'user_password': 'keystone_password',

@@ -203,40 +209,75 @@ def deploy():
         'port': ['admin_port', 'internal_port', 'public_port'],
     })

-    # # CINDER
-    # cinder_puppet = vr.create('cinder_puppet', 'resources/cinder_puppet', {
-    #     'rabbit_userid': 'guest', 'rabbit_password': 'guest'})[0]
-    # cinder_db = vr.create('cinder_db', 'resources/mariadb_db/', {
-    #     'db_name': 'cinder_db', 'login_user': 'root'})[0]
-    # cinder_db_user = vr.create('cinder_db_user', 'resources/mariadb_user/', {
-    #     'user_name': 'cinder', 'user_password': 'cinder', 'login_user': 'root'})[0]
-    # cinder_keystone_user = vr.create('cinder_keystone_user', 'resources/keystone_user', {
-    #     'user_name': 'cinder', 'user_password': 'cinder'})[0]
-    # cinder_keystone_role = vr.create('cinder_keystone_role', 'resources/keystone_role', {
-    #     'role_name': 'cinder'})[0]
-    # cinder_keystone_service_endpoint = vr.create(
-    #     'cinder_keystone_service_endpoint', 'resources/keystone_service_endpoint', {
-    #         'adminurl': 'http://{{admin_ip}}:{{admin_port}}',
-    #         'internalurl': 'http://{{internal_ip}}:{{internal_port}}',
-    #         'publicurl': 'http://{{public_ip}}:{{public_port}}',
-    #         'description': 'OpenStack Network Service', 'type': 'network'})[0]
+    # CINDER
+    cinder_puppet = vr.create('cinder_puppet', 'resources/cinder_puppet', {})[0]
+    cinder_db = vr.create('cinder_db', 'resources/mariadb_db/', {
+        'db_name': 'cinder_db', 'login_user': 'root'})[0]
+    cinder_db_user = vr.create('cinder_db_user', 'resources/mariadb_user/', {
+        'user_name': 'cinder', 'user_password': 'cinder', 'login_user': 'root'})[0]
+    cinder_keystone_user = vr.create('cinder_keystone_user', 'resources/keystone_user', {
+        'user_name': 'cinder', 'user_password': 'cinder'})[0]
+    cinder_keystone_role = vr.create('cinder_keystone_role', 'resources/keystone_role', {
+        'role_name': 'admin'})[0]
+    cinder_keystone_service_endpoint = vr.create(
+        'cinder_keystone_service_endpoint',
+        'resources/keystone_service_endpoint', {
+            'endpoint_name': 'cinder',
+            'adminurl': 'http://{{admin_ip}}:{{admin_port}}/v2/%(tenant_id)s',
+            'internalurl': 'http://{{internal_ip}}:{{internal_port}}/v2/%(tenant_id)s',
+            'publicurl': 'http://{{public_ip}}:{{public_port}}/v2/%(tenant_id)s',
+            'description': 'OpenStack Block Storage Service', 'type': 'volume'})[0]

+    signals.connect(node1, cinder_puppet)
+    signals.connect(node1, cinder_db)
+    signals.connect(node1, cinder_db_user)
+    signals.connect(rabbitmq_service1, cinder_puppet, {'ip': 'rabbit_host', 'port': 'rabbit_port'})
+    signals.connect(admin_user, cinder_puppet, {'user_name': 'keystone_user', 'user_password': 'keystone_password', 'tenant_name': 'keystone_tenant'})  #?
+    signals.connect(openstack_vhost, cinder_puppet, {'vhost_name': 'rabbit_virtual_host'})
+    signals.connect(openstack_rabbitmq_user, cinder_puppet, {'user_name': 'rabbit_userid', 'password': 'rabbit_password'})
+    signals.connect(mariadb_service1, cinder_db, {
+        'port': 'login_port',
+        'root_password': 'login_password',
+        'root_user': 'login_user',
+        'ip' : 'db_host'})
+    signals.connect(mariadb_service1, cinder_db_user, {'port': 'login_port', 'root_password': 'login_password'})
+    signals.connect(cinder_db, cinder_db_user, {'db_name', 'db_host'})
+    signals.connect(cinder_db_user, cinder_puppet, {
+        'user_name':'db_user',
+        'db_name':'db_name',
+        'user_password':'db_password',
+        'db_host' : 'db_host'})
+    signals.connect(keystone_puppet, cinder_puppet, {'ip': 'keystone_host', 'admin_port': 'keystone_port'})  # or non-admin port?
+    signals.connect(services_tenant, cinder_keystone_user)
+    signals.connect(cinder_keystone_user, cinder_keystone_role)
+    signals.connect(cinder_keystone_user, cinder_puppet, {'user_name': 'keystone_user', 'tenant_name': 'keystone_tenant', 'user_password': 'keystone_password'})
+    signals.connect(mariadb_service1, cinder_puppet, {'ip':'ip'})
+    signals.connect(cinder_puppet, cinder_keystone_service_endpoint, {
+        'ssh_key': 'ssh_key', 'ssh_user': 'ssh_user',
+        'ip': ['ip', 'keystone_host', 'admin_ip', 'internal_ip', 'public_ip'],
+        'port': ['admin_port', 'internal_port', 'public_port'],})
+    signals.connect(keystone_puppet, cinder_keystone_service_endpoint, {
+        'admin_port': 'keystone_admin_port', 'admin_token': 'admin_token'})

+    # CINDER API
+    cinder_api_puppet = vr.create('cinder_api_puppet', 'resources/cinder_api_puppet', {})[0]
+    signals.connect(node1, cinder_api_puppet)
+    signals.connect(cinder_puppet, cinder_api_puppet, {
+        'keystone_password', 'keystone_tenant', 'keystone_user'})
+    signals.connect(cinder_puppet, cinder_api_puppet, {
+        'keystone_host': 'keystone_auth_host',
+        'keystone_port': 'keystone_auth_port'})

-    # signals.connect(node1, cinder_db)
-    # signals.connect(node1, cinder_db_user)
-    # signals.connect(node1, cinder_puppet)
-    # signals.connect(rabbitmq_service1, cinder_puppet, {'ip': 'rabbit_host', 'port': 'rabbit_port'})
-    # signals.connect(openstack_vhost, cinder_puppet, {'vhost_name': 'rabbit_virtual_host'})
-    # signals.connect(openstack_rabbitmq_user, cinder_puppet, {'user_name': 'rabbit_userid', 'password': 'rabbit_password'})
-    # signals.connect(mariadb_service1, cinder_db, {
-    #     'port': 'login_port', 'root_password': 'login_password'})
-    # signals.connect(mariadb_service1, cinder_db_user, {
-    #     'port': 'login_port', 'root_password': 'login_password'})
-    # signals.connect(cinder_db, cinder_db_user, {'db_name': 'db_name'})
-    # signals.connect(services_tenant, cinder_keystone_user)
-    # signals.connect(cinder_keystone_user, cinder_keystone_role)

+    # CINDER SCHEDULER
+    cinder_scheduler_puppet = vr.create('cinder_scheduler_puppet', 'resources/cinder_scheduler_puppet', {})[0]
+    signals.connect(node1, cinder_scheduler_puppet)
+    signals.connect(cinder_puppet, cinder_scheduler_puppet)

+    # CINDER VOLUME
+    cinder_volume_puppet = vr.create('cinder_volume_puppet', 'resources/cinder_volume_puppet', {})[0]
+    signals.connect(node1, cinder_volume_puppet)
+    signals.connect(cinder_puppet, cinder_volume_puppet)

     # NOVA
     nova_api = vr.create('nova_api', 'resources/nova_api_puppet', {})[0]
     nova_db = vr.create('nova_db', 'resources/mariadb_db/', {

@@ -343,21 +384,22 @@ def deploy():
     actions.resource_action(admin_role, 'run')

     actions.resource_action(keystone_service_endpoint, 'run')

     actions.resource_action(services_tenant, 'run')

     actions.resource_action(neutron_keystone_user, 'run')
     actions.resource_action(neutron_keystone_role, 'run')

     actions.resource_action(neutron_puppet, 'run')
     actions.resource_action(neutron_keystone_service_endpoint, 'run')

-    # actions.resource_action(cinder_db, 'run')
-    # actions.resource_action(cinder_db_user, 'run')
-    # actions.resource_action(cinder_keystone_user, 'run')
-    # actions.resource_action(cinder_keystone_role, 'run')
-    # actions.resource_action(cinder_puppet, 'run')
+    actions.resource_action(cinder_db, 'run')
+    actions.resource_action(cinder_db_user, 'run')
+    actions.resource_action(cinder_keystone_user, 'run')
+    actions.resource_action(cinder_keystone_role, 'run')
+    actions.resource_action(cinder_puppet, 'run')
+    actions.resource_action(cinder_keystone_service_endpoint, 'run')
+    actions.resource_action(cinder_api_puppet, 'run')
+    actions.resource_action(cinder_scheduler_puppet, 'run')
+    actions.resource_action(cinder_volume_puppet, 'run')
     actions.resource_action(nova_db, 'run')
     actions.resource_action(nova_db_user, 'run')
     actions.resource_action(nova_keystone_user, 'run')

@@ -377,6 +419,15 @@ def undeploy():
         'nova_db_user',
         'nova_keystone_service_endpoint',
         'nova_api',
+        'cinder_volume_puppet',
+        'cinder_scheduler_puppet',
+        'cinder_api_puppet',
+        'cinder_keystone_service_endpoint',
+        'cinder_puppet',
+        'cinder_keystone_role',
+        'cinder_keystone_user',
+        'cinder_db_user',
+        'cinder_db',
         'neutron_keystone_service_endpoint',
         'neutron_puppet',
         'neutron_keystone_role',

@@ -400,7 +451,10 @@ def undeploy():
     resources = {r.name: r for r in resources}

     for name in to_remove:
-        actions.resource_action(resources[name], 'remove')
+        try:
+            actions.resource_action(resources[name], 'remove')
+        except errors.SolarError as e:
+            print 'WARNING: %s' % str(e)

     #actions.resource_action(resources['nova_keystone_service_endpoint'], 'remove' )
     # actions.resource_action(resources['nova_network_puppet'], 'remove' )
resources/cinder_api_puppet/README.md (new file, 98 lines)
@@ -0,0 +1,98 @@

# Cinder API resource for puppet handler

Set up and configure the cinder API endpoint

## Parameters

source https://github.com/openstack/puppet-cinder/blob/5.1.0/manifests/api.pp

``keystone_password``
  The password to use for authentication (keystone)

``keystone_enabled``
  (optional) Use keystone for authentication
  Defaults to true

``keystone_tenant``
  (optional) The tenant of the auth user
  Defaults to services

``keystone_user``
  (optional) The name of the auth user
  Defaults to cinder

``keystone_auth_host``
  (optional) The keystone host
  Defaults to localhost

``keystone_auth_port``
  (optional) The keystone auth port
  Defaults to 35357

``keystone_auth_protocol``
  (optional) The protocol used to access the auth host
  Defaults to http.

``os_region_name``
  (optional) Some operations require cinder to make API requests
  to Nova. This sets the keystone region to be used for these
  requests. For example, boot-from-volume.
  Defaults to undef.

``keystone_auth_admin_prefix``
  (optional) The admin_prefix used for the admin endpoint of the auth host
  This allows admin auth URIs like http://auth_host:35357/keystone
  (where '/keystone' is the admin prefix).
  Defaults to false for empty. If defined, should be a string with a
  leading '/' and no trailing '/'.

``service_port``
  (optional) The cinder api port
  Defaults to 5000

``service_workers``
  (optional) Number of cinder-api workers
  Defaults to $::processorcount

``package_ensure``
  (optional) The state of the package
  Defaults to present

``bind_host``
  (optional) The cinder api bind address
  Defaults to 0.0.0.0

``ratelimits``
  (optional) The state of the service
  Defaults to undef. If undefined the default ratelimiting values are used.

``ratelimits_factory``
  (optional) Factory to use for ratelimiting
  Defaults to 'cinder.api.v1.limits:RateLimitingMiddleware.factory'

``default_volume_type``
  (optional) Default volume type to use.
  This should contain the name of the default volume type to use.
  If not configured, it produces an error when creating a volume
  without specifying a type.
  Defaults to 'false'.

``validate``
  (optional) Whether to validate the service is working after any service refreshes
  Defaults to false

``validation_options``
  (optional) Service validation options
  Should be a hash of options defined in openstacklib::service_validation
  If empty, default values are taken from the openstacklib function.
  The default command lists volumes.
  Requires validate set to true.
  Example:
    glance::api::validation_options:
      glance-api:
        command: check_cinder-api.py
        path: /usr/bin:/bin:/usr/sbin:/sbin
        provider: shell
        tries: 5
        try_sleep: 10
  Defaults to {}
resources/cinder_api_puppet/actions/remove.pp (new file, 12 lines)
@@ -0,0 +1,12 @@

class {'cinder::api':
  enabled           => false,
  package_ensure    => 'absent',
  keystone_password => 'not important as removed',
}

include cinder::params

package { 'cinder':
  ensure => 'absent',
  name   => $::cinder::params::package_name,
}
resources/cinder_api_puppet/actions/run.pp (new file, 52 lines)
@@ -0,0 +1,52 @@

$resource = hiera($::resource_name)

$keystone_password = $resource['input']['keystone_password']['value']
$keystone_enabled = $resource['input']['keystone_enabled']['value']
$keystone_tenant = $resource['input']['keystone_tenant']['value']
$keystone_user = $resource['input']['keystone_user']['value']
$keystone_auth_host = $resource['input']['keystone_auth_host']['value']
$keystone_auth_port = $resource['input']['keystone_auth_port']['value']
$keystone_auth_protocol = $resource['input']['keystone_auth_protocol']['value']
$keystone_auth_admin_prefix = $resource['input']['keystone_auth_admin_prefix']['value']
$keystone_auth_uri = $resource['input']['keystone_auth_uri']['value']
$os_region_name = $resource['input']['os_region_name']['value']
$service_port = $resource['input']['service_port']['value']
$service_workers = $resource['input']['service_workers']['value']
$package_ensure = $resource['input']['package_ensure']['value']
$bind_host = $resource['input']['bind_host']['value']
$ratelimits = $resource['input']['ratelimits']['value']
$default_volume_type = $resource['input']['default_volume_type']['value']
$ratelimits_factory = $resource['input']['ratelimits_factory']['value']
$validate = $resource['input']['validate']['value']
$validation_options = $resource['input']['validation_options']['value']

include cinder::params

package { 'cinder':
  ensure => $package_ensure,
  name   => $::cinder::params::package_name,
} ->

class {'cinder::api':
  keystone_password          => $keystone_password,
  keystone_enabled           => $keystone_enabled,
  keystone_tenant            => $keystone_tenant,
  keystone_user              => $keystone_user,
  keystone_auth_host         => $keystone_auth_host,
  keystone_auth_port         => $keystone_auth_port,
  keystone_auth_protocol     => $keystone_auth_protocol,
  keystone_auth_admin_prefix => $keystone_auth_admin_prefix,
  keystone_auth_uri          => $keystone_auth_uri,
  os_region_name             => $os_region_name,
  service_port               => $service_port,
  service_workers            => $service_workers,
  package_ensure             => $package_ensure,
  bind_host                  => $bind_host,
  enabled                    => true,
  manage_service             => true,
  ratelimits                 => $ratelimits,
  default_volume_type        => $default_volume_type,
  ratelimits_factory         => $ratelimits_factory,
  validate                   => $validate,
  validation_options         => $validation_options,
}
resources/cinder_api_puppet/meta.yaml (new file, 78 lines)
@@ -0,0 +1,78 @@

id: cinder_api_puppet
handler: puppet
puppet_module: cinder_api
version: 1.0.0
input:
  keystone_password:
    schema: str!
    value: 'keystone'
  keystone_enabled:
    schema: bool
    value: true
  keystone_tenant:
    schema: str
    value: 'services'
  keystone_user:
    schema: str
    value: 'cinder'
  keystone_auth_host:
    schema: str
    value: 'localhost'
  keystone_auth_port:
    schema: int
    value: 35357
  keystone_auth_protocol:
    schema: str
    value: 'http'
  keystone_auth_admin_prefix:
    schema: bool
    value: false
  keystone_auth_uri:
    schema: bool
    value: false
  os_region_name:
    schema: str
    value: ''
  service_port:
    schema: int
    value: 5000
  service_workers:
    schema: int
    value: 1
  package_ensure:
    schema: str
    value: 'present'
  bind_host:
    schema: str
    value: '0.0.0.0'
  ratelimits:
    schema: str
    value: ''
  default_volume_type:
    schema: bool
    value: false
  ratelimits_factory:
    schema: str
    value: 'cinder.api.v1.limits:RateLimitingMiddleware.factory'
  validate:
    schema: bool
    value: false
  validation_options:
    schema: {}
    value: {}

  git:
    schema: {repository: str!, branch: str!}
    value: {repository: 'https://github.com/openstack/puppet-cinder', branch: '5.1.0'}

  ip:
    schema: str!
    value:
  ssh_key:
    schema: str!
    value:
  ssh_user:
    schema: str!
    value:

tags: [resource/cinder_api_service, resources/cinder_api, resources/cinder]
resources/cinder_api_puppet/test.py (new file, 10 lines)
@@ -0,0 +1,10 @@

import requests

from solar.core.log import log


def test(resource):
    log.debug('Testing cinder_api_puppet')
    requests.get(
        'http://%s:%s' % (resource.args['ip'].value, resource.args['port'].value)
    )
resources/cinder_puppet/README.md (new file, 112 lines)
@@ -0,0 +1,112 @@

# Cinder resource for puppet handler

Controls the life cycle of the cinder entities:
the main puppet class, auth, DB, AMQP, packages,
keystone user, role and endpoint.

# Parameters

source https://github.com/openstack/puppet-cinder/blob/5.1.0/manifests/init.pp

``database_connection``
  Url used to connect to database.
  (Optional) Defaults to
  'sqlite:////var/lib/cinder/cinder.sqlite'

``database_idle_timeout``
  Timeout when db connections should be reaped.
  (Optional) Defaults to 3600.

``database_min_pool_size``
  Minimum number of SQL connections to keep open in a pool.
  (Optional) Defaults to 1.

``database_max_pool_size``
  Maximum number of SQL connections to keep open in a pool.
  (Optional) Defaults to undef.

``database_max_retries``
  Maximum db connection retries during startup.
  Setting -1 implies an infinite retry count.
  (Optional) Defaults to 10.

``database_retry_interval``
  Interval between retries of opening a sql connection.
  (Optional) Defaults to 10.

``database_max_overflow``
  If set, use this value for max_overflow with sqlalchemy.
  (Optional) Defaults to undef.

``rabbit_use_ssl``
  (optional) Connect over SSL for RabbitMQ
  Defaults to false

``kombu_ssl_ca_certs``
  (optional) SSL certification authority file (valid only if SSL enabled).
  Defaults to undef

``kombu_ssl_certfile``
  (optional) SSL cert file (valid only if SSL enabled).
  Defaults to undef

``kombu_ssl_keyfile``
  (optional) SSL key file (valid only if SSL enabled).
  Defaults to undef

``kombu_ssl_version``
  (optional) SSL version to use (valid only if SSL enabled).
  Valid values are TLSv1, SSLv23 and SSLv3. SSLv2 may be
  available on some distributions.
  Defaults to 'TLSv1'

``amqp_durable_queues``
  Use durable queues in amqp.
  (Optional) Defaults to false.

``use_syslog``
  Use syslog for logging.
  (Optional) Defaults to false.

``log_facility``
  Syslog facility to receive log lines.
  (Optional) Defaults to LOG_USER.

``log_dir``
  (optional) Directory where logs should be stored.
  If set to boolean false, it will not log to any directory.
  Defaults to '/var/log/cinder'

``use_ssl``
  (optional) Enable SSL on the API server
  Defaults to false, not set

``cert_file``
  (optional) Certificate file to use when starting API server securely
  Defaults to false, not set

``key_file``
  (optional) Private key file to use when starting API server securely
  Defaults to false, not set

``ca_file``
  (optional) CA certificate file to use to verify connecting clients
  Defaults to false, not set

``mysql_module``
  (optional) Deprecated. Does nothing.

``storage_availability_zone``
  (optional) Availability zone of the node.
  Defaults to 'nova'

``default_availability_zone``
  (optional) Default availability zone for new volumes.
  If not set, the storage_availability_zone option value is used as
  the default for new volumes.
  Defaults to false

``sql_connection``
  DEPRECATED
``sql_idle_timeout``
  DEPRECATED
resources/cinder_puppet/actions/remove.pp (new file, 4 lines)
@@ -0,0 +1,4 @@

class {'cinder':
  package_ensure  => 'absent',
  rabbit_password => 'not important as removed',
}
resources/cinder_puppet/actions/run.pp (new file, 114 lines)
@@ -0,0 +1,114 @@

$resource = hiera($::resource_name)

$ip = $resource['input']['ip']['value']

$db_user = $resource['input']['db_user']['value']
$db_password = $resource['input']['db_password']['value']
$db_name = $resource['input']['db_name']['value']

$database_connection = $resource['input']['database_connection']['value']
$database_idle_timeout = $resource['input']['database_idle_timeout']['value']
$database_min_pool_size = $resource['input']['database_min_pool_size']['value']
$database_max_pool_size = $resource['input']['database_max_pool_size']['value']
$database_max_retries = $resource['input']['database_max_retries']['value']
$database_retry_interval = $resource['input']['database_retry_interval']['value']
$database_max_overflow = $resource['input']['database_max_overflow']['value']
$rpc_backend = $resource['input']['rpc_backend']['value']
$control_exchange = $resource['input']['control_exchange']['value']
$rabbit_host = $resource['input']['rabbit_host']['value']
$rabbit_port = $resource['input']['rabbit_port']['value']
$rabbit_hosts = $resource['input']['rabbit_hosts']['value']
$rabbit_virtual_host = $resource['input']['rabbit_virtual_host']['value']
$rabbit_userid = $resource['input']['rabbit_userid']['value']
$rabbit_password = $resource['input']['rabbit_password']['value']
$rabbit_use_ssl = $resource['input']['rabbit_use_ssl']['value']
$kombu_ssl_ca_certs = $resource['input']['kombu_ssl_ca_certs']['value']
$kombu_ssl_certfile = $resource['input']['kombu_ssl_certfile']['value']
$kombu_ssl_keyfile = $resource['input']['kombu_ssl_keyfile']['value']
$kombu_ssl_version = $resource['input']['kombu_ssl_version']['value']
$amqp_durable_queues = $resource['input']['amqp_durable_queues']['value']
$qpid_hostname = $resource['input']['qpid_hostname']['value']
$qpid_port = $resource['input']['qpid_port']['value']
$qpid_username = $resource['input']['qpid_username']['value']
$qpid_password = $resource['input']['qpid_password']['value']
$qpid_sasl_mechanisms = $resource['input']['qpid_sasl_mechanisms']['value']
$qpid_reconnect = $resource['input']['qpid_reconnect']['value']
$qpid_reconnect_timeout = $resource['input']['qpid_reconnect_timeout']['value']
$qpid_reconnect_limit = $resource['input']['qpid_reconnect_limit']['value']
$qpid_reconnect_interval_min = $resource['input']['qpid_reconnect_interval_min']['value']
$qpid_reconnect_interval_max = $resource['input']['qpid_reconnect_interval_max']['value']
$qpid_reconnect_interval = $resource['input']['qpid_reconnect_interval']['value']
$qpid_heartbeat = $resource['input']['qpid_heartbeat']['value']
$qpid_protocol = $resource['input']['qpid_protocol']['value']
$qpid_tcp_nodelay = $resource['input']['qpid_tcp_nodelay']['value']
$package_ensure = $resource['input']['package_ensure']['value']
$use_ssl = $resource['input']['use_ssl']['value']
$ca_file = $resource['input']['ca_file']['value']
$cert_file = $resource['input']['cert_file']['value']
$key_file = $resource['input']['key_file']['value']
$api_paste_config = $resource['input']['api_paste_config']['value']
$use_syslog = $resource['input']['use_syslog']['value']
$log_facility = $resource['input']['log_facility']['value']
$log_dir = $resource['input']['log_dir']['value']
$verbose = $resource['input']['verbose']['value']
$debug = $resource['input']['debug']['value']
$storage_availability_zone = $resource['input']['storage_availability_zone']['value']
$default_availability_zone = $resource['input']['default_availability_zone']['value']
$mysql_module = $resource['input']['mysql_module']['value']
# Do not apply the legacy stuff
#$sql_connection = $resource['input']['sql_connection']['value']
$sql_idle_timeout = $resource['input']['sql_idle_timeout']['value']

class {'cinder':
  database_connection         => "mysql://${db_user}:${db_password}@${ip}/${db_name}",
  database_idle_timeout       => $database_idle_timeout,
  database_min_pool_size      => $database_min_pool_size,
  database_max_pool_size      => $database_max_pool_size,
  database_max_retries        => $database_max_retries,
  database_retry_interval     => $database_retry_interval,
  database_max_overflow       => $database_max_overflow,
  rpc_backend                 => $rpc_backend,
  control_exchange            => $control_exchange,
  rabbit_host                 => $rabbit_host,
  rabbit_port                 => $rabbit_port,
  rabbit_hosts                => $rabbit_hosts,
  rabbit_virtual_host         => $rabbit_virtual_host,
  rabbit_userid               => $rabbit_userid,
  rabbit_password             => $rabbit_password,
  rabbit_use_ssl              => $rabbit_use_ssl,
  kombu_ssl_ca_certs          => $kombu_ssl_ca_certs,
  kombu_ssl_certfile          => $kombu_ssl_certfile,
  kombu_ssl_keyfile           => $kombu_ssl_keyfile,
  kombu_ssl_version           => $kombu_ssl_version,
  amqp_durable_queues         => $amqp_durable_queues,
  qpid_hostname               => $qpid_hostname,
  qpid_port                   => $qpid_port,
  qpid_username               => $qpid_username,
  qpid_password               => $qpid_password,
  qpid_sasl_mechanisms        => $qpid_sasl_mechanisms,
  qpid_reconnect              => $qpid_reconnect,
  qpid_reconnect_timeout      => $qpid_reconnect_timeout,
  qpid_reconnect_limit        => $qpid_reconnect_limit,
  qpid_reconnect_interval_min => $qpid_reconnect_interval_min,
  qpid_reconnect_interval_max => $qpid_reconnect_interval_max,
  qpid_reconnect_interval     => $qpid_reconnect_interval,
  qpid_heartbeat              => $qpid_heartbeat,
  qpid_protocol               => $qpid_protocol,
  qpid_tcp_nodelay            => $qpid_tcp_nodelay,
  package_ensure              => $package_ensure,
  use_ssl                     => $use_ssl,
  ca_file                     => $ca_file,
  cert_file                   => $cert_file,
  key_file                    => $key_file,
  api_paste_config            => $api_paste_config,
  use_syslog                  => $use_syslog,
  log_facility                => $log_facility,
  log_dir                     => $log_dir,
  verbose                     => $verbose,
  debug                       => $debug,
  storage_availability_zone   => $storage_availability_zone,
  default_availability_zone   => $default_availability_zone,
  mysql_module                => $mysql_module,
  sql_connection              => $sql_connection,
  sql_idle_timeout            => $sql_idle_timeout,
}
resources/cinder_puppet/meta.yaml (new file, 211 lines)
@@ -0,0 +1,211 @@

id: cinder_puppet
handler: puppet
puppet_module: cinder
version: 1.0.0
input:
  database_connection:
    schema: str
    value: 'sqlite:////var/lib/cinder/cinder.sqlite'
  database_idle_timeout:
    schema: int
    value: 3600
  database_min_pool_size:
    schema: int
    value: 1
  database_max_pool_size:
    schema: str
    value: ''
  database_max_retries:
    schema: int
    value: 10
  database_retry_interval:
    schema: int
    value: 10
  database_max_overflow:
    schema: str
    value: ''
  rpc_backend:
    schema: str
    value: 'cinder.openstack.common.rpc.impl_kombu'
  control_exchange:
    schema: str
    value: 'openstack'
  rabbit_host:
    schema: str
    value: '127.0.0.1'
  rabbit_port:
    schema: int
    value: 5672
  rabbit_hosts:
    schema: bool
    value: false
  rabbit_virtual_host:
    schema: str
    value: '/'
  rabbit_userid:
    schema: str
    value: 'guest'
  rabbit_password:
    schema: str!
    value: 'rabbit'
  rabbit_use_ssl:
    schema: bool
    value: false
  kombu_ssl_ca_certs:
    schema: str
    value: ''
  kombu_ssl_certfile:
    schema: str
    value: ''
  kombu_ssl_keyfile:
    schema: str
    value: ''
  kombu_ssl_version:
    schema: str
    value: 'TLSv1'
  amqp_durable_queues:
    schema: bool
    value: false
  qpid_hostname:
    schema: str
    value: 'localhost'
  qpid_port:
    schema: int
    value: 5672
  qpid_username:
    schema: str
    value: 'guest'
  qpid_password:
    schema: str!
    value: 'qpid'
  qpid_sasl_mechanisms:
    schema: bool
    value: false
  qpid_reconnect:
    schema: bool
    value: true
  qpid_reconnect_timeout:
    schema: int
    value: 0
  qpid_reconnect_limit:
    schema: int
    value: 0
  qpid_reconnect_interval_min:
    schema: int
    value: 0
  qpid_reconnect_interval_max:
    schema: int
    value: 0
  qpid_reconnect_interval:
    schema: int
    value: 0
  qpid_heartbeat:
    schema: int
    value: 60
  qpid_protocol:
    schema: str
    value: 'tcp'
  qpid_tcp_nodelay:
    schema: bool
    value: true
  package_ensure:
    schema: str
    value: 'present'
  use_ssl:
    schema: bool
    value: false
  ca_file:
    schema: bool
    value: false
  cert_file:
    schema: bool
    value: false
  key_file:
    schema: bool
    value: false
  api_paste_config:
    schema: str
    value: '/etc/cinder/api-paste.ini'
  use_syslog:
    schema: bool
    value: false
  log_facility:
    schema: str
    value: 'LOG_USER'
  log_dir:
    schema: str
    value: '/var/log/cinder'
  verbose:
    schema: bool
    value: false
  debug:
    schema: bool
    value: false
  storage_availability_zone:
    schema: str
    value: 'nova'
  default_availability_zone:
    schema: bool
    value: false
  mysql_module:
    schema: str
    value: ''
  sql_connection:
    schema: str
    value: ''
  sql_idle_timeout:
    schema: str
    value: ''

  db_user:
    schema: str!
    value: cinder
  db_password:
    schema: str!
    value: cinder
  db_name:
    schema: str!
    value: cinder
  db_host:
    schema: str!
    value:

  port:
    schema: int!
    value: 8776

  git:
    schema: {repository: str!, branch: str!}
    value: {repository: 'https://github.com/openstack/puppet-cinder', branch: '5.1.0'}

  keystone_host:
    schema: str!
    value: ''
  keystone_port:
    schema: int!
    value: ''
  keystone_user:
    schema: str!
    value: ''
  keystone_password:
    schema: str!
    value: ''
  keystone_tenant:
    schema: str!
    value: ''

  # forge:
  #   schema: str!
  #   value: 'stackforge-cinder'

  ip:
    schema: str!
    value:
  ssh_key:
    schema: str!
    value:
  ssh_user:
    schema: str!
    value:

tags: [resource/cinder_service, resources/cinder]
resources/cinder_puppet/test.py (new file, 10 lines)
@@ -0,0 +1,10 @@

import requests

from solar.core.log import log


def test(resource):
    log.debug('Testing cinder_puppet')
    requests.get(
        'http://%s:%s' % (resource.args['ip'].value, resource.args['port'].value)
    )
resources/cinder_scheduler_puppet/README.md (new file, 3 lines)
@@ -0,0 +1,3 @@

# Cinder Scheduler resource for puppet handler

Set up and configure the cinder scheduler service
resources/cinder_scheduler_puppet/actions/remove.pp (new file, 4 lines)
@@ -0,0 +1,4 @@

class {'cinder::scheduler':
  enabled        => false,
  package_ensure => 'absent',
}
resources/cinder_scheduler_puppet/actions/run.pp (new file, 18 lines)
@@ -0,0 +1,18 @@

$resource = hiera($::resource_name)

$scheduler_driver = $resource['input']['scheduler_driver']['value']
$package_ensure = $resource['input']['package_ensure']['value']

include cinder::params

package { 'cinder':
  ensure => $package_ensure,
  name   => $::cinder::params::package_name,
} ->

class {'cinder::scheduler':
  scheduler_driver => $scheduler_driver,
  package_ensure   => $package_ensure,
  enabled          => true,
  manage_service   => true,
}
resources/cinder_scheduler_puppet/meta.yaml (new file, 27 lines)
@@ -0,0 +1,27 @@

id: cinder_scheduler_puppet
handler: puppet
puppet_module: cinder_scheduler
version: 1.0.0
input:
  scheduler_driver:
    schema: str
    value: ''
  package_ensure:
    schema: str
    value: 'present'

  git:
    schema: {repository: str!, branch: str!}
    value: {repository: 'https://github.com/openstack/puppet-cinder', branch: '5.1.0'}

  ip:
    schema: str!
    value:
  ssh_key:
    schema: str!
    value:
  ssh_user:
    schema: str!
    value:

tags: [resource/cinder_scheduler_service, resources/cinder_scheduler, resources/cinder]
resources/cinder_scheduler_puppet/test.py (new file, 12 lines)
@@ -0,0 +1,12 @@

import requests

from solar.core.log import log


def test(resource):
    log.debug('Testing cinder_scheduler_puppet')
    # requests.get(
    #     'http://%s:%s' % (resource.args['ip'].value, resource.args['port'].value)
    # TODO(bogdando) figure out how to test this
    # http://docs.openstack.org/developer/nova/devref/scheduler.html
    # )
resources/cinder_volume_puppet/README.md (new file, 3 lines)
@@ -0,0 +1,3 @@

# Cinder Volume resource for puppet handler

Set up and configure the cinder volume service
resources/cinder_volume_puppet/actions/remove.pp (new file, 4 lines)
@@ -0,0 +1,4 @@

class {'cinder::volume':
  enabled        => false,
  package_ensure => 'absent',
}
resources/cinder_volume_puppet/actions/run.pp (new file, 16 lines)
@@ -0,0 +1,16 @@

$resource = hiera($::resource_name)

$package_ensure = $resource['input']['package_ensure']['value']

include cinder::params

package { 'cinder':
  ensure => $package_ensure,
  name   => $::cinder::params::package_name,
} ->

class {'cinder::volume':
  package_ensure => $package_ensure,
  enabled        => true,
  manage_service => true,
}
resources/cinder_volume_puppet/meta.yaml (new file, 24 lines)
@@ -0,0 +1,24 @@

id: cinder_volume_puppet
handler: puppet
puppet_module: cinder_volume
version: 1.0.0
input:
  package_ensure:
    schema: str
    value: 'present'

  git:
    schema: {repository: str!, branch: str!}
    value: {repository: 'https://github.com/openstack/puppet-cinder', branch: '5.1.0'}

  ip:
    schema: str!
    value:
  ssh_key:
    schema: str!
    value:
  ssh_user:
    schema: str!
    value:

tags: [resource/cinder_volume_service, resources/cinder_volume, resources/cinder]
resources/cinder_volume_puppet/test.py (new file, 12 lines)
@@ -0,0 +1,12 @@

import requests

from solar.core.log import log


def test(resource):
    log.debug('Testing cinder_volume_puppet')
    # requests.get(
    #     'http://%s:%s' % (resource.args['ip'].value, resource.args['port'].value)
    # TODO(bogdando) figure out how to test this
    # http://docs.openstack.org/developer/nova/devref/volume.html
    # )
@@ -3,6 +3,7 @@ $resource = hiera($::resource_name)
 $ip = $resource['input']['ip']['value']
 $admin_token = $resource['input']['admin_token']['value']
 $db_user = $resource['input']['db_user']['value']
+$db_host = $resource['input']['db_host']['value']
 $db_password = $resource['input']['db_password']['value']
 $db_name = $resource['input']['db_name']['value']
 $admin_port = $resource['input']['admin_port']['value']

@@ -13,8 +14,9 @@ class {'keystone':
   verbose             => true,
   catalog_type        => 'sql',
   admin_token         => $admin_token,
-  database_connection => "mysql://$db_user:$db_password@$ip/$db_name",
+  database_connection => "mysql://$db_user:$db_password@$db_host/$db_name",
   public_port         => "$port",
   admin_port          => "$admin_port",
   token_driver        => 'keystone.token.backends.kvs.Token'
 }

@@ -8,13 +8,16 @@ input:
     value: admin_token
   db_user:
     schema: str!
-    value: keystone
+    value:
   db_password:
     schema: str!
-    value: keystone
+    value:
   db_name:
     schema: str!
-    value: keystone
+    value:
+  db_host:
+    schema: str!
+    value:

   admin_port:
     schema: int!
@@ -8,4 +8,4 @@
         login_user: root
         login_password: {{login_password}}
         login_port: {{login_port}}
-        login_host: 127.0.0.1
+        login_host: {{db_host}}

@@ -8,4 +8,4 @@
         login_user: root
         login_password: {{ login_password }}
         login_port: {{ login_port }}
-        login_host: 127.0.0.1
+        login_host: {{db_host}}

@@ -8,6 +8,9 @@ input:
   db_name:
     schema: str!
     value:
+  db_host:
+    schema: str!
+    value:

   login_user:
     schema: str!
@@ -1,11 +0,0 @@
-- hosts: [{{ ip }}]
-  sudo: yes
-  tasks:
-    - name: mariadb db
-      mysql_db:
-        name: {{db_name}}
-        state: absent
-        login_user: root
-        login_password: {{login_password}}
-        login_port: {{login_port}}
-        login_host: 127.0.0.1

@@ -1,13 +0,0 @@
-- hosts: [{{ ip }}]
-  sudo: yes
-  tasks:
-    - name: mariadb db
-      mysql_db:
-        name: {{ db_name }}
-        #collation: utf8_encode_ci
-        encoding: utf8
-        state: present
-        login_user: root
-        login_password: {{ login_password }}
-        login_port: {{ login_port }}
-        login_host: 127.0.0.1

@@ -1,30 +0,0 @@
-id: mariadb_keystone_db
-handler: ansible
-version: 1.0.0
-actions:
-  run: run.yml
-  remove: remove.yml
-input:
-  db_name:
-    schema: str!
-    value: keystone_db
-  login_user:
-    schema: str!
-    value: root
-  login_password:
-    schema: str!
-    value:
-  login_port:
-    schema: int!
-    value:
-  ip:
-    schema: str!
-    value:
-  ssh_key:
-    schema: str!
-    value:
-  ssh_user:
-    schema: str!
-    value:
-
-tags: [resource/mariadb_keystone_db, resources/mariadb]
@@ -1,11 +0,0 @@
-- hosts: [{{ ip }}]
-  sudo: yes
-  tasks:
-    - name: mariadb user
-      mysql_user:
-        name: {{new_user_name}}
-        state: absent
-        login_user: root
-        login_password: {{login_password}}
-        login_port: {{login_port}}
-        login_host: 127.0.0.1

@@ -1,14 +0,0 @@
-- hosts: [{{ ip }}]
-  sudo: yes
-  tasks:
-    - name: mariadb user
-      mysql_user:
-        name: {{ new_user_name }}
-        password: {{ new_user_password }}
-        priv: {{ db_name }}.*:ALL
-        host: '%'
-        state: present
-        login_user: root
-        login_password: {{ login_password }}
-        login_port: {{ login_port }}
-        login_host: 127.0.0.1

@@ -1,37 +0,0 @@
-id: mariadb_keystone_user
-handler: ansible
-version: 1.0.0
-actions:
-  run: run.yml
-  remove: remove.yml
-input:
-  new_user_password:
-    schema: str!
-    value: keystone
-  new_user_name:
-    schema: str!
-    value: keystone
-  db_name:
-    schema: str!
-    value:
-  login_password:
-    schema: str!
-    value:
-  login_port:
-    schema: int!
-    value:
-  login_user:
-    schema: str!
-    value:
-  ip:
-    schema: str!
-    value:
-  ssh_key:
-    schema: str!
-    value:
-  ssh_user:
-    schema: str!
-    value:
-
-tags: [resource/mariadb_keystone_user, resources/mariadb]
@@ -8,4 +8,4 @@
         login_user: root
         login_password: {{login_password}}
         login_port: {{login_port}}
-        login_host: 127.0.0.1
+        login_host: {{db_host}}

@@ -11,4 +11,4 @@
         login_user: root
         login_password: {{ login_password }}
         login_port: {{ login_port }}
-        login_host: 127.0.0.1
+        login_host: {{db_host}}

@@ -15,6 +15,9 @@ input:
   db_name:
     schema: str!
     value:
+  db_host:
+    schema: str!
+    value:

   login_password:
     schema: str!
@@ -1,4 +1,4 @@
-$resource = hiera('{{ resource_name }}')
+$resource = hiera($::resource_name)

 $ip = $resource['input']['ip']['value']

@@ -6,6 +6,7 @@ $rabbitmq_user = $resource['input']['rabbitmq_user']['value']
 $rabbitmq_password = $resource['input']['rabbitmq_password']['value']
 $rabbitmq_host = $resource['input']['rabbitmq_host']['value']
 $rabbitmq_port = $resource['input']['rabbitmq_port']['value']
+$rabbitmq_virtual_host = $resource['input']['rabbitmq_virtual_host']['value']

 $keystone_host = $resource['input']['keystone_host']['value']
 $keystone_port = $resource['input']['keystone_port']['value']

@@ -23,6 +24,7 @@ class { 'neutron':
   rabbit_password     => $rabbitmq_password,
   rabbit_host         => $rabbitmq_host,
   rabbit_port         => $rabbitmq_port,
+  rabbit_virtual_host => $rabbitmq_virtual_host,
   service_plugins     => ['metering']
 }
@@ -24,6 +24,9 @@ input:
   rabbitmq_password:
     schema: str!
     value: ''
+  rabbitmq_virtual_host:
+    schema: str!
+    value: ''

   git:
     schema: {repository: str!, branch: str!}
@@ -7,3 +7,4 @@ export OS_PASSWORD={{password}}
 export OS_AUTH_URL=http://{{keystone_host}}:{{keystone_port}}/v2.0
 export OS_AUTH_STRATEGY=keystone
 export OS_REGION_NAME='RegionOne'
+export OS_VOLUME_API_VERSION='2'
@@ -13,10 +13,10 @@ input:
   port:
     schema: int!
-    value: ''
+    value: 5672
   management_port:
     schema: int!
-    value: ''
+    value: 15672
   git:
     schema: {repository: str!, branch: str!}
     value: {repository: 'https://github.com/puppetlabs/puppetlabs-rabbitmq.git', branch: '5.1.0'}
@@ -16,7 +16,7 @@ fi

 . $VENV/bin/activate

-pip install -r solar/requirements.txt --download-cache=/tmp/$JOB_NAME
+pip install -r solar/test-requirements.txt --download-cache=/tmp/$JOB_NAME

 pushd solar/solar
slave_cinder.yml (new file, 14 lines)
@@ -0,0 +1,14 @@

---

- hosts: all
  sudo: yes
  tasks:
    - apt: name=lvm2 state=present
    - command: sudo truncate -s 10G /root/cinder.img creates=/root/cinder.img
    - shell: sudo losetup -a|grep cinder
      register: loop_created
      ignore_errors: True
    - command: sudo losetup /dev/loop0 /root/cinder.img
      when: loop_created|failed
    - lvg: vg=cinder-volumes pvs=/dev/loop0
      when: loop_created|failed
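The playbook above backs the `cinder-volumes` LVM volume group with a sparse loopback file. A rough shell equivalent of what it performs on the slave, assuming `/dev/loop0` is free (as the playbook does):

```
apt-get -y install lvm2               # LVM userland tools
truncate -s 10G /root/cinder.img      # sparse 10G backing file
losetup /dev/loop0 /root/cinder.img   # expose the file as a block device
vgcreate cinder-volumes /dev/loop0    # the volume group cinder-volume expects
```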
@@ -6,7 +6,6 @@ networkx==1.9.1
 PyYAML==3.11
 jsonschema==2.4.0
 requests==2.7.0
-#mock
 dictdiffer==0.4.0
 enum34==1.0.4
 redis==2.10.3

@@ -15,3 +14,5 @@ fakeredis
 inflection
 Fabric==1.10.2
 tabulate==0.7.5
+ansible
+celery
@@ -9,20 +9,7 @@ import os
 from solar.core.log import log
 from solar.core.handlers.base import TempFileHandler
 from solar.core.provider import GitProvider

-# TODO:
-# puppet wont always return 0 on error, example:
-# http://unix.stackexchange.com/questions/165333/how-to-get-non-zero-exit-code-from-puppet-when-configuration-cannot-be-applied
-
-# in fuel there is special handler based on puppet summary, but i think we can also use --detailed-exitcode
-# https://docs.puppetlabs.com/references/3.6.2/man/agent.html
-# --detailed-exitcodes
-# Provide transaction information via exit codes. If this is enabled, an exit
-# code of '2' means there were changes, an exit code of '4' means there were
-# failures during the transaction, and an exit code of '6' means there were
-# both changes and failures.
+from solar import errors


 class ResourceSSHMixin(object):

@@ -48,6 +35,10 @@ class ResourceSSHMixin(object):
                 fabric_api.shell_env(**kwargs['env'])
             )

+        if 'warn_only' in kwargs:
+            managers.append(
+                fabric_api.warn_only())
+
         with nested(*managers):
             return executor(' '.join(args))

@@ -161,14 +152,21 @@ class Puppet(ResourceSSHMixin, TempFileHandler):

         self._scp_command(resource, action_file, '/tmp/action.pp')

-        self._ssh_command(
+        cmd = self._ssh_command(
             resource,
-            'puppet', 'apply', '-vd', '/tmp/action.pp',
+            'puppet', 'apply', '-vd', '/tmp/action.pp', '--detailed-exitcodes',
             env={
                 'FACTER_resource_name': resource.name,
             },
-            use_sudo=True
+            use_sudo=True,
+            warn_only=True,
         )
+        # 0 - no changes, 2 - successful changes
+        if cmd.return_code not in [0, 2]:
+            raise errors.SolarError(
+                'Puppet for {} failed with {}'.format(
+                    resource.name, cmd.return_code))
+        return cmd

     def clone_manifests(self, resource):
         git = resource.args['git'].value
37
solar/solar/orchestration/executor.py
Normal file
37
solar/solar/orchestration/executor.py
Normal file
@ -0,0 +1,37 @@
from solar.orchestration.runner import app
from celery import group


def celery_executor(dg, tasks, control_tasks=()):
    to_execute = []

    for task_name in tasks:

        # task_id needs to be unique, so for each plan we will use
        # generated uid of this plan and task_name
        task_id = '{}:{}'.format(dg.graph['uid'], task_name)
        task = app.tasks[dg.node[task_name]['type']]

        if all_success(dg, dg.predecessors(task_name)) or task_name in control_tasks:
            dg.node[task_name]['status'] = 'INPROGRESS'
            for t in generate_task(task, dg.node[task_name], task_id):
                to_execute.append(t)
    return group(to_execute)


def generate_task(task, data, task_id):

    subtask = task.subtask(
        data['args'], task_id=task_id,
        time_limit=data.get('time_limit', None),
        soft_time_limit=data.get('soft_time_limit', None))

    if data.get('target', None):
        subtask.set(queue=data['target'])

    yield subtask


def all_success(dg, nodes):
    return all(dg.node[n]['status'] == 'SUCCESS' for n in nodes)
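Illustrative usage of the new executor, mirroring the unit test added later in this commit: build a one-task graph and hand it to `celery_executor`. It assumes a task named 'echo' is registered on the celery app:
```
import networkx as nx

from solar.orchestration.executor import celery_executor

dg = nx.DiGraph()
dg.add_node('t1', args=['hello'], status='PENDING', type='echo')
dg.graph['uid'] = 'plan-uid'

execution = celery_executor(dg, ['t1'])  # returns a celery group
# execution()  # would schedule the subtask 'plan-uid:t1' asynchronously
```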
63
solar/solar/orchestration/limits.py
Normal file
@ -0,0 +1,63 @@
class Chain(object):

    def __init__(self, dg, inprogress, added):
        self.dg = dg
        self.inprogress = inprogress
        self.added = added
        self.rules = []

    def add_rule(self, rule):
        self.rules.append(rule)

    @property
    def filtered(self):
        for item in self.added:
            for rule in self.rules:
                if not rule(self.dg, self.inprogress, item):
                    break
            else:
                self.inprogress.append(item)
                yield item

    def __iter__(self):
        return iter(self.filtered)


def get_default_chain(dg, inprogress, added):
    chain = Chain(dg, inprogress, added)
    chain.add_rule(items_rule)
    chain.add_rule(target_based_rule)
    chain.add_rule(type_based_rule)
    return chain


def type_based_rule(dg, inprogress, item):
    """condition will be specified like:
    type_limit: 2
    """
    _type = dg.node[item].get('resource_type')
    if 'type_limit' not in dg.node[item]:
        return True
    if not _type:
        return True

    type_count = 0
    for n in inprogress:
        if dg.node[n].get('resource_type') == _type:
            type_count += 1
    return dg.node[item]['type_limit'] > type_count


def target_based_rule(dg, inprogress, item, limit=1):
    target = dg.node[item].get('target')
    if not target:
        return True

    target_count = 0
    for n in inprogress:
        if dg.node[n].get('target') == target:
            target_count += 1
    return limit > target_count


def items_rule(dg, inprogress, item, limit=100):
    return len(inprogress) < limit
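Any callable with the `(dg, inprogress, item)` signature can act as a rule; returning False drops the item from this scheduling round. A hedged sketch of plugging a custom rule into the chain; `max_two_total` is hypothetical and `dg` is a networkx DiGraph of tasks, as elsewhere in this commit:
```
def max_two_total(dg, inprogress, item, limit=2):
    # Illustrative rule: cap global concurrency at two tasks,
    # regardless of their type or target.
    return len(inprogress) < limit


chain = Chain(dg, inprogress=[], added=['t1', 't2', 't3'])
chain.add_rule(max_two_total)
allowed = list(chain)  # at most two of the added items pass the filter
```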
@ -5,8 +5,6 @@ import subprocess
import time

from celery.app import task
from celery import group
from celery.exceptions import Ignore
import redis

from solar.orchestration import graph
@ -14,6 +12,9 @@ from solar.core import actions
from solar.core import resource
from solar.system_log.tasks import commit_logitem, error_logitem
from solar.orchestration.runner import app
from solar.orchestration.traversal import traverse
from solar.orchestration import limits
from solar.orchestration import executor


r = redis.StrictRedis(host='10.0.0.2', port=6379, db=1)
@ -23,7 +24,7 @@ __all__ = ['solar_resource', 'cmd', 'sleep',
           'error', 'fault_tolerance', 'schedule_start', 'schedule_next']

# NOTE(dshulyak) i am not using celery.signals because it is not possible
# to extrace task_id from *task_success* signal
# to extract task_id from *task_success* signal
class ReportTask(task.Task):

    def on_success(self, retval, task_id, args, kwargs):
@ -41,13 +42,13 @@ class ReportTask(task.Task):
report_task = partial(app.task, base=ReportTask, bind=True)


@report_task
@report_task(name='solar_resource')
def solar_resource(ctxt, resource_name, action):
    res = resource.load(resource_name)
    return actions.resource_action(res, action)


@report_task
@report_task(name='cmd')
def cmd(ctxt, cmd):
    popen = subprocess.Popen(
        cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
@ -58,17 +59,17 @@ def cmd(ctxt, cmd):
    return popen.returncode, out, err


@report_task
@report_task(name='sleep')
def sleep(ctxt, seconds):
    time.sleep(seconds)


@report_task
@report_task(name='error')
def error(ctxt, message):
    raise Exception(message)


@report_task
@report_task(name='fault_tolerance')
def fault_tolerance(ctxt, percent):
    task_id = ctxt.request.id
    plan_uid, task_name = task_id.rsplit(':', 1)
@ -88,12 +89,12 @@ def fault_tolerance(ctxt, percent):
            succes_percent, percent))


@report_task
@report_task(name='echo')
def echo(ctxt, message):
    return message


@report_task
@report_task(name='anchor')
def anchor(ctxt, *args):
    # such tasks should be walked when at least 1/3/exact number of resources is visited
    dg = graph.get_graph('current')
@ -103,12 +104,18 @@ def anchor(ctxt, *args):


def schedule(plan_uid, dg):
    next_tasks = list(traverse(dg))
    tasks = traverse(dg)
    limit_chain = limits.get_default_chain(
        dg,
        [t for t in dg if dg.node[t]['status'] == 'INPROGRESS'],
        tasks)
    execution = executor.celery_executor(
        dg, limit_chain, control_tasks=('fault_tolerance',))
    graph.save_graph(plan_uid, dg)
    group(next_tasks)()
    execution()


@app.task
@app.task(name='schedule_start')
def schedule_start(plan_uid, start=None, end=None):
    """On receive finished task should update storage with task result:

@ -119,7 +126,7 @@ def schedule_start(plan_uid, start=None, end=None):
    schedule(plan_uid, dg)


@app.task
@app.task(name='soft_stop')
def soft_stop(plan_uid):
    dg = graph.get_graph(plan_uid)
    for n in dg:
@ -128,7 +135,7 @@ def soft_stop(plan_uid):
    graph.save_graph(plan_uid, dg)


@app.task
@app.task(name='schedule_next')
def schedule_next(task_id, status, errmsg=None):
    plan_uid, task_name = task_id.rsplit(':', 1)
    dg = graph.get_graph(plan_uid)
@ -136,62 +143,3 @@ def schedule_next(task_id, status, errmsg=None):
        dg.node[task_name]['errmsg'] = errmsg

    schedule(plan_uid, dg)

# TODO(dshulyak) some tasks should be evaluated even if not all predecessors
# succeeded, how to identify this?
# - add ignore_error on edge
# - add ignore_predecessor_errors on task in consideration
# - make fault_tolerance not a task but a policy for all tasks
control_tasks = [fault_tolerance, anchor]


def traverse(dg):
    """
    1. Node should be visited only when all predecessors already visited
    2. Visited nodes should have any state except PENDING, INPROGRESS; for now
       it is SUCCESS or ERROR, but it can be extended
    3. If node is INPROGRESS it should not be visited once again
    """
    visited = set()
    for node in dg:
        data = dg.node[node]
        if data['status'] not in ('PENDING', 'INPROGRESS', 'SKIPPED'):
            visited.add(node)

    for node in dg:
        data = dg.node[node]

        if node in visited:
            continue
        elif data['status'] in ('INPROGRESS', 'SKIPPED'):
            continue

        predecessors = set(dg.predecessors(node))

        if predecessors <= visited:
            task_id = '{}:{}'.format(dg.graph['uid'], node)

            task_name = '{}.{}'.format(__name__, data['type'])
            task = app.tasks[task_name]

            if all_success(dg, predecessors) or task in control_tasks:
                dg.node[node]['status'] = 'INPROGRESS'
                for t in generate_task(task, dg, data, task_id):
                    yield t


def generate_task(task, dg, data, task_id):

    subtask = task.subtask(
        data['args'], task_id=task_id,
        time_limit=data.get('time_limit', None),
        soft_time_limit=data.get('soft_time_limit', None))

    if data.get('target', None):
        subtask.set(queue=data['target'])

    yield subtask


def all_success(dg, nodes):
    return all(dg.node[n]['status'] == 'SUCCESS' for n in nodes)
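Taken together, the tasks.py changes replace the inline traversal above with a three-stage pipeline, and the explicit `name=` arguments make celery registry keys like 'echo' match the `type` values stored on graph nodes, which is exactly how the executor looks tasks up (`app.tasks[dg.node[task_name]['type']]`). A condensed restatement of the new `schedule` body, with the same calls and illustrative comments:
```
def schedule_sketch(plan_uid, dg):
    # 1. traversal: yield tasks whose predecessors are all visited
    tasks = traverse(dg)
    # 2. limits: filter against per-target, per-type and global caps
    inprogress = [t for t in dg if dg.node[t]['status'] == 'INPROGRESS']
    limit_chain = limits.get_default_chain(dg, inprogress, tasks)
    # 3. execution: fan the survivors out as one celery group
    execution = executor.celery_executor(
        dg, limit_chain, control_tasks=('fault_tolerance',))
    graph.save_graph(plan_uid, dg)
    execution()
```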
36
solar/solar/orchestration/traversal.py
Normal file
@ -0,0 +1,36 @@
"""
A task should be visited only when all of its predecessors have been
visited; a visited node can only be in SUCCESS or ERROR.

A task can be scheduled for execution if it is not yet visited and its
state is not in SKIPPED, INPROGRESS.

PENDING - task that is scheduled to be executed
ERROR - visited node, but failed, can be failed by timeout
SUCCESS - visited node, successful
INPROGRESS - task already scheduled, can be moved to ERROR or SUCCESS
SKIPPED - not visited, and should be skipped from execution
"""


VISITED = ('SUCCESS', 'ERROR', 'NOOP')
BLOCKED = ('INPROGRESS', 'SKIPPED')


def traverse(dg):

    visited = set()
    for node in dg:
        data = dg.node[node]
        if data['status'] in VISITED:
            visited.add(node)

    for node in dg:
        data = dg.node[node]

        if node in visited or data['status'] in BLOCKED:
            continue

        if set(dg.predecessors(node)) <= visited:
            yield node
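The state table above is what distinguishes SKIPPED (blocks the whole subtree) from NOOP (counts as visited). A small illustration using the same `traverse`, assuming the networkx 1.x API the tests below also use:
```
import networkx as nx

from solar.orchestration.traversal import traverse

dg = nx.DiGraph()
dg.add_path(['t1', 't2'])
dg.node['t1']['status'] = 'SKIPPED'  # blocked: nothing downstream is yielded
dg.node['t2']['status'] = 'PENDING'
assert set(traverse(dg)) == set()

dg.node['t1']['status'] = 'NOOP'     # counts as visited: t2 becomes ready
assert set(traverse(dg)) == {'t2'}
```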
@ -89,7 +89,7 @@ def send_to_orchestration():
              for r in resource.load_all().values()}
    commited = data.CD()

    for res_uid in conn_graph:
    for res_uid in nx.topological_sort(conn_graph):
        commited_data = commited.get(res_uid, {})
        staged_data = staged.get(res_uid, {})

@ -101,7 +101,8 @@ def send_to_orchestration():
            errmsg=None,
            **parameters(res_uid, guess_action(commited_data, staged_data)))
        for pred in conn_graph.predecessors(res_uid):
            dg.add_edge(pred, res_uid)
            if pred in dg:
                dg.add_edge(pred, res_uid)

    # what it should be?
    dg.graph['name'] = 'system_log'
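The fix above walks resources in topological order and only draws an edge when the predecessor actually made it into the deployment graph. A condensed, hypothetical version of that guard; `build_plan` and `needs_task` stand in for the surrounding staged/committed comparison logic:
```
import networkx as nx


def build_plan(conn_graph, needs_task):
    dg = nx.DiGraph()
    for res_uid in nx.topological_sort(conn_graph):
        if not needs_task(res_uid):
            continue
        dg.add_node(res_uid)
        for pred in conn_graph.predecessors(res_uid):
            if pred in dg:  # predecessors with no task produce no edge
                dg.add_edge(pred, res_uid)
    return dg
```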
@ -4,7 +4,7 @@ import tempfile
import unittest
import yaml

from solar.core import virtual_resource as vr
from solar.core.resource import virtual_resource as vr
from solar.core import signals as xs
from solar.interfaces.db import get_db

22
solar/solar/test/test_celery_executor.py
Normal file
@ -0,0 +1,22 @@
import networkx as nx
from pytest import fixture
from mock import patch

from solar.orchestration import executor


@fixture
def dg():
    ex = nx.DiGraph()
    ex.add_node('t1', args=['t'], status='PENDING', type='echo')
    ex.graph['uid'] = 'some_string'
    return ex


@patch.object(executor, 'app')
def test_celery_executor(mapp, dg):
    """Just check that it doesn't fail for now.
    """
    assert executor.celery_executor(dg, ['t1'])
    assert dg.node['t1']['status'] == 'INPROGRESS'
@ -3,7 +3,7 @@ from pytest import fixture
from dictdiffer import revert, patch
import networkx as nx

from solar import operations
from solar.system_log import change
from solar.core.resource import wrap_resource


@ -32,12 +32,12 @@ def commited():

@fixture
def full_diff(staged):
    return operations.create_diff(staged, {})
    return change.create_diff(staged, {})


@fixture
def diff_for_update(staged, commited):
    return operations.create_diff(staged, commited)
    return change.create_diff(staged, commited)


def test_create_diff_with_empty_commited(full_diff):
@ -98,7 +98,7 @@ def conn_graph():

def test_stage_changes(resources, conn_graph):
    commited = {}
    log = operations._stage_changes(resources, conn_graph, commited, [])
    log = change._stage_changes(resources, conn_graph, commited, [])

    assert len(log) == 3
    assert [l.res for l in log] == ['n.1', 'r.1', 'h.1']

50
solar/solar/test/test_limits.py
Normal file
@ -0,0 +1,50 @@
from pytest import fixture
import networkx as nx

from solar.orchestration import limits


@fixture
def dg():
    ex = nx.DiGraph()
    ex.add_node('t1', status='PENDING', target='1',
                resource_type='node', type_limit=2)
    ex.add_node('t2', status='PENDING', target='1',
                resource_type='node', type_limit=2)
    ex.add_node('t3', status='PENDING', target='1',
                resource_type='node', type_limit=2)
    return ex


def test_target_rule(dg):

    assert limits.target_based_rule(dg, [], 't1') == True
    assert limits.target_based_rule(dg, ['t1'], 't2') == False


def test_type_limit_rule(dg):
    assert limits.type_based_rule(dg, ['t1'], 't2') == True
    assert limits.type_based_rule(dg, ['t1', 't2'], 't3') == False


def test_items_rule(dg):

    assert limits.items_rule(dg, ['1']*99, '2')
    assert limits.items_rule(dg, ['1']*99, '2', limit=10) == False


@fixture
def target_dg():
    ex = nx.DiGraph()
    ex.add_node('t1', status='PENDING', target='1')
    ex.add_node('t2', status='PENDING', target='1')

    return ex


def test_filtering_chain(target_dg):

    chain = limits.get_default_chain(target_dg, [], ['t1', 't2'])
    assert list(chain) == ['t1']
@ -1,74 +0,0 @@
import pytest

from solar.core import resource
from solar import operations
from solar import state


@pytest.fixture
def default_resources():
    from solar.core import signals
    from solar.core import resource

    node1 = resource.wrap_resource(
        {'id': 'node1',
         'input': {'ip': {'value': '10.0.0.3'}}})
    rabbitmq_service1 = resource.wrap_resource(
        {'id': 'rabbitmq',
         'input': {
             'ip': {'value': ''},
             'image': {'value': 'rabbitmq:3-management'}}})
    signals.connect(node1, rabbitmq_service1)
    return resource.load_all()


@pytest.mark.usefixtures("default_resources")
def test_changes_on_update_image():
    log = operations.stage_changes()

    assert len(log) == 2

    operations.commit_changes()

    rabbitmq = resource.load('rabbitmq')
    rabbitmq.update({'image': 'different'})
    log = operations.stage_changes()

    assert len(log) == 1

    item = log.items[0]

    assert item.diff == [
        ('change', u'input.image.value',
         (u'rabbitmq:3-management', u'different')),
        ('change', u'metadata.input.image.value',
         (u'rabbitmq:3-management', u'different'))]

    assert item.action == 'update'

    operations.commit_changes()

    commited = state.CD()

    assert commited['rabbitmq']['input']['image'] == {
        u'emitter': None, u'value': u'different'}

    reverse = operations.rollback(state.CL().items[-1])

    assert reverse.diff == [
        ('change', u'input.image.value',
         (u'different', u'rabbitmq:3-management')),
        ('change', u'metadata.input.image.value',
         (u'different', u'rabbitmq:3-management'))]

    operations.commit_changes()

    commited = state.CD()

    assert commited['rabbitmq']['input']['image'] == {
        u'emitter': None, u'value': u'rabbitmq:3-management'}
56
solar/solar/test/test_traversal.py
Normal file
@ -0,0 +1,56 @@
import networkx as nx
from pytest import fixture

from solar.orchestration.traversal import traverse


@fixture
def tasks():
    return [
        {'id': 't1', 'status': 'PENDING'},
        {'id': 't2', 'status': 'PENDING'},
        {'id': 't3', 'status': 'PENDING'},
        {'id': 't4', 'status': 'PENDING'},
        {'id': 't5', 'status': 'PENDING'}]


@fixture
def dg(tasks):
    ex = nx.DiGraph()
    for t in tasks:
        ex.add_node(t['id'], status=t['status'])
    return ex


def test_parallel(dg):
    dg.add_path(['t1', 't3', 't4', 't5'])
    dg.add_path(['t2', 't3'])

    assert set(traverse(dg)) == {'t1', 't2'}


def test_walked_only_when_all_predecessors_visited(dg):
    dg.add_path(['t1', 't3', 't4', 't5'])
    dg.add_path(['t2', 't3'])

    dg.node['t1']['status'] = 'SUCCESS'
    dg.node['t2']['status'] = 'INPROGRESS'

    assert set(traverse(dg)) == set()

    dg.node['t2']['status'] = 'SUCCESS'

    assert set(traverse(dg)) == {'t3'}


def test_nothing_will_be_walked_if_parent_is_skipped(dg):
    dg.add_path(['t1', 't2', 't3', 't4', 't5'])
    dg.node['t1']['status'] = 'SKIPPED'

    assert set(traverse(dg)) == set()


def test_node_will_be_walked_if_parent_is_noop(dg):
    dg.add_path(['t1', 't2', 't3', 't4', 't5'])
    dg.node['t1']['status'] = 'NOOP'

    assert set(traverse(dg)) == {'t2'}
@ -1,169 +0,0 @@
import pytest

from solar.core import signals
from solar.core import resource
from solar import operations

@pytest.fixture
def resources():

    node1 = resource.wrap_resource(
        {'id': 'node1',
         'input': {'ip': {'value': '10.0.0.3'}}})
    mariadb_service1 = resource.wrap_resource(
        {'id': 'mariadb',
         'input': {
             'port': {'value': 3306},
             'ip': {'value': ''}}})
    keystone_db = resource.wrap_resource(
        {'id': 'keystone_db',
         'input': {
             'login_port': {'value': ''},
             'ip': {'value': ''}}})
    signals.connect(node1, mariadb_service1)
    signals.connect(node1, keystone_db)
    signals.connect(mariadb_service1, keystone_db, {'port': 'login_port'})
    return resource.load_all()


def test_update_port_on_mariadb(resources):
    operations.stage_changes()
    operations.commit_changes()

    mariadb = resources['mariadb']

    mariadb.update({'port': 4400})

    log = operations.stage_changes()

    assert len(log) == 2

    mariadb_log = log.items[0]

    assert mariadb_log.diff == [
        ('change', u'input.port.value', (3306, 4400)),
        ('change', u'metadata.input.port.value', (3306, 4400))]

    keystone_db_log = log.items[1]

    assert keystone_db_log.diff == [
        ('change', u'input.login_port.value', (3306, 4400)),
        ('change', u'metadata.input.login_port.value', (3306, 4400))]


@pytest.fixture
def simple_input():
    res1 = resource.wrap_resource(
        {'id': 'res1',
         'input': {'ip': {'value': '10.10.0.2'}}})
    res2 = resource.wrap_resource(
        {'id': 'res2',
         'input': {'ip': {'value': '10.10.0.3'}}})

    signals.connect(res1, res2)
    return resource.load_all()


def test_update_simple_resource(simple_input):
    operations.stage_changes()
    operations.commit_changes()

    res1 = simple_input['res1']
    res1.update({'ip': '10.0.0.3'})

    log = operations.stage_changes()

    assert len(log) == 2

    assert log.items[0].diff == [
        ('change', u'input.ip.value', ('10.10.0.2', '10.0.0.3')),
        ('change', 'metadata.input.ip.value', ('10.10.0.2', '10.0.0.3')),
    ]
    assert log.items[1].diff == [
        ('change', u'input.ip.value', ('10.10.0.2', '10.0.0.3')),
        ('change', 'metadata.input.ip.value', ('10.10.0.2', '10.0.0.3')),
    ]

    operations.commit_changes()
    assert simple_input['res1'].args_dict() == {
        'ip': '10.0.0.3',
    }
    assert simple_input['res2'].args_dict() == {
        'ip': '10.0.0.3',
    }

    log_item = operations.rollback_last()
    assert log_item.diff == [
        ('change', u'input.ip.value', (u'10.0.0.3', u'10.10.0.2')),
        ('change', 'metadata.input.ip.value', ('10.0.0.3', '10.10.0.2')),
    ]

    res2 = resource.load('res2')
    assert res2.args_dict() == {
        'ip': '10.10.0.2',
    }


@pytest.fixture
def list_input():
    res1 = resource.wrap_resource(
        {'id': 'res1',
         'input': {'ip': {'value': '10.10.0.2'}}})
    res2 = resource.wrap_resource(
        {'id': 'res2',
         'input': {'ip': {'value': '10.10.0.3'}}})
    consumer = resource.wrap_resource(
        {'id': 'consumer',
         'input':
             {'ips': {'value': [],
                      'schema': ['str']}}})

    signals.connect(res1, consumer, {'ip': 'ips'})
    signals.connect(res2, consumer, {'ip': 'ips'})
    return resource.load_all()


def test_update_list_resource(list_input):
    operations.stage_changes()
    operations.commit_changes()

    res3 = resource.wrap_resource(
        {'id': 'res3',
         'input': {'ip': {'value': '10.10.0.4'}}})
    signals.connect(res3, list_input['consumer'], {'ip': 'ips'})

    log = operations.stage_changes()

    assert len(log) == 2

    assert log.items[0].res == res3.name
    assert log.items[1].diff == [
        ('add', u'connections', [(2, ['res3', u'consumer', ['ip', 'ips']])]),
        ('add', u'input.ips', [
            (2, {u'emitter_attached_to': u'res3', u'emitter': u'ip', u'value': u'10.10.0.4'})]),
        ('add', u'metadata.input.ips.value',
         [(2, {u'emitter_attached_to': u'res3', u'emitter': u'ip', u'value': u'10.10.0.4'})])]

    operations.commit_changes()
    assert list_input['consumer'].args_dict() == {
        u'ips': [
            {u'emitter_attached_to': u'res1', u'emitter': u'ip', u'value': u'10.10.0.2'},
            {u'emitter_attached_to': u'res2', u'emitter': u'ip', u'value': u'10.10.0.3'},
            {u'emitter_attached_to': u'res3', u'emitter': u'ip', u'value': u'10.10.0.4'}]}

    log_item = operations.rollback_last()
    assert log_item.diff == [
        ('remove', u'connections', [(2, ['res3', u'consumer', ['ip', 'ips']])]),
        ('remove', u'input.ips', [
            (2, {u'emitter_attached_to': u'res3', u'emitter': u'ip', u'value': u'10.10.0.4'})]),
        ('remove', u'metadata.input.ips.value',
         [(2, {u'emitter_attached_to': u'res3', u'emitter': u'ip', u'value': u'10.10.0.4'})])]

    consumer = resource.load('consumer')
    assert consumer.args_dict() == {
        u'ips': [{u'emitter': u'ip',
                  u'emitter_attached_to': u'res1',
                  u'value': u'10.10.0.2'},
                 {u'emitter': u'ip',
                  u'emitter_attached_to': u'res2',
                  u'value': u'10.10.0.3'}]}
2
solar/test-requirements.txt
Normal file
@ -0,0 +1,2 @@
-r requirements.txt
mock
@ -25,6 +25,7 @@ resources:
      login_password: 'mariadb_service::root_password'
      login_port: 'mariadb_service::port'
      db_name: 'keystone'
      db_host: 'mariadb_service::ip'
      user_password: 'keystone'
      user_name: 'keystone'
      ip: '{{ip}}'
@ -35,19 +36,18 @@ resources:
    from: templates/keystone_api.yml
    values:
      idx: 1
      image: 'kollaglue/centos-rdo-k-keystone'
      config_dir: '/etc/solar/keystone_config_1'
      db_password: 'keystone_db_user::user_password'
      db_user: 'keystone_db_user::user_name'
      db_port: 'keystone_db_user::login_port'
      db_name: 'keystone_db_user::db_name'
      db_host: 'mariadb_service::ip'
      admin_token: 132fdsfwqee
      admin_port: 35357
      port: 5000
      ip: '{{ip}}'
      ssh_user: '{{ssh_user}}'
      ssh_key: '{{ssh_key}}'

# TODO: HAproxy

  - id: openstack_base
    from: templates/openstack_base.yml
    values:
@ -57,34 +57,20 @@ resources:
      keystone_ip: 'keystone_service_1::ip'
      keystone_admin_port: 'keystone_service_1::admin_port'
      keystone_port: 'keystone_service_1::port'
      admin_token: 'keystone_config_1::admin_token'
      admin_token: 'keystone_service_1::admin_token'

  - id: glance_base
    from: templates/glance_base.yml
  - id: openrc_file
    from: resources/openrc_file
    values:
      login_user: root
      login_password: 'mariadb_service::root_password'
      login_port: 'mariadb_service::port'
      db_name: 'glance'
      user_password: 'glance'
      user_name: 'glance'
      keystone_host: 'keystone_service_1::ip'
      keystone_port: 'keystone_service_1::admin_port'
      tenant: 'admin_user::tenant_name'
      user_name: 'admin_user::user_name'
      password: 'admin_user::user_password'

      ip: '{{ip}}'
      ssh_user: '{{ssh_user}}'
      ssh_key: '{{ssh_key}}'

  - id: glance_registry_1
    from: templates/glance_registry.yml
    values:
      idx: 1
      keystone_admin_port: 'keystone_service_1::admin_port'
      keystone_ip: 'keystone_service_1::ip'
      mysql_password: 'glance_db_user::user_password'
      mysql_user: 'keystone_db_user::user_name'
      mysql_db: 'keystone_db_user::db_name'
      mysql_ip: 'mariadb_service::ip'
      ip: '{{ip}}'
      ssh_user: '{{ssh_user}}'
      ssh_key: '{{ssh_key}}'

tags: ['resources/controller', 'resource/primary_controller']


@ -1,27 +1,20 @@
id: keystone_api_{{idx}}

resources:
  - id: keystone_config_{{idx}}
    from: resources/keystone_config
  - id: keystone_service_{{idx}}
    from: resources/keystone_puppet
    values:
      config_dir: '/etc/solar/keystone_{{idx}}'
      admin_token: '{{admin_token}}'
      db_host: '{{db_host}}'
      db_port: '{{db_port}}'
      db_name: '{{db_name}}'
      db_user: '{{db_user}}'
      db_password: '{{db_password}}'

      admin_port: {{admin_port}}
      port: {{port}}
      ip: '{{ip}}'
      ssh_user: '{{ssh_user}}'
      ssh_key: '{{ssh_key}}'


  - id: keystone_service_{{idx}}
    from: resources/keystone_service
    values:
      image: 'kollaglue/centos-rdo-j-keystone'
      config_dir: 'keystone_config_{{idx}}::config_dir'
      ip: 'keystone_config_{{idx}}::ip'
      ssh_user: 'keystone_config_{{idx}}::ssh_user'
      ssh_key: 'keystone_config_{{idx}}::ssh_key'

tags: ['resources/keystone', 'resource/keystone_api']

@ -4,7 +4,8 @@ resources:
  - id: keystone_db
    from: resources/mariadb_db
    values:
      db_name: {{db_name}}
      db_name: '{{db_name}}'
      db_host: '{{db_host}}'
      login_user: '{{login_user}}'
      login_password: '{{login_password}}'
      login_port: '{{login_port}}'
@ -18,6 +19,7 @@ resources:
      user_password: '{{user_password}}'
      user_name: '{{user_name}}'
      db_name: 'keystone_db::db_name'
      db_host: '{{db_host}}'
      login_user: 'keystone_db::login_user'
      login_password: 'keystone_db::login_password'
      login_port: 'keystone_db::login_port'

@ -25,6 +25,19 @@ resources:
      ssh_user: '{{ssh_user}}'
      ssh_key: '{{ssh_key}}'

  - id: admin_role
    from: resources/keystone_role
    values:
      role_name: 'admin'
      user_name: 'admin_user::user_name'
      tenant_name: 'admin_user::tenant_name'
      keystone_port: '{{keystone_admin_port}}'
      keystone_host: '{{keystone_ip}}'
      admin_token: '{{admin_token}}'
      ip: '{{ip}}'
      ssh_user: '{{ssh_user}}'
      ssh_key: '{{ssh_key}}'

  - id: keystone_service_endpoint
    from: resources/keystone_service_endpoint
    values:
@ -35,7 +48,7 @@ resources:
      {% endraw %}
      description: 'OpenStack Identity Service'
      type: 'identity'
      name: 'keystone'
      endpoint_name: 'keystone'
      admin_port: '{{keystone_admin_port}}'
      public_port: '{{keystone_port}}'
      internal_port: '{{keystone_port}}'