Resource providers added (with GitProvider as sample)

- all keystone_* resources are moved to a separate repository
- example.py adjusted

parent d59ee0a96d
commit 477d68e604
@@ -11,6 +11,7 @@ file-system-db:
template-dir: /vagrant/templates

resources-directory: /tmp/git
resources-files-mask: /vagrant/resources/*/*.yaml
node_resource_template: /vagrant/resources/ro_node/
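The new resources-directory key is consumed by the GitProvider introduced further down. A minimal sketch of how it is read, assuming utils.read_config() returns this YAML file as a dict (as the provider code below does):

    from solar import utils

    # GitProvider joins this directory with the repository name to build the
    # local checkout path (see solar/solar/core/resource_provider.py below).
    resources_directory = utils.read_config()['resources-directory']  # '/tmp/git'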
example.py
@@ -10,6 +10,10 @@ from solar.core import signals
from solar.core import validation

from solar.interfaces.db import get_db
from solar.core.resource_provider import GitProvider


GIT_KEYSTONE_RESOURCE_URL = 'https://github.com/CGenie/keystone-resource'


@click.group()
@@ -35,11 +39,11 @@ def deploy():
    keystone_db = resource.create('keystone_db', 'resources/mariadb_keystone_db/', {'db_name': 'keystone_db', 'login_user': 'root'})
    keystone_db_user = resource.create('keystone_db_user', 'resources/mariadb_keystone_user/', {'new_user_name': 'keystone', 'new_user_password': 'keystone', 'login_user': 'root'})

    keystone_config1 = resource.create('keystone_config1', 'resources/keystone_config/', {'config_dir': '/etc/solar/keystone', 'admin_token': 'admin'})
    keystone_service1 = resource.create('keystone_service1', 'resources/keystone_service/', {'port': 5001, 'admin_port': 35357})
    keystone_config1 = resource.create('keystone_config1', GitProvider(GIT_KEYSTONE_RESOURCE_URL, path='keystone_config'), {'config_dir': '/etc/solar/keystone', 'admin_token': 'admin'})
    keystone_service1 = resource.create('keystone_service1', GitProvider(GIT_KEYSTONE_RESOURCE_URL, 'keystone_service'), {'port': 5001, 'admin_port': 35357})

    keystone_config2 = resource.create('keystone_config2', 'resources/keystone_config/', {'config_dir': '/etc/solar/keystone', 'admin_token': 'admin'})
    keystone_service2 = resource.create('keystone_service2', 'resources/keystone_service/', {'port': 5002, 'admin_port': 35358})
    keystone_config2 = resource.create('keystone_config2', GitProvider(GIT_KEYSTONE_RESOURCE_URL, 'keystone_config'), {'config_dir': '/etc/solar/keystone', 'admin_token': 'admin'})
    keystone_service2 = resource.create('keystone_service2', GitProvider(GIT_KEYSTONE_RESOURCE_URL, 'keystone_service'), {'port': 5002, 'admin_port': 35358})

    haproxy_keystone_config = resource.create('haproxy_keystone1_config', 'resources/haproxy_service_config/', {'name': 'keystone_config', 'listen_port': 5000, 'servers':[], 'ports':[]})
    haproxy_config = resource.create('haproxy_config', 'resources/haproxy_config', {'configs_names':[], 'configs_ports':[], 'listen_ports':[], 'configs':[]})
@@ -48,10 +52,10 @@ def deploy():
    glance_db = resource.create('glance_db', 'resources/mariadb_db/', {'db_name': 'glance_db', 'login_user': 'root'})
    glance_db_user = resource.create('glance_db_user', 'resources/mariadb_user/', {'new_user_name': 'glance', 'new_user_password': 'glance', 'login_user': 'root'})

    services_tenant = resource.create('glance_keystone_tenant', 'resources/keystone_tenant', {'tenant_name': 'services'})
    services_tenant = resource.create('glance_keystone_tenant', GitProvider(GIT_KEYSTONE_RESOURCE_URL, 'keystone_tenant'), {'tenant_name': 'services'})

    glance_keystone_user = resource.create('glance_keystone_user', 'resources/keystone_user', {'user_name': 'glance_admin', 'user_password': 'password1234', 'tenant_name': 'service_admins'})
    glance_keystone_role = resource.create('glance_keystone_role', 'resources/keystone_role', {'role_name': 'admin'})
    glance_keystone_user = resource.create('glance_keystone_user', GitProvider(GIT_KEYSTONE_RESOURCE_URL, 'keystone_user'), {'user_name': 'glance_admin', 'user_password': 'password1234', 'tenant_name': 'service_admins'})
    glance_keystone_role = resource.create('glance_keystone_role', GitProvider(GIT_KEYSTONE_RESOURCE_URL, 'keystone_role'), {'role_name': 'admin'})

    # TODO: add api_host and registry_host -- they can be different! Currently 'ip' is used.
    glance_config = resource.create('glance_config', 'resources/glance_config/', {'api_port': 9393})
@@ -60,15 +64,15 @@ def deploy():
    # TODO: admin_port should be refactored, we need to rethink docker
    # container resource and make it common for all
    # resources used in this demo
    glance_api_endpoint = resource.create('glance_api_endpoint', 'resources/keystone_service_endpoint/', {'adminurl': 'http://{{ip}}:{{admin_port}}', 'internalurl': 'http://{{ip}}:{{port}}', 'publicurl': 'http://{{ip}}:{{port}}', 'description': 'OpenStack Image Service', 'type': 'image'})
    glance_api_endpoint = resource.create('glance_api_endpoint', GitProvider(GIT_KEYSTONE_RESOURCE_URL, 'keystone_service_endpoint'), {'adminurl': 'http://{{ip}}:{{admin_port}}', 'internalurl': 'http://{{ip}}:{{port}}', 'publicurl': 'http://{{ip}}:{{port}}', 'description': 'OpenStack Image Service', 'type': 'image'})
    # TODO: ports value 9393 is a HACK -- fix glance_api_container's port and move to some config
    # TODO: glance registry container's API port needs to point to haproxy_config
    haproxy_glance_api_config = resource.create('haproxy_glance_api_config', 'resources/haproxy_service_config/', {'name': 'glance_api_config', 'listen_port': 9292, 'servers': [], 'ports':[{'value': 9393}]})

    admin_tenant = resource.create('admin_tenant', 'resources/keystone_tenant', {'tenant_name': 'admin'})
    admin_user = resource.create('admin_user', 'resources/keystone_user', {'user_name': 'admin', 'user_password': 'admin'})
    admin_role = resource.create('admin_role', 'resources/keystone_role', {'role_name': 'admin'})
    keystone_service_endpoint = resource.create('keystone_service_endpoint', 'resources/keystone_service_endpoint/', {'adminurl': 'http://{{ip}}:{{admin_port}}/v2.0', 'internalurl': 'http://{{ip}}:{{port}}/v2.0', 'publicurl': 'http://{{ip}}:{{port}}/v2.0', 'description': 'OpenStack Identity Service', 'type': 'identity'})
    admin_tenant = resource.create('admin_tenant', GitProvider(GIT_KEYSTONE_RESOURCE_URL, 'keystone_tenant'), {'tenant_name': 'admin'})
    admin_user = resource.create('admin_user', GitProvider(GIT_KEYSTONE_RESOURCE_URL, 'keystone_user'), {'user_name': 'admin', 'user_password': 'admin'})
    admin_role = resource.create('admin_role', GitProvider(GIT_KEYSTONE_RESOURCE_URL, 'keystone_role'), {'role_name': 'admin'})
    keystone_service_endpoint = resource.create('keystone_service_endpoint', GitProvider(GIT_KEYSTONE_RESOURCE_URL, 'keystone_service_endpoint'), {'adminurl': 'http://{{ip}}:{{admin_port}}/v2.0', 'internalurl': 'http://{{ip}}:{{port}}/v2.0', 'publicurl': 'http://{{ip}}:{{port}}/v2.0', 'description': 'OpenStack Identity Service', 'type': 'identity'})


    ####
@@ -1,4 +0,0 @@
- hosts: [{{ ip }}]
  sudo: yes
  tasks:
    - file: path={{config_dir}} state=absent
@@ -1,17 +0,0 @@
- hosts: [{{ ip }}]
  sudo: yes
  vars:
    admin_token: {{admin_token}}
    keystone_host: {{ ip }}
    keystone_port: {{ port }}
    db_user: {{db_user}}
    db_password: {{db_password}}
    db_host: {{db_host}}
    db_name: {{db_name}}
  tasks:
    - file: path={{config_dir}} state=directory
    - template: src={{resource_dir}}/templates/keystone.conf dest={{config_dir}}/keystone.conf
    - template: src={{resource_dir}}/templates/default_catalog.templates dest={{config_dir}}/default_catalog.templates
    - template: src={{resource_dir}}/templates/logging.conf dest={{config_dir}}/logging.conf
    - template: src={{resource_dir}}/templates/policy.json dest={{config_dir}}/policy.json
    - template: src={{resource_dir}}/templates/exports dest={{ config_dir }}/keystone-exports
@@ -1,37 +0,0 @@
id: keystone_config
handler: ansible
version: 1.0.0

input:
  config_dir:
    schema: str!
    value: /etc/solar/keystone
  admin_token:
    schema: str!
    value: admin
  db_password:
    schema: str!
    value: password
  db_user:
    schema: str!
    value: keystone
  db_host:
    schema: str!
    value:
  db_port:
    schema: int!
    value:
  db_name:
    schema: str!
    value: keystone
  ip:
    schema: str!
    value:
  ssh_key:
    schema: str!
    value:
  ssh_user:
    schema: str!
    value:

tags: [resource/keystone_config, resources/keystone]
@@ -1,27 +0,0 @@
# config for templated.Catalog, using camelCase because I don't want to do
# translations for keystone compat
catalog.RegionOne.identity.publicURL = http://localhost:$(public_port)s/v2.0
catalog.RegionOne.identity.adminURL = http://localhost:$(admin_port)s/v2.0
catalog.RegionOne.identity.internalURL = http://localhost:$(public_port)s/v2.0
catalog.RegionOne.identity.name = Identity Service

# fake compute service for now to help novaclient tests work
catalog.RegionOne.compute.publicURL = http://localhost:8774/v1.1/$(tenant_id)s
catalog.RegionOne.compute.adminURL = http://localhost:8774/v1.1/$(tenant_id)s
catalog.RegionOne.compute.internalURL = http://localhost:8774/v1.1/$(tenant_id)s
catalog.RegionOne.compute.name = Compute Service

catalog.RegionOne.volume.publicURL = http://localhost:8776/v1/$(tenant_id)s
catalog.RegionOne.volume.adminURL = http://localhost:8776/v1/$(tenant_id)s
catalog.RegionOne.volume.internalURL = http://localhost:8776/v1/$(tenant_id)s
catalog.RegionOne.volume.name = Volume Service

catalog.RegionOne.ec2.publicURL = http://localhost:8773/services/Cloud
catalog.RegionOne.ec2.adminURL = http://localhost:8773/services/Admin
catalog.RegionOne.ec2.internalURL = http://localhost:8773/services/Cloud
catalog.RegionOne.ec2.name = EC2 Service

catalog.RegionOne.image.publicURL = http://localhost:9292/v1
catalog.RegionOne.image.adminURL = http://localhost:9292/v1
catalog.RegionOne.image.internalURL = http://localhost:9292/v1
catalog.RegionOne.image.name = Image Service
@@ -1,2 +0,0 @@
export OS_SERVICE_ENDPOINT=http://localhost:35357/v2.0/
export OS_SERVICE_TOKEN={{ admin_token }}
File diff suppressed because it is too large
@@ -1,65 +0,0 @@
[loggers]
keys=root,access

[handlers]
keys=production,file,access_file,devel

[formatters]
keys=minimal,normal,debug


###########
# Loggers #
###########

[logger_root]
level=WARNING
handlers=file

[logger_access]
level=INFO
qualname=access
handlers=access_file


################
# Log Handlers #
################

[handler_production]
class=handlers.SysLogHandler
level=ERROR
formatter=normal
args=(('localhost', handlers.SYSLOG_UDP_PORT), handlers.SysLogHandler.LOG_USER)

[handler_file]
class=handlers.WatchedFileHandler
level=WARNING
formatter=normal
args=('error.log',)

[handler_access_file]
class=handlers.WatchedFileHandler
level=INFO
formatter=minimal
args=('access.log',)

[handler_devel]
class=StreamHandler
level=NOTSET
formatter=debug
args=(sys.stdout,)


##################
# Log Formatters #
##################

[formatter_minimal]
format=%(message)s

[formatter_normal]
format=(%(name)s): %(asctime)s %(levelname)s %(message)s

[formatter_debug]
format=(%(name)s): %(asctime)s %(levelname)s %(module)s %(funcName)s %(message)s
@@ -1,171 +0,0 @@
{
    "admin_required": "role:admin or is_admin:1",
    "service_role": "role:service",
    "service_or_admin": "rule:admin_required or rule:service_role",
    "owner" : "user_id:%(user_id)s",
    "admin_or_owner": "rule:admin_required or rule:owner",

    "default": "rule:admin_required",

    "identity:get_region": "",
    "identity:list_regions": "",
    "identity:create_region": "rule:admin_required",
    "identity:update_region": "rule:admin_required",
    "identity:delete_region": "rule:admin_required",

    "identity:get_service": "rule:admin_required",
    "identity:list_services": "rule:admin_required",
    "identity:create_service": "rule:admin_required",
    "identity:update_service": "rule:admin_required",
    "identity:delete_service": "rule:admin_required",

    "identity:get_endpoint": "rule:admin_required",
    "identity:list_endpoints": "rule:admin_required",
    "identity:create_endpoint": "rule:admin_required",
    "identity:update_endpoint": "rule:admin_required",
    "identity:delete_endpoint": "rule:admin_required",

    "identity:get_domain": "rule:admin_required",
    "identity:list_domains": "rule:admin_required",
    "identity:create_domain": "rule:admin_required",
    "identity:update_domain": "rule:admin_required",
    "identity:delete_domain": "rule:admin_required",

    "identity:get_project": "rule:admin_required",
    "identity:list_projects": "rule:admin_required",
    "identity:list_user_projects": "rule:admin_or_owner",
    "identity:create_project": "rule:admin_required",
    "identity:update_project": "rule:admin_required",
    "identity:delete_project": "rule:admin_required",

    "identity:get_user": "rule:admin_required",
    "identity:list_users": "rule:admin_required",
    "identity:create_user": "rule:admin_required",
    "identity:update_user": "rule:admin_required",
    "identity:delete_user": "rule:admin_required",
    "identity:change_password": "rule:admin_or_owner",

    "identity:get_group": "rule:admin_required",
    "identity:list_groups": "rule:admin_required",
    "identity:list_groups_for_user": "rule:admin_or_owner",
    "identity:create_group": "rule:admin_required",
    "identity:update_group": "rule:admin_required",
    "identity:delete_group": "rule:admin_required",
    "identity:list_users_in_group": "rule:admin_required",
    "identity:remove_user_from_group": "rule:admin_required",
    "identity:check_user_in_group": "rule:admin_required",
    "identity:add_user_to_group": "rule:admin_required",

    "identity:get_credential": "rule:admin_required",
    "identity:list_credentials": "rule:admin_required",
    "identity:create_credential": "rule:admin_required",
    "identity:update_credential": "rule:admin_required",
    "identity:delete_credential": "rule:admin_required",

    "identity:ec2_get_credential": "rule:admin_or_owner",
    "identity:ec2_list_credentials": "rule:admin_or_owner",
    "identity:ec2_create_credential": "rule:admin_or_owner",
    "identity:ec2_delete_credential": "rule:admin_required or (rule:owner and user_id:%(target.credential.user_id)s)",

    "identity:get_role": "rule:admin_required",
    "identity:list_roles": "rule:admin_required",
    "identity:create_role": "rule:admin_required",
    "identity:update_role": "rule:admin_required",
    "identity:delete_role": "rule:admin_required",

    "identity:check_grant": "rule:admin_required",
    "identity:list_grants": "rule:admin_required",
    "identity:create_grant": "rule:admin_required",
    "identity:revoke_grant": "rule:admin_required",

    "identity:list_role_assignments": "rule:admin_required",

    "identity:get_policy": "rule:admin_required",
    "identity:list_policies": "rule:admin_required",
    "identity:create_policy": "rule:admin_required",
    "identity:update_policy": "rule:admin_required",
    "identity:delete_policy": "rule:admin_required",

    "identity:check_token": "rule:admin_required",
    "identity:validate_token": "rule:service_or_admin",
    "identity:validate_token_head": "rule:service_or_admin",
    "identity:revocation_list": "rule:service_or_admin",
    "identity:revoke_token": "rule:admin_or_owner",

    "identity:create_trust": "user_id:%(trust.trustor_user_id)s",
    "identity:get_trust": "rule:admin_or_owner",
    "identity:list_trusts": "",
    "identity:list_roles_for_trust": "",
    "identity:check_role_for_trust": "",
    "identity:get_role_for_trust": "",
    "identity:delete_trust": "",

    "identity:create_consumer": "rule:admin_required",
    "identity:get_consumer": "rule:admin_required",
    "identity:list_consumers": "rule:admin_required",
    "identity:delete_consumer": "rule:admin_required",
    "identity:update_consumer": "rule:admin_required",

    "identity:authorize_request_token": "rule:admin_required",
    "identity:list_access_token_roles": "rule:admin_required",
    "identity:get_access_token_role": "rule:admin_required",
    "identity:list_access_tokens": "rule:admin_required",
    "identity:get_access_token": "rule:admin_required",
    "identity:delete_access_token": "rule:admin_required",

    "identity:list_projects_for_endpoint": "rule:admin_required",
    "identity:add_endpoint_to_project": "rule:admin_required",
    "identity:check_endpoint_in_project": "rule:admin_required",
    "identity:list_endpoints_for_project": "rule:admin_required",
    "identity:remove_endpoint_from_project": "rule:admin_required",

    "identity:create_endpoint_group": "rule:admin_required",
    "identity:list_endpoint_groups": "rule:admin_required",
    "identity:get_endpoint_group": "rule:admin_required",
    "identity:update_endpoint_group": "rule:admin_required",
    "identity:delete_endpoint_group": "rule:admin_required",
    "identity:list_projects_associated_with_endpoint_group": "rule:admin_required",
    "identity:list_endpoints_associated_with_endpoint_group": "rule:admin_required",
    "identity:list_endpoint_groups_for_project": "rule:admin_required",
    "identity:add_endpoint_group_to_project": "rule:admin_required",
    "identity:remove_endpoint_group_from_project": "rule:admin_required",

    "identity:create_identity_provider": "rule:admin_required",
    "identity:list_identity_providers": "rule:admin_required",
    "identity:get_identity_providers": "rule:admin_required",
    "identity:update_identity_provider": "rule:admin_required",
    "identity:delete_identity_provider": "rule:admin_required",

    "identity:create_protocol": "rule:admin_required",
    "identity:update_protocol": "rule:admin_required",
    "identity:get_protocol": "rule:admin_required",
    "identity:list_protocols": "rule:admin_required",
    "identity:delete_protocol": "rule:admin_required",

    "identity:create_mapping": "rule:admin_required",
    "identity:get_mapping": "rule:admin_required",
    "identity:list_mappings": "rule:admin_required",
    "identity:delete_mapping": "rule:admin_required",
    "identity:update_mapping": "rule:admin_required",

    "identity:get_auth_catalog": "",
    "identity:get_auth_projects": "",
    "identity:get_auth_domains": "",

    "identity:list_projects_for_groups": "",
    "identity:list_domains_for_groups": "",

    "identity:list_revoke_events": "",

    "identity:create_policy_association_for_endpoint": "rule:admin_required",
    "identity:check_policy_association_for_endpoint": "rule:admin_required",
    "identity:delete_policy_association_for_endpoint": "rule:admin_required",
    "identity:create_policy_association_for_service": "rule:admin_required",
    "identity:check_policy_association_for_service": "rule:admin_required",
    "identity:delete_policy_association_for_service": "rule:admin_required",
    "identity:create_policy_association_for_region_and_service": "rule:admin_required",
    "identity:check_policy_association_for_region_and_service": "rule:admin_required",
    "identity:delete_policy_association_for_region_and_service": "rule:admin_required",
    "identity:get_policy_for_endpoint": "rule:admin_required",
    "identity:list_endpoints_for_policy": "rule:admin_required"
}
@@ -1,6 +0,0 @@
- hosts: [{{ ip }}]
  sudo: yes
  tasks:
    - name: keystone role
      #TODO: not implemented in module
      pause: seconds=1
@@ -1,7 +0,0 @@
- hosts: [{{ ip }}]
  sudo: yes
  tasks:
    - name: install python-keystoneclient
      shell: pip install python-keystoneclient
    - name: keystone role
      keystone_user: endpoint=http://{{keystone_host}}:{{keystone_port}}/v2.0/ token={{admin_token}} user={{user_name}} tenant={{tenant_name}} role={{role_name}} state=present
@@ -1,33 +0,0 @@
id: keystone_role
handler: ansible
version: 1.0.0
input:
  keystone_host:
    schema: str!
    value:
  keystone_port:
    schema: int!
    value:
  admin_token:
    schema: str!
    value:
  user_name:
    schema: str!
    value: admin
  tenant_name:
    schema: str!
    value:
  role_name:
    schema: str!
    value: admin
  ip:
    schema: str!
    value:
  ssh_key:
    schema: str!
    value:
  ssh_user:
    schema: str!
    value:

tags: [resource/keystone_role, resources/keystone]
@@ -1,9 +0,0 @@
# TODO
- hosts: [{{ ip }}]
  sudo: yes
  tasks:
    - name: keystone container
      docker:
        image: {{ image }}
        name: {{ name }}
        state: absent
@@ -1,19 +0,0 @@
- hosts: [{{ ip }}]
  sudo: yes
  tasks:
    - name: keystone container
      docker:
        command: /bin/bash -c "keystone-manage db_sync && /usr/bin/keystone-all"
        name: {{ name }}
        image: {{ image }}
        state: running
        expose:
          - 5000
          - 35357
        ports:
          - {{ port }}:5000
          - {{ admin_port }}:35357
        volumes:
          - {{ config_dir }}:/etc/keystone
    - name: wait for keystone
      wait_for: host={{ip}} port={{port}} timeout=20
@@ -1,27 +0,0 @@
id: keystone_service
handler: ansible
version: 1.0.0
input:
  image:
    schema: str!
    value: kollaglue/centos-rdo-j-keystone
  config_dir:
    schema: str!
    value: /etc/solar/keystone
  port:
    schema: int!
    value: 5000
  admin_port:
    schema: int!
    value: 35357
  ip:
    schema: str!
    value:
  ssh_key:
    schema: str!
    value:
  ssh_user:
    schema: str!
    value:

tags: [resource/keystone_service, resources/keystone]
@@ -1,20 +0,0 @@
- hosts: [{{ ip }}]
  sudo: yes
  vars:
    ip: {{ip}}
    port: {{port}}
    admin_port: {{admin_port}}
  tasks:
    - name: remove keystone service and endpoint
      keystone_service:
        token: {{admin_token}}
        name: {{name}}
        type: {{type}}
        description: {{description}}
        publicurl: {{publicurl}}
        internalurl: {{internalurl}}
        adminurl: {{adminurl}}
        region: "RegionOne"
        state: present
        endpoint: http://{{keystone_host}}:{{keystone_port}}/v2.0/
@@ -1,19 +0,0 @@
- hosts: [{{ ip }}]
  sudo: yes
  vars:
    ip: {{ip}}
    port: {{port}}
    admin_port: {{admin_port}}
  tasks:
    - name: keystone service and endpoint
      keystone_service:
        token: {{admin_token}}
        name: {{name}}
        type: {{type}}
        description: {{description}}
        publicurl: {{publicurl}}
        internalurl: {{internalurl}}
        adminurl: {{adminurl}}
        region: "RegionOne"
        state: present
        endpoint: http://{{keystone_host}}:{{keystone_port}}/v2.0/
@@ -1,48 +0,0 @@
id: keystone_service_endpoint
handler: ansible
version: 1.0.0
input:
  keystone_host:
    schema: str!
    value:
  keystone_port:
    schema: int!
    value:
  admin_token:
    schema: str!
    value:
  port:
    schema: int!
    value:
  admin_port:
    schema: int!
    value:
  type:
    schema: str!
    value: identity
  description:
    schema: str!
    value: OpenStack Identity Service
  publicurl:
    schema: str!
    value: http://{{ip}}:{{port}}/v2.0
  internalurl:
    schema: str!
    value: http://{{ip}}:{{port}}/v2.0
  adminurl:
    schema: str!
    value: http://{{ip}}:{{admin_port}}/v2.0
  adminurl:
    schema: str!
    value:
  ip:
    schema: str!
    value:
  ssh_key:
    schema: str!
    value:
  ssh_user:
    schema: str!
    value:

tags: [resource/keystone_service_endpoint, resources/keystone]
@@ -1,5 +0,0 @@
- hosts: [{{ ip }}]
  sudo: yes
  tasks:
    - name: keystone tenant
      keystone_user: endpoint=http://{{keystone_host}}:{{keystone_port}}/v2.0/ token={{admin_token}} tenant={{tenant_name}} state=absent
@@ -1,7 +0,0 @@
- hosts: [{{ ip }}]
  sudo: yes
  tasks:
    - name: install python-keystoneclient
      shell: pip install python-keystoneclient
    - name: keystone tenant
      keystone_user: endpoint=http://{{keystone_host}}:{{keystone_port}}/v2.0/ token={{admin_token}} tenant={{tenant_name}} state=present
@@ -1,27 +0,0 @@
id: keystone_tenant
handler: ansible
version: 1.0.0
input:
  keystone_host:
    schema: str!
    value:
  keystone_port:
    schema: int!
    value:
  admin_token:
    schema: str!
    value:
  tenant_name:
    schema: str!
    value: admin
  ip:
    schema: str!
    value:
  ssh_key:
    schema: str!
    value:
  ssh_user:
    schema: str!
    value:

tags: [resource/keystone_tenant, resources/keystone]
@@ -1,6 +0,0 @@
- hosts: [{{ ip }}]
  sudo: yes
  tasks:
    - name: keystone user
      #TODO: not implemented in module
      pause: seconds=1
@@ -1,7 +0,0 @@
- hosts: [{{ ip }}]
  sudo: yes
  tasks:
    - name: install python-keystoneclient
      shell: pip install python-keystoneclient
    - name: keystone user
      keystone_user: endpoint=http://{{ keystone_host }}:{{ keystone_port }}/v2.0/ token={{ admin_token }} user={{ user_name }} password={{ user_password }} tenant={{ tenant_name }} state=present
@@ -1,33 +0,0 @@
id: keystone_user
handler: ansible
version: 1.0.0
input:
  keystone_host:
    schema: str!
    value:
  keystone_port:
    schema: int!
    value:
  admin_token:
    schema: str!
    value:
  user_name:
    schema: str!
    value: admin
  user_password:
    schema: str!
    value: admin
  tenant_name:
    schema: str!
    value:
  ip:
    schema: str!
    value:
  ssh_key:
    schema: str!
    value:
  ssh_user:
    schema: str!
    value:

tags: [resource/keystone_user, resources/keystone]
@@ -1,5 +1,4 @@
# -*- coding: utf-8 -*-
import copy
import os

from copy import deepcopy
@@ -160,8 +159,15 @@ class Resource(object):


def create(name, base_path, args, tags=[], connections={}):
    from solar.core import resource_provider

    if isinstance(base_path, resource_provider.BaseProvider):
        base_path = base_path.directory

    if not os.path.exists(base_path):
        raise Exception('Base resource does not exist: {0}'.format(base_path))
        raise Exception(
            'Base resource does not exist: {0}'.format(base_path)
        )

    base_meta_file = os.path.join(base_path, 'meta.yaml')
    actions_path = os.path.join(base_path, 'actions')
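For reference, a minimal usage sketch of the two call forms create() accepts after this change; the resource names and argument dicts are taken from the example.py diff above:

    from solar.core import resource
    from solar.core.resource_provider import GitProvider

    GIT_KEYSTONE_RESOURCE_URL = 'https://github.com/CGenie/keystone-resource'

    # Plain directory path, unchanged behaviour:
    keystone_db = resource.create(
        'keystone_db', 'resources/mariadb_keystone_db/',
        {'db_name': 'keystone_db', 'login_user': 'root'})

    # Provider object: create() detects BaseProvider instances and substitutes
    # provider.directory as the base path before the existence check.
    keystone_config1 = resource.create(
        'keystone_config1',
        GitProvider(GIT_KEYSTONE_RESOURCE_URL, path='keystone_config'),
        {'config_dir': '/etc/solar/keystone', 'admin_token': 'admin'})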
solar/solar/core/resource_provider.py (new file)
@@ -0,0 +1,52 @@
import os
import subprocess

from solar import utils


class BaseProvider(object):
    def run(self):
        pass


class DirectoryProvider(BaseProvider):
    def __init__(self, directory):
        self.directory = directory


class GitProvider(BaseProvider):
    def __init__(self, repository, path='.'):
        self.repository = repository
        self.path = path

        repo_name = os.path.split(self.repository)[1]

        resources_directory = os.path.join(
            utils.read_config()['resources-directory'],
            repo_name
        )

        with open('/tmp/git-provider.yaml', 'w') as f:
            f.write("""
---

- hosts: all
  tasks:
    - git: repo={repository} dest={destination} clone={clone} update=yes
""".format(
                repository=self.repository,
                destination=resources_directory,
                clone='no' if os.path.exists(resources_directory) else 'yes'
            ))

        subprocess.check_call([
            'ansible-playbook',
            '-i', '"localhost,"',
            '-c', 'local',
            '/tmp/git-provider.yaml'
        ])

        if path != '.':
            self.directory = os.path.join(resources_directory, path)
        else:
            self.directory = resources_directory
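A short usage sketch of the new provider, assuming the resources-directory: /tmp/git setting from the config change above; the printed path is illustrative:

    from solar.core.resource_provider import GitProvider

    # Constructing the provider clones (or updates) the repository via a generated
    # Ansible playbook into <resources-directory>/<repository name>, then records
    # the requested resource subdirectory in provider.directory.
    provider = GitProvider('https://github.com/CGenie/keystone-resource',
                           path='keystone_config')
    print(provider.directory)  # e.g. /tmp/git/keystone-resource/keystone_config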