commit 8d86c83788
.config (new file, 9 lines)
@@ -0,0 +1,9 @@
+dblayer: riak
+redis:
+  host: localhost
+  port: '6379'
+solar_db:
+  mode: riak
+  host: localhost
+  port: '8087'
+  protocol: pbc
.gitignore (vendored, 2 changes)
@@ -25,6 +25,7 @@ celery*.log
 
 *.dot
 *.png
+*.svg
 resources_compiled.py
 
 # bootstrap
@@ -45,3 +46,4 @@ solar/.coverage
 
 # pytest cache
 solar/.cache
+.config.override
.travis.yml
@@ -1,7 +1,7 @@
 language: python
 python: 2.7
 env:
-  - PIP_ACCEL_CACHE=$HOME/.pip-accel-cache
+  - PIP_ACCEL_CACHE=$HOME/.pip-accel-cache SOLAR_CONFIG=$TRAVIS_BUILD_DIR/.config SOLAR_SOLAR_DB_HOST=localhost
 cache:
   directories:
     - $HOME/.pip-accel-cache
@@ -12,6 +12,6 @@ install:
 script:
   - cd solar && py.test --cov=solar -s solar && cd ..
 services:
   - redis-server
+  - riak
 after_success:
   cd solar && coveralls
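For context on the new SOLAR_CONFIG and SOLAR_SOLAR_DB_HOST variables: together with the .config file added above and the .config.override entry now ignored by git, they suggest that config keys can be overridden from the environment by upper-casing the section and key with a SOLAR_ prefix (SOLAR_SOLAR_DB_HOST -> solar_db.host). A minimal loader sketch under that assumption (a hypothetical helper, not solar's actual code):

import os
import yaml

def load_config(path):
    # Read the YAML config, then let SOLAR_<SECTION>_<KEY> env vars
    # override nested keys, e.g. SOLAR_SOLAR_DB_HOST -> config['solar_db']['host'].
    with open(path) as f:
        config = yaml.safe_load(f)
    for section, values in config.items():
        if not isinstance(values, dict):
            continue
        for key in values:
            env_name = 'SOLAR_%s_%s' % (section.upper(), key.upper())
            if env_name in os.environ:
                values[key] = os.environ[env_name]
    return config

config = load_config(os.environ.get('SOLAR_CONFIG', '.config'))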
Dockerfile (new file, 25 lines)
@@ -0,0 +1,25 @@
+FROM ubuntu:14.04
+
+WORKDIR /
+
+RUN apt-get update
+# Install pip's dependency: setuptools:
+RUN apt-get install -y python python-dev python-distribute python-pip
+RUN pip install ansible
+
+ADD bootstrap/playbooks/celery.yaml /celery.yaml
+ADD solar /solar
+ADD solard /solard
+ADD resources /resources
+ADD templates /templates
+ADD run.sh /run.sh
+
+
+RUN apt-get install -y libffi-dev libssl-dev
+RUN pip install riak peewee
+RUN pip install -U 'setuptools>=17.1'
+RUN cd /solar && python setup.py install
+RUN cd /solard && python setup.py install
+RUN ansible-playbook -v -i "localhost," -c local /celery.yaml --skip-tags slave
+
+CMD ["/run.sh"]
@@ -32,6 +32,9 @@
   - build-essential
   # for torrent transport
   - python-libtorrent
+  # for riak python package
+  - libffi-dev
+  - libssl-dev
 
 # PIP
 #- apt: name=python-pip state=absent
docker-compose.yml (new file, 31 lines)
@@ -0,0 +1,31 @@
+solar-celery:
+  image: solarproject/solar-celery
+  # path inside of the container should be exactly the same as outside
+  # because solar uses absolute paths to find resource action files
+  volumes:
+    - /vagrant/.vagrant:/vagrant/.vagrant
+    - /vagrant/solar:/solar
+    - /vagrant/solard:/solard
+    - /vagrant/templates:/vagrant/templates
+    - /vagrant/resources:/vagrant/resources
+  environment:
+    - REDIS_HOST=10.0.0.2
+    - REDIS_PORT=6379
+    - RIAK_HOST=10.0.0.2
+    - RIAK_PORT=8087
+  # links are not used for configuration because we can rely on non-container
+  # based datastores
+  links:
+    - riak
+    - redis
+riak:
+  image: tutum/riak
+  ports:
+    - 8087:8087
+    - 8098:8098
+redis:
+  image: tutum/redis
+  ports:
+    - 6379:6379
+  environment:
+    - REDIS_PASS=**None**
examples/bootstrap/example-bootstrap.py (19 changes, normal file → executable file)
@@ -10,11 +10,7 @@ from solar.core import signals
 from solar.core import validation
 from solar.core.resource import virtual_resource as vr
 from solar import errors
 
-from solar.interfaces.db import get_db
-
-
-db = get_db()
+from solar.dblayer.model import ModelMeta
 
 
 @click.group()
@@ -23,9 +19,7 @@ def main():
 
 
 def setup_resources():
-    db.clear()
-
-    signals.Connections.clear()
+    ModelMeta.remove_all()
 
     node2 = vr.create('node2', 'resources/ro_node/', {
        'ip': '10.0.0.4',
@@ -61,7 +55,7 @@ def deploy():
     setup_resources()
 
     # run
-    resources = map(resource.wrap_resource, db.get_list(collection=db.COLLECTIONS.resource))
+    resources = resource.load_all()
     resources = {r.name: r for r in resources}
 
     for name in resources_to_run:
@@ -76,7 +70,7 @@ def deploy():
 
 @click.command()
 def undeploy():
-    resources = map(resource.wrap_resource, db.get_list(collection=db.COLLECTIONS.resource))
+    resources = resource.load_all()
     resources = {r.name: r for r in resources}
 
     for name in reversed(resources_to_run):
@@ -85,10 +79,7 @@ def undeploy():
         except errors.SolarError as e:
             print 'WARNING: %s' % str(e)
 
-    db.clear()
-
-    signals.Connections.clear()
-
+    ModelMeta.remove_all()
 
 main.add_command(deploy)
 main.add_command(undeploy)
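The change repeated across all of these example scripts is one mechanical swap. Side by side, as reconstructed from the hunks in this commit:

# Before: storage was reached through solar.interfaces.db and reset by hand.
from solar.interfaces.db import get_db
db = get_db()
db.clear()
resources = map(resource.wrap_resource,
                db.get_list(collection=db.COLLECTIONS.resource))

# After: the dblayer models own persistence; reset and load are one call each.
from solar.dblayer.model import ModelMeta
ModelMeta.remove_all()
resources = resource.load_all()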
examples/compiled-resources/example-compiled-resources.py (16 changes, normal file → executable file)
@@ -19,11 +19,9 @@ from solar.core import actions
 from solar.core.resource import virtual_resource as vr
 from solar.core import resource
 from solar.core import signals
 
-from solar.interfaces.db import get_db
+from solar.dblayer.model import ModelMeta
 from solar.core.resource_provider import GitProvider, RemoteZipProvider
 
 
 import resources_compiled
@@ -34,9 +32,7 @@ def main():
 
 @click.command()
 def deploy():
-    db = get_db()
-    db.clear()
-
+    ModelMeta.remove_all()
     signals.Connections.clear()
 
     node1 = resources_compiled.RoNodeResource('node1', None, {})
@@ -75,18 +71,16 @@ def deploy():
 
 @click.command()
 def undeploy():
-    db = get_db()
+    ModelMeta.remove_all()
 
-    resources = map(resource.wrap_resource, db.get_list(collection=db.COLLECTIONS.resource))
+    resources = resource.load_all()
     resources = {r.name: r for r in resources}
 
     actions.resource_action(resources['openstack_rabbitmq_user'], 'remove')
     actions.resource_action(resources['openstack_vhost'], 'remove')
     actions.resource_action(resources['rabbitmq_service1'], 'remove')
 
-    db.clear()
-
-    signals.Connections.clear()
+    ModelMeta.remove_all()
 
 
 main.add_command(deploy)
@@ -4,15 +4,11 @@ import time
 
 from solar.core import signals
 from solar.core.resource import virtual_resource as vr
 
-from solar.interfaces.db import get_db
-
-
-db = get_db()
+from solar.dblayer.model import ModelMeta
 
 
 def run():
-    db.clear()
+    ModelMeta.remove_all()
 
     resources = vr.create('nodes', 'templates/nodes_with_transports.yaml', {'count': 2})
     nodes = [x for x in resources if x.name.startswith('node')]
@@ -1,10 +1,8 @@
 
 from solar.core.resource import virtual_resource as vr
-from solar.interfaces.db import get_db
 
+from solar.dblayer.model import ModelMeta
 import yaml
 
-db = get_db()
-
 STORAGE = {'objects_ceph': True,
            'osd_pool_size': 2,
@@ -34,7 +32,7 @@ NETWORK_METADATA = yaml.load("""
 
 
 def deploy():
-    db.clear()
+    ModelMeta.remove_all()
     resources = vr.create('nodes', 'templates/nodes.yaml', {'count': 2})
     first_node, second_node = [x for x in resources if x.name.startswith('node')]
     first_transp = next(x for x in resources if x.name.startswith('transport'))
examples/lxc/example-lxc.py (8 changes, normal file → executable file)
@@ -12,10 +12,10 @@ import click
 from solar.core import signals
 from solar.core.resource import virtual_resource as vr
 
-from solar.interfaces.db import get_db
-
 from solar.system_log import change
 from solar.cli import orch
+from solar.dblayer.model import ModelMeta
 
 
 @click.group()
 def main():
@@ -43,9 +43,7 @@ def lxc_template(idx):
 
 @click.command()
 def deploy():
-    db = get_db()
-    db.clear()
-    signals.Connections.clear()
+    ModelMeta.remove_all()
 
     node1 = vr.create('nodes', 'templates/nodes.yaml', {})[0]
     seed = vr.create('nodes', 'templates/seed_node.yaml', {})[0]
@@ -8,9 +8,7 @@ from solar.core import signals
 from solar.core import validation
 from solar.core.resource import virtual_resource as vr
 from solar import events as evapi
 
-from solar.interfaces.db import get_db
-
+from solar.dblayer.model import ModelMeta
 
 PROFILE = False
 #PROFILE = True
@@ -35,8 +33,6 @@ if PROFILE:
 # Official puppet manifests, not fuel-library
 
 
-db = get_db()
-
 
 @click.group()
 def main():
@@ -247,7 +243,7 @@ def setup_neutron(node, librarian, rabbitmq_service, openstack_rabbitmq_user, op
     return {'neutron_puppet': neutron_puppet}
 
 def setup_neutron_api(node, mariadb_service, admin_user, keystone_puppet, services_tenant, neutron_puppet):
-    # NEUTRON PLUGIN AND NEUTRON API (SERVER)
+    # NEUTRON PLUGIN AND NEUTRON API (SERVER)
     neutron_plugins_ml2 = vr.create('neutron_plugins_ml2', 'resources/neutron_plugins_ml2_puppet', {})[0]
     node.connect(neutron_plugins_ml2)
 
@@ -830,7 +826,7 @@ def create_compute(node):
 
 @click.command()
 def create_all():
-    db.clear()
+    ModelMeta.remove_all()
     r = prepare_nodes(2)
     r.update(create_controller('node0'))
     r.update(create_compute('node1'))
@@ -856,7 +852,7 @@ def add_controller(node):
 
 @click.command()
 def clear():
-    db.clear()
+    ModelMeta.remove_all()
 
 
 if __name__ == '__main__':
examples/riak/riaks-template.py (7 changes, normal file → executable file)
@@ -8,16 +8,13 @@ import click
 import sys
 
 from solar.core import resource
-from solar.interfaces.db import get_db
 from solar import template
 
 
-db = get_db()
+from solar.dblayer.model import ModelMeta
 
 
 def setup_riak():
-    db.clear()
-
+    ModelMeta.remove_all()
     nodes = template.nodes_from('templates/riak_nodes.yaml')
 
     riak_services = nodes.on_each(
@@ -20,22 +20,22 @@ from solar.core import validation
 from solar.core.resource import virtual_resource as vr
 from solar import errors
 
-from solar.interfaces.db import get_db
+from solar.dblayer.model import ModelMeta
 
 from solar.events.controls import React, Dep
 from solar.events.api import add_event
 
 
-db = get_db()
+from solar.dblayer.solar_models import Resource
 
 
 def setup_riak():
-    db.clear()
-
+    ModelMeta.remove_all()
     resources = vr.create('nodes', 'templates/nodes.yaml', {'count': 3})
     nodes = [x for x in resources if x.name.startswith('node')]
-    hosts_services = [x for x in resources if x.name.startswith('hosts_file')]
     node1, node2, node3 = nodes
+    hosts_services = [x for x in resources if x.name.startswith('hosts_file')]
 
     riak_services = []
     ips = '10.0.0.%d'
@@ -44,6 +44,7 @@ def setup_riak():
         r = vr.create('riak_service%d' % num,
                       'resources/riak_node',
                       {'riak_self_name': 'riak%d' % num,
+                       'storage_backend': 'leveldb',
                        'riak_hostname': 'riak_server%d.solar' % num,
                        'riak_name': 'riak%d@riak_server%d.solar' % (num, num)})[0]
         riak_services.append(r)
@@ -60,6 +61,7 @@ def setup_riak():
                   {'riak_hostname': 'hosts:name',
                    'ip': 'hosts:ip'})
 
+    Resource.save_all_lazy()
    errors = resource.validate_resources()
    for r, error in errors:
        click.echo('ERROR: %s: %s' % (r.name, error))
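The new Resource.save_all_lazy() call above implies that dblayer batches model writes until an explicit flush. A toy sketch of that pattern, as a hypothetical illustration rather than solar's implementation:

class LazyModel(object):
    """Toy model that defers persistence until save_all_lazy() is called."""
    _dirty = set()

    def __init__(self, name):
        self.name = name

    def mark_dirty(self):
        # Register this instance for the next flush instead of writing now.
        LazyModel._dirty.add(self)

    def save(self):
        print 'persisting %s' % self.name  # stand-in for a real datastore write

    @classmethod
    def save_all_lazy(cls):
        # Flush everything touched since the last call, then reset the set.
        for obj in list(cls._dirty):
            obj.save()
        cls._dirty.clear()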
@@ -5,16 +5,11 @@ import time
 from solar.core import resource
 from solar.core import signals
 from solar.core.resource import virtual_resource as vr
 
-from solar.interfaces.db import get_db
-
-
-db = get_db()
-
+from solar.dblayer.model import ModelMeta
 
 
 def run():
-    db.clear()
+    ModelMeta.remove_all()
 
     node = vr.create('node', 'resources/ro_node', {'name': 'first' + str(time.time()),
                                                    'ip': '10.0.0.3',
@@ -2,15 +2,11 @@ import time
 
 from solar.core.resource import virtual_resource as vr
 from solar import errors
 
-from solar.interfaces.db import get_db
-
-
-db = get_db()
+from solar.dblayer.model import ModelMeta
 
 
 def run():
-    db.clear()
+    ModelMeta.remove_all()
 
     node = vr.create('node', 'resources/ro_node', {'name': 'first' + str(time.time()),
                                                    'ip': '10.0.0.3',
@@ -1,61 +1,61 @@
 $resource = hiera($::resource_name)
 
-$apache_name = $resource['input']['apache_name']['value']
-$service_name = $resource['input']['service_name']['value']
-$default_mods = $resource['input']['default_mods']['value']
-$default_vhost = $resource['input']['default_vhost']['value']
-$default_charset = $resource['input']['default_charset']['value']
-$default_confd_files = $resource['input']['default_confd_files']['value']
-$default_ssl_vhost = $resource['input']['default_ssl_vhost']['value']
-$default_ssl_cert = $resource['input']['default_ssl_cert']['value']
-$default_ssl_key = $resource['input']['default_ssl_key']['value']
-$default_ssl_chain = $resource['input']['default_ssl_chain']['value']
-$default_ssl_ca = $resource['input']['default_ssl_ca']['value']
-$default_ssl_crl_path = $resource['input']['default_ssl_crl_path']['value']
-$default_ssl_crl = $resource['input']['default_ssl_crl']['value']
-$default_ssl_crl_check = $resource['input']['default_ssl_crl_check']['value']
-$default_type = $resource['input']['default_type']['value']
-$ip = $resource['input']['ip']['value']
-$service_restart = $resource['input']['service_restart']['value']
-$purge_configs = $resource['input']['purge_configs']['value']
-$purge_vhost_dir = $resource['input']['purge_vhost_dir']['value']
-$purge_vdir = $resource['input']['purge_vdir']['value']
-$serveradmin = $resource['input']['serveradmin']['value']
-$sendfile = $resource['input']['sendfile']['value']
-$error_documents = $resource['input']['error_documents']['value']
-$timeout = $resource['input']['timeout']['value']
-$httpd_dir = $resource['input']['httpd_dir']['value']
-$server_root = $resource['input']['server_root']['value']
-$conf_dir = $resource['input']['conf_dir']['value']
-$confd_dir = $resource['input']['confd_dir']['value']
-$vhost_dir = $resource['input']['vhost_dir']['value']
-$vhost_enable_dir = $resource['input']['vhost_enable_dir']['value']
-$mod_dir = $resource['input']['mod_dir']['value']
-$mod_enable_dir = $resource['input']['mod_enable_dir']['value']
-$mpm_module = $resource['input']['mpm_module']['value']
-$lib_path = $resource['input']['lib_path']['value']
-$conf_template = $resource['input']['conf_template']['value']
-$servername = $resource['input']['servername']['value']
-$manage_user = $resource['input']['manage_user']['value']
-$manage_group = $resource['input']['manage_group']['value']
-$user = $resource['input']['user']['value']
-$group = $resource['input']['group']['value']
-$keepalive = $resource['input']['keepalive']['value']
-$keepalive_timeout = $resource['input']['keepalive_timeout']['value']
-$max_keepalive_requests = $resource['input']['max_keepalive_requests']['value']
-$logroot = $resource['input']['logroot']['value']
-$logroot_mode = $resource['input']['logroot_mode']['value']
-$log_level = $resource['input']['log_level']['value']
-$log_formats = $resource['input']['log_formats']['value']
-$ports_file = $resource['input']['ports_file']['value']
-$docroot = $resource['input']['docroot']['value']
-$apache_version = $resource['input']['apache_version']['value']
-$server_tokens = $resource['input']['server_tokens']['value']
-$server_signature = $resource['input']['server_signature']['value']
-$trace_enable = $resource['input']['trace_enable']['value']
-$allow_encoded_slashes = $resource['input']['allow_encoded_slashes']['value']
-$package_ensure = $resource['input']['package_ensure']['value']
-$use_optional_includes = $resource['input']['use_optional_includes']['value']
+$apache_name = $resource['input']['apache_name']
+$service_name = $resource['input']['service_name']
+$default_mods = $resource['input']['default_mods']
+$default_vhost = $resource['input']['default_vhost']
+$default_charset = $resource['input']['default_charset']
+$default_confd_files = $resource['input']['default_confd_files']
+$default_ssl_vhost = $resource['input']['default_ssl_vhost']
+$default_ssl_cert = $resource['input']['default_ssl_cert']
+$default_ssl_key = $resource['input']['default_ssl_key']
+$default_ssl_chain = $resource['input']['default_ssl_chain']
+$default_ssl_ca = $resource['input']['default_ssl_ca']
+$default_ssl_crl_path = $resource['input']['default_ssl_crl_path']
+$default_ssl_crl = $resource['input']['default_ssl_crl']
+$default_ssl_crl_check = $resource['input']['default_ssl_crl_check']
+$default_type = $resource['input']['default_type']
+$ip = $resource['input']['ip']
+$service_restart = $resource['input']['service_restart']
+$purge_configs = $resource['input']['purge_configs']
+$purge_vhost_dir = $resource['input']['purge_vhost_dir']
+$purge_vdir = $resource['input']['purge_vdir']
+$serveradmin = $resource['input']['serveradmin']
+$sendfile = $resource['input']['sendfile']
+$error_documents = $resource['input']['error_documents']
+$timeout = $resource['input']['timeout']
+$httpd_dir = $resource['input']['httpd_dir']
+$server_root = $resource['input']['server_root']
+$conf_dir = $resource['input']['conf_dir']
+$confd_dir = $resource['input']['confd_dir']
+$vhost_dir = $resource['input']['vhost_dir']
+$vhost_enable_dir = $resource['input']['vhost_enable_dir']
+$mod_dir = $resource['input']['mod_dir']
+$mod_enable_dir = $resource['input']['mod_enable_dir']
+$mpm_module = $resource['input']['mpm_module']
+$lib_path = $resource['input']['lib_path']
+$conf_template = $resource['input']['conf_template']
+$servername = $resource['input']['servername']
+$manage_user = $resource['input']['manage_user']
+$manage_group = $resource['input']['manage_group']
+$user = $resource['input']['user']
+$group = $resource['input']['group']
+$keepalive = $resource['input']['keepalive']
+$keepalive_timeout = $resource['input']['keepalive_timeout']
+$max_keepalive_requests = $resource['input']['max_keepalive_requests']
+$logroot = $resource['input']['logroot']
+$logroot_mode = $resource['input']['logroot_mode']
+$log_level = $resource['input']['log_level']
+$log_formats = $resource['input']['log_formats']
+$ports_file = $resource['input']['ports_file']
+$docroot = $resource['input']['docroot']
+$apache_version = $resource['input']['apache_version']
+$server_tokens = $resource['input']['server_tokens']
+$server_signature = $resource['input']['server_signature']
+$trace_enable = $resource['input']['trace_enable']
+$allow_encoded_slashes = $resource['input']['allow_encoded_slashes']
+$package_ensure = $resource['input']['package_ensure']
+$use_optional_includes = $resource['input']['use_optional_includes']
 
 class {'apache':
   apache_name => $apache_name,
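Every Puppet hunk in this commit encodes the same serialization change: inputs handed to hiera are no longer wrapped in a {'value': ...} dict, so the manifests drop one level of indexing. A toy illustration of the two shapes (the data here is invented for the example):

# Old hiera payload: each input wrapped, so manifests needed ['value'].
old_inputs = {'apache_name': {'value': 'apache2'}, 'port': {'value': 80}}
apache_name = old_inputs['apache_name']['value']  # -> 'apache2'

# New hiera payload: bare values, one lookup level less.
new_inputs = {'apache_name': 'apache2', 'port': 80}
apache_name = new_inputs['apache_name']  # -> 'apache2'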
@@ -1,24 +1,24 @@
 $resource = hiera($::resource_name)
 
-$keystone_password = $resource['input']['keystone_password']['value']
-$keystone_enabled = $resource['input']['keystone_enabled']['value']
-$keystone_tenant = $resource['input']['keystone_tenant']['value']
-$keystone_user = $resource['input']['keystone_user']['value']
-$keystone_auth_host = $resource['input']['keystone_auth_host']['value']
-$keystone_auth_port = $resource['input']['keystone_auth_port']['value']
-$keystone_auth_protocol = $resource['input']['keystone_auth_protocol']['value']
-$keystone_auth_admin_prefix = $resource['input']['keystone_auth_admin_prefix']['value']
-$keystone_auth_uri = $resource['input']['keystone_auth_uri']['value']
-$os_region_name = $resource['input']['os_region_name']['value']
-$service_port = $resource['input']['service_port']['value']
-$service_workers = $resource['input']['service_workers']['value']
-$package_ensure = $resource['input']['package_ensure']['value']
-$bind_host = $resource['input']['bind_host']['value']
-$ratelimits = $resource['input']['ratelimits']['value']
-$default_volume_type = $resource['input']['default_volume_type']['value']
-$ratelimits_factory = $resource['input']['ratelimits_factory']['value']
-$validate = $resource['input']['validate']['value']
-$validation_options = $resource['input']['validation_options']['value']
+$keystone_password = $resource['input']['keystone_password']
+$keystone_enabled = $resource['input']['keystone_enabled']
+$keystone_tenant = $resource['input']['keystone_tenant']
+$keystone_user = $resource['input']['keystone_user']
+$keystone_auth_host = $resource['input']['keystone_auth_host']
+$keystone_auth_port = $resource['input']['keystone_auth_port']
+$keystone_auth_protocol = $resource['input']['keystone_auth_protocol']
+$keystone_auth_admin_prefix = $resource['input']['keystone_auth_admin_prefix']
+$keystone_auth_uri = $resource['input']['keystone_auth_uri']
+$os_region_name = $resource['input']['os_region_name']
+$service_port = $resource['input']['service_port']
+$service_workers = $resource['input']['service_workers']
+$package_ensure = $resource['input']['package_ensure']
+$bind_host = $resource['input']['bind_host']
+$ratelimits = $resource['input']['ratelimits']
+$default_volume_type = $resource['input']['default_volume_type']
+$ratelimits_factory = $resource['input']['ratelimits_factory']
+$validate = $resource['input']['validate']
+$validation_options = $resource['input']['validation_options']
 
 include cinder::params
 
@@ -1,24 +1,24 @@
 $resource = hiera($::resource_name)
 
-$keystone_password = $resource['input']['keystone_password']['value']
-$keystone_enabled = $resource['input']['keystone_enabled']['value']
-$keystone_tenant = $resource['input']['keystone_tenant']['value']
-$keystone_user = $resource['input']['keystone_user']['value']
-$keystone_auth_host = $resource['input']['keystone_auth_host']['value']
-$keystone_auth_port = $resource['input']['keystone_auth_port']['value']
-$keystone_auth_protocol = $resource['input']['keystone_auth_protocol']['value']
-$keystone_auth_admin_prefix = $resource['input']['keystone_auth_admin_prefix']['value']
-$keystone_auth_uri = $resource['input']['keystone_auth_uri']['value']
-$os_region_name = $resource['input']['os_region_name']['value']
-$service_port = $resource['input']['service_port']['value']
-$service_workers = $resource['input']['service_workers']['value']
-$package_ensure = $resource['input']['package_ensure']['value']
-$bind_host = $resource['input']['bind_host']['value']
-$ratelimits = $resource['input']['ratelimits']['value']
-$default_volume_type = $resource['input']['default_volume_type']['value']
-$ratelimits_factory = $resource['input']['ratelimits_factory']['value']
-$validate = $resource['input']['validate']['value']
-$validation_options = $resource['input']['validation_options']['value']
+$keystone_password = $resource['input']['keystone_password']
+$keystone_enabled = $resource['input']['keystone_enabled']
+$keystone_tenant = $resource['input']['keystone_tenant']
+$keystone_user = $resource['input']['keystone_user']
+$keystone_auth_host = $resource['input']['keystone_auth_host']
+$keystone_auth_port = $resource['input']['keystone_auth_port']
+$keystone_auth_protocol = $resource['input']['keystone_auth_protocol']
+$keystone_auth_admin_prefix = $resource['input']['keystone_auth_admin_prefix']
+$keystone_auth_uri = $resource['input']['keystone_auth_uri']
+$os_region_name = $resource['input']['os_region_name']
+$service_port = $resource['input']['service_port']
+$service_workers = $resource['input']['service_workers']
+$package_ensure = $resource['input']['package_ensure']
+$bind_host = $resource['input']['bind_host']
+$ratelimits = $resource['input']['ratelimits']
+$default_volume_type = $resource['input']['default_volume_type']
+$ratelimits_factory = $resource['input']['ratelimits_factory']
+$validate = $resource['input']['validate']
+$validation_options = $resource['input']['validation_options']
 
 include cinder::params
 
@@ -1,12 +1,12 @@
 $resource = hiera($::resource_name)
 
-$glance_api_version = $resource['input']['glance_api_version']['value']
-$glance_num_retries = $resource['input']['glance_num_retries']['value']
-$glance_api_insecure = $resource['input']['glance_api_insecure']['value']
-$glance_api_ssl_compression = $resource['input']['glance_api_ssl_compression']['value']
-$glance_request_timeout = $resource['input']['glance_request_timeout']['value']
-$glance_api_servers_host = $resource['input']['glance_api_servers_host']['value']
-$glance_api_servers_port = $resource['input']['glance_api_servers_port']['value']
+$glance_api_version = $resource['input']['glance_api_version']
+$glance_num_retries = $resource['input']['glance_num_retries']
+$glance_api_insecure = $resource['input']['glance_api_insecure']
+$glance_api_ssl_compression = $resource['input']['glance_api_ssl_compression']
+$glance_request_timeout = $resource['input']['glance_request_timeout']
+$glance_api_servers_host = $resource['input']['glance_api_servers_host']
+$glance_api_servers_port = $resource['input']['glance_api_servers_port']
 
 class {'cinder::glance':
   glance_api_servers => "${glance_api_servers_host}:${glance_api_servers_port}",
@@ -1,65 +1,65 @@
 $resource = hiera($::resource_name)
 
-$ip = $resource['input']['ip']['value']
+$ip = $resource['input']['ip']
 
-$db_user = $resource['input']['db_user']['value']
-$db_password = $resource['input']['db_password']['value']
-$db_name = $resource['input']['db_name']['value']
-$db_host = $resource['input']['db_host']['value']
-$db_port = $resource['input']['db_port']['value']
+$db_user = $resource['input']['db_user']
+$db_password = $resource['input']['db_password']
+$db_name = $resource['input']['db_name']
+$db_host = $resource['input']['db_host']
+$db_port = $resource['input']['db_port']
 
-$database_connection = $resource['input']['database_connection']['value']
-$database_idle_timeout = $resource['input']['database_idle_timeout']['value']
-$database_min_pool_size = $resource['input']['database_min_pool_size']['value']
-$database_max_pool_size = $resource['input']['database_max_pool_size']['value']
-$database_max_retries = $resource['input']['database_max_retries']['value']
-$database_retry_interval = $resource['input']['database_retry_interval']['value']
-$database_max_overflow = $resource['input']['database_max_overflow']['value']
-$rpc_backend = $resource['input']['rpc_backend']['value']
-$control_exchange = $resource['input']['control_exchange']['value']
-$rabbit_host = $resource['input']['rabbit_host']['value']
-$rabbit_port = $resource['input']['rabbit_port']['value']
-$rabbit_hosts = $resource['input']['rabbit_hosts']['value']
-$rabbit_virtual_host = $resource['input']['rabbit_virtual_host']['value']
-$rabbit_userid = $resource['input']['rabbit_userid']['value']
-$rabbit_password = $resource['input']['rabbit_password']['value']
-$rabbit_use_ssl = $resource['input']['rabbit_use_ssl']['value']
-$kombu_ssl_ca_certs = $resource['input']['kombu_ssl_ca_certs']['value']
-$kombu_ssl_certfile = $resource['input']['kombu_ssl_certfile']['value']
-$kombu_ssl_keyfile = $resource['input']['kombu_ssl_keyfile']['value']
-$kombu_ssl_version = $resource['input']['kombu_ssl_version']['value']
-$amqp_durable_queues = $resource['input']['amqp_durable_queues']['value']
-$qpid_hostname = $resource['input']['qpid_hostname']['value']
-$qpid_port = $resource['input']['qpid_port']['value']
-$qpid_username = $resource['input']['qpid_username']['value']
-$qpid_password = $resource['input']['qpid_password']['value']
-$qpid_sasl_mechanisms = $resource['input']['qpid_sasl_mechanisms']['value']
-$qpid_reconnect = $resource['input']['qpid_reconnect']['value']
-$qpid_reconnect_timeout = $resource['input']['qpid_reconnect_timeout']['value']
-$qpid_reconnect_limit = $resource['input']['qpid_reconnect_limit']['value']
-$qpid_reconnect_interval_min = $resource['input']['qpid_reconnect_interval_min']['value']
-$qpid_reconnect_interval_max = $resource['input']['qpid_reconnect_interval_max']['value']
-$qpid_reconnect_interval = $resource['input']['qpid_reconnect_interval']['value']
-$qpid_heartbeat = $resource['input']['qpid_heartbeat']['value']
-$qpid_protocol = $resource['input']['qpid_protocol']['value']
-$qpid_tcp_nodelay = $resource['input']['qpid_tcp_nodelay']['value']
-$package_ensure = $resource['input']['package_ensure']['value']
-$use_ssl = $resource['input']['use_ssl']['value']
-$ca_file = $resource['input']['ca_file']['value']
-$cert_file = $resource['input']['cert_file']['value']
-$key_file = $resource['input']['key_file']['value']
-$api_paste_config = $resource['input']['api_paste_config']['value']
-$use_syslog = $resource['input']['use_syslog']['value']
-$log_facility = $resource['input']['log_facility']['value']
-$log_dir = $resource['input']['log_dir']['value']
-$verbose = $resource['input']['verbose']['value']
-$debug = $resource['input']['debug']['value']
-$storage_availability_zone = $resource['input']['storage_availability_zone']['value']
-$default_availability_zone = $resource['input']['default_availability_zone']['value']
-$mysql_module = $resource['input']['mysql_module']['value']
+$database_connection = $resource['input']['database_connection']
+$database_idle_timeout = $resource['input']['database_idle_timeout']
+$database_min_pool_size = $resource['input']['database_min_pool_size']
+$database_max_pool_size = $resource['input']['database_max_pool_size']
+$database_max_retries = $resource['input']['database_max_retries']
+$database_retry_interval = $resource['input']['database_retry_interval']
+$database_max_overflow = $resource['input']['database_max_overflow']
+$rpc_backend = $resource['input']['rpc_backend']
+$control_exchange = $resource['input']['control_exchange']
+$rabbit_host = $resource['input']['rabbit_host']
+$rabbit_port = $resource['input']['rabbit_port']
+$rabbit_hosts = $resource['input']['rabbit_hosts']
+$rabbit_virtual_host = $resource['input']['rabbit_virtual_host']
+$rabbit_userid = $resource['input']['rabbit_userid']
+$rabbit_password = $resource['input']['rabbit_password']
+$rabbit_use_ssl = $resource['input']['rabbit_use_ssl']
+$kombu_ssl_ca_certs = $resource['input']['kombu_ssl_ca_certs']
+$kombu_ssl_certfile = $resource['input']['kombu_ssl_certfile']
+$kombu_ssl_keyfile = $resource['input']['kombu_ssl_keyfile']
+$kombu_ssl_version = $resource['input']['kombu_ssl_version']
+$amqp_durable_queues = $resource['input']['amqp_durable_queues']
+$qpid_hostname = $resource['input']['qpid_hostname']
+$qpid_port = $resource['input']['qpid_port']
+$qpid_username = $resource['input']['qpid_username']
+$qpid_password = $resource['input']['qpid_password']
+$qpid_sasl_mechanisms = $resource['input']['qpid_sasl_mechanisms']
+$qpid_reconnect = $resource['input']['qpid_reconnect']
+$qpid_reconnect_timeout = $resource['input']['qpid_reconnect_timeout']
+$qpid_reconnect_limit = $resource['input']['qpid_reconnect_limit']
+$qpid_reconnect_interval_min = $resource['input']['qpid_reconnect_interval_min']
+$qpid_reconnect_interval_max = $resource['input']['qpid_reconnect_interval_max']
+$qpid_reconnect_interval = $resource['input']['qpid_reconnect_interval']
+$qpid_heartbeat = $resource['input']['qpid_heartbeat']
+$qpid_protocol = $resource['input']['qpid_protocol']
+$qpid_tcp_nodelay = $resource['input']['qpid_tcp_nodelay']
+$package_ensure = $resource['input']['package_ensure']
+$use_ssl = $resource['input']['use_ssl']
+$ca_file = $resource['input']['ca_file']
+$cert_file = $resource['input']['cert_file']
+$key_file = $resource['input']['key_file']
+$api_paste_config = $resource['input']['api_paste_config']
+$use_syslog = $resource['input']['use_syslog']
+$log_facility = $resource['input']['log_facility']
+$log_dir = $resource['input']['log_dir']
+$verbose = $resource['input']['verbose']
+$debug = $resource['input']['debug']
+$storage_availability_zone = $resource['input']['storage_availability_zone']
+$default_availability_zone = $resource['input']['default_availability_zone']
+$mysql_module = $resource['input']['mysql_module']
 # Do not apply the legacy stuff
-#$sql_connection = $resource['input']['sql_connection']['value']
-$sql_idle_timeout = $resource['input']['sql_idle_timeout']['value']
+#$sql_connection = $resource['input']['sql_connection']
+$sql_idle_timeout = $resource['input']['sql_idle_timeout']
 
 class {'cinder':
   database_connection => "mysql://${db_user}:${db_password}@${db_host}:${db_port}/${db_name}",
@@ -1,7 +1,7 @@
 $resource = hiera($::resource_name)
 
-$scheduler_driver = $resource['input']['scheduler_driver']['value']
-$package_ensure = $resource['input']['package_ensure']['value']
+$scheduler_driver = $resource['input']['scheduler_driver']
+$package_ensure = $resource['input']['package_ensure']
 
 include cinder::params
 
@@ -1,7 +1,7 @@
 $resource = hiera($::resource_name)
 
-$scheduler_driver = $resource['input']['scheduler_driver']['value']
-$package_ensure = $resource['input']['package_ensure']['value']
+$scheduler_driver = $resource['input']['scheduler_driver']
+$package_ensure = $resource['input']['package_ensure']
 
 include cinder::params
 
@@ -1,12 +1,12 @@
 $resource = hiera($::resource_name)
 
-$package_ensure = $resource['input']['package_ensure']['value']
-$use_iscsi_backend = $resource['input']['use_iscsi_backend']['value']
+$package_ensure = $resource['input']['package_ensure']
+$use_iscsi_backend = $resource['input']['use_iscsi_backend']
 
-$iscsi_ip_address = $resource['input']['iscsi_ip_address']['value']
-$volume_driver = $resource['input']['volume_driver']['value']
-$volume_group = $resource['input']['volume_group']['value']
-$iscsi_helper = $resource['input']['iscsi_helper']['value']
+$iscsi_ip_address = $resource['input']['iscsi_ip_address']
+$volume_driver = $resource['input']['volume_driver']
+$volume_group = $resource['input']['volume_group']
+$iscsi_helper = $resource['input']['iscsi_helper']
 
 include cinder::params
 
@@ -1,12 +1,12 @@
 $resource = hiera($::resource_name)
 
-$package_ensure = $resource['input']['package_ensure']['value']
-$use_iscsi_backend = $resource['input']['use_iscsi_backend']['value']
+$package_ensure = $resource['input']['package_ensure']
+$use_iscsi_backend = $resource['input']['use_iscsi_backend']
 
-$iscsi_ip_address = $resource['input']['iscsi_ip_address']['value']
-$volume_driver = $resource['input']['volume_driver']['value']
-$volume_group = $resource['input']['volume_group']['value']
-$iscsi_helper = $resource['input']['iscsi_helper']['value']
+$iscsi_ip_address = $resource['input']['iscsi_ip_address']
+$volume_driver = $resource['input']['volume_driver']
+$volume_group = $resource['input']['volume_group']
+$iscsi_helper = $resource['input']['iscsi_helper']
 
 include cinder::params
 
@@ -1,53 +1,53 @@
 $resource = hiera($::resource_name)
 
-$ip = $resource['input']['ip']['value']
+$ip = $resource['input']['ip']
 
-$db_user = $resource['input']['db_user']['value']
-$db_password = $resource['input']['db_password']['value']
-$db_name = $resource['input']['db_name']['value']
-$db_host = $resource['input']['db_host']['value']
-$db_port = $resource['input']['db_port']['value']
+$db_user = $resource['input']['db_user']
+$db_password = $resource['input']['db_password']
+$db_name = $resource['input']['db_name']
+$db_host = $resource['input']['db_host']
+$db_port = $resource['input']['db_port']
 
-$filesystem_store_datadir = $resource['input']['filesystem_store_datadir']['value']
+$filesystem_store_datadir = $resource['input']['filesystem_store_datadir']
 
-$keystone_password = $resource['input']['keystone_password']['value']
-$verbose = $resource['input']['verbose']['value']
-$debug = $resource['input']['debug']['value']
-$bind_host = $resource['input']['bind_host']['value']
-$bind_port = $resource['input']['bind_port']['value']
-$backlog = $resource['input']['backlog']['value']
-$workers = $resource['input']['workers']['value']
-$log_file = $resource['input']['log_file']['value']
-$log_dir = $resource['input']['log_dir']['value']
-$registry_host = $resource['input']['registry_host']['value']
-$registry_port = $resource['input']['registry_port']['value']
-$registry_client_protocol = $resource['input']['registry_client_protocol']['value']
-$auth_type = $resource['input']['auth_type']['value']
-$auth_host = $resource['input']['auth_host']['value']
-$auth_url = $resource['input']['auth_url']['value']
-$auth_port = $resource['input']['auth_port']['value']
-$auth_uri = $resource['input']['auth_uri']['value']
-$auth_admin_prefix = $resource['input']['auth_admin_prefix']['value']
-$auth_protocol = $resource['input']['auth_protocol']['value']
-$pipeline = $resource['input']['pipeline']['value']
-$keystone_tenant = $resource['input']['keystone_tenant']['value']
-$keystone_user = $resource['input']['keystone_user']['value']
-$use_syslog = $resource['input']['use_syslog']['value']
-$log_facility = $resource['input']['log_facility']['value']
-$show_image_direct_url = $resource['input']['show_image_direct_url']['value']
-$purge_config = $resource['input']['purge_config']['value']
-$cert_file = $resource['input']['cert_file']['value']
-$key_file = $resource['input']['key_file']['value']
-$ca_file = $resource['input']['ca_file']['value']
-$known_stores = $resource['input']['known_stores']['value']
-$database_connection = $resource['input']['database_connection']['value']
-$database_idle_timeout = $resource['input']['database_idle_timeout']['value']
-$image_cache_dir = $resource['input']['image_cache_dir']['value']
-$os_region_name = $resource['input']['os_region_name']['value']
-$validate = $resource['input']['validate']['value']
-$validation_options = $resource['input']['validation_options']['value']
-$mysql_module = $resource['input']['mysql_module']['value']
-$sql_idle_timeout = $resource['input']['sql_idle_timeout']['value']
+$keystone_password = $resource['input']['keystone_password']
+$verbose = $resource['input']['verbose']
+$debug = $resource['input']['debug']
+$bind_host = $resource['input']['bind_host']
+$bind_port = $resource['input']['bind_port']
+$backlog = $resource['input']['backlog']
+$workers = $resource['input']['workers']
+$log_file = $resource['input']['log_file']
+$log_dir = $resource['input']['log_dir']
+$registry_host = $resource['input']['registry_host']
+$registry_port = $resource['input']['registry_port']
+$registry_client_protocol = $resource['input']['registry_client_protocol']
+$auth_type = $resource['input']['auth_type']
+$auth_host = $resource['input']['auth_host']
+$auth_url = $resource['input']['auth_url']
+$auth_port = $resource['input']['auth_port']
+$auth_uri = $resource['input']['auth_uri']
+$auth_admin_prefix = $resource['input']['auth_admin_prefix']
+$auth_protocol = $resource['input']['auth_protocol']
+$pipeline = $resource['input']['pipeline']
+$keystone_tenant = $resource['input']['keystone_tenant']
+$keystone_user = $resource['input']['keystone_user']
+$use_syslog = $resource['input']['use_syslog']
+$log_facility = $resource['input']['log_facility']
+$show_image_direct_url = $resource['input']['show_image_direct_url']
+$purge_config = $resource['input']['purge_config']
+$cert_file = $resource['input']['cert_file']
+$key_file = $resource['input']['key_file']
+$ca_file = $resource['input']['ca_file']
+$known_stores = $resource['input']['known_stores']
+$database_connection = $resource['input']['database_connection']
+$database_idle_timeout = $resource['input']['database_idle_timeout']
+$image_cache_dir = $resource['input']['image_cache_dir']
+$os_region_name = $resource['input']['os_region_name']
+$validate = $resource['input']['validate']
+$validation_options = $resource['input']['validation_options']
+$mysql_module = $resource['input']['mysql_module']
+$sql_idle_timeout = $resource['input']['sql_idle_timeout']
 
 class {'glance':
   package_ensure => 'present',
@@ -1,53 +1,53 @@
 $resource = hiera($::resource_name)
 
-$ip = $resource['input']['ip']['value']
+$ip = $resource['input']['ip']
 
-$db_user = $resource['input']['db_user']['value']
-$db_password = $resource['input']['db_password']['value']
-$db_name = $resource['input']['db_name']['value']
-$db_host = $resource['input']['db_host']['value']
-$db_port = $resource['input']['db_port']['value']
+$db_user = $resource['input']['db_user']
+$db_password = $resource['input']['db_password']
+$db_name = $resource['input']['db_name']
+$db_host = $resource['input']['db_host']
+$db_port = $resource['input']['db_port']
 
-$filesystem_store_datadir = $resource['input']['filesystem_store_datadir']['value']
+$filesystem_store_datadir = $resource['input']['filesystem_store_datadir']
 
-$keystone_password = $resource['input']['keystone_password']['value']
-$verbose = $resource['input']['verbose']['value']
-$debug = $resource['input']['debug']['value']
-$bind_host = $resource['input']['bind_host']['value']
-$bind_port = $resource['input']['bind_port']['value']
-$backlog = $resource['input']['backlog']['value']
-$workers = $resource['input']['workers']['value']
-$log_file = $resource['input']['log_file']['value']
-$log_dir = $resource['input']['log_dir']['value']
-$registry_host = $resource['input']['registry_host']['value']
-$registry_port = $resource['input']['registry_port']['value']
-$registry_client_protocol = $resource['input']['registry_client_protocol']['value']
-$auth_type = $resource['input']['auth_type']['value']
-$auth_host = $resource['input']['auth_host']['value']
-$auth_url = $resource['input']['auth_url']['value']
-$auth_port = $resource['input']['auth_port']['value']
-$auth_uri = $resource['input']['auth_uri']['value']
-$auth_admin_prefix = $resource['input']['auth_admin_prefix']['value']
-$auth_protocol = $resource['input']['auth_protocol']['value']
-$pipeline = $resource['input']['pipeline']['value']
-$keystone_tenant = $resource['input']['keystone_tenant']['value']
-$keystone_user = $resource['input']['keystone_user']['value']
-$use_syslog = $resource['input']['use_syslog']['value']
-$log_facility = $resource['input']['log_facility']['value']
-$show_image_direct_url = $resource['input']['show_image_direct_url']['value']
-$purge_config = $resource['input']['purge_config']['value']
-$cert_file = $resource['input']['cert_file']['value']
-$key_file = $resource['input']['key_file']['value']
-$ca_file = $resource['input']['ca_file']['value']
-$known_stores = $resource['input']['known_stores']['value']
-$database_connection = $resource['input']['database_connection']['value']
-$database_idle_timeout = $resource['input']['database_idle_timeout']['value']
-$image_cache_dir = $resource['input']['image_cache_dir']['value']
-$os_region_name = $resource['input']['os_region_name']['value']
-$validate = $resource['input']['validate']['value']
-$validation_options = $resource['input']['validation_options']['value']
-$mysql_module = $resource['input']['mysql_module']['value']
-$sql_idle_timeout = $resource['input']['sql_idle_timeout']['value']
+$keystone_password = $resource['input']['keystone_password']
+$verbose = $resource['input']['verbose']
+$debug = $resource['input']['debug']
+$bind_host = $resource['input']['bind_host']
+$bind_port = $resource['input']['bind_port']
+$backlog = $resource['input']['backlog']
+$workers = $resource['input']['workers']
+$log_file = $resource['input']['log_file']
+$log_dir = $resource['input']['log_dir']
+$registry_host = $resource['input']['registry_host']
+$registry_port = $resource['input']['registry_port']
+$registry_client_protocol = $resource['input']['registry_client_protocol']
+$auth_type = $resource['input']['auth_type']
+$auth_host = $resource['input']['auth_host']
+$auth_url = $resource['input']['auth_url']
+$auth_port = $resource['input']['auth_port']
+$auth_uri = $resource['input']['auth_uri']
+$auth_admin_prefix = $resource['input']['auth_admin_prefix']
+$auth_protocol = $resource['input']['auth_protocol']
+$pipeline = $resource['input']['pipeline']
+$keystone_tenant = $resource['input']['keystone_tenant']
+$keystone_user = $resource['input']['keystone_user']
+$use_syslog = $resource['input']['use_syslog']
+$log_facility = $resource['input']['log_facility']
+$show_image_direct_url = $resource['input']['show_image_direct_url']
+$purge_config = $resource['input']['purge_config']
+$cert_file = $resource['input']['cert_file']
+$key_file = $resource['input']['key_file']
+$ca_file = $resource['input']['ca_file']
+$known_stores = $resource['input']['known_stores']
+$database_connection = $resource['input']['database_connection']
+$database_idle_timeout = $resource['input']['database_idle_timeout']
+$image_cache_dir = $resource['input']['image_cache_dir']
+$os_region_name = $resource['input']['os_region_name']
+$validate = $resource['input']['validate']
+$validation_options = $resource['input']['validation_options']
+$mysql_module = $resource['input']['mysql_module']
+$sql_idle_timeout = $resource['input']['sql_idle_timeout']
 
 class {'glance':
   package_ensure => 'present',
@@ -1,42 +1,42 @@
 $resource = hiera($::resource_name)
 
-$ip = $resource['input']['ip']['value']
+$ip = $resource['input']['ip']
 
-$db_user = $resource['input']['db_user']['value']
-$db_password = $resource['input']['db_password']['value']
-$db_name = $resource['input']['db_name']['value']
-$db_host = $resource['input']['db_host']['value']
-$db_port = $resource['input']['db_port']['value']
+$db_user = $resource['input']['db_user']
+$db_password = $resource['input']['db_password']
+$db_name = $resource['input']['db_name']
+$db_host = $resource['input']['db_host']
+$db_port = $resource['input']['db_port']
 
-$keystone_password = $resource['input']['keystone_password']['value']
-$package_ensure = $resource['input']['package_ensure']['value']
-$verbose = $resource['input']['verbose']['value']
-$debug = $resource['input']['debug']['value']
-$bind_host = $resource['input']['bind_host']['value']
-$bind_port = $resource['input']['bind_port']['value']
-$log_file = $resource['input']['log_file']['value']
-$log_dir = $resource['input']['log_dir']['value']
-$database_connection = $resource['input']['database_connection']['value']
-$database_idle_timeout = $resource['input']['database_idle_timeout']['value']
-$auth_type = $resource['input']['auth_type']['value']
-$auth_host = $resource['input']['auth_host']['value']
-$auth_port = $resource['input']['auth_port']['value']
-$auth_admin_prefix = $resource['input']['auth_admin_prefix']['value']
-$auth_uri = $resource['input']['auth_uri']['value']
-$auth_protocol = $resource['input']['auth_protocol']['value']
-$keystone_tenant = $resource['input']['keystone_tenant']['value']
-$keystone_user = $resource['input']['keystone_user']['value']
-$pipeline = $resource['input']['pipeline']['value']
-$use_syslog = $resource['input']['use_syslog']['value']
-$log_facility = $resource['input']['log_facility']['value']
-$purge_config = $resource['input']['purge_config']['value']
-$cert_file = $resource['input']['cert_file']['value']
-$key_file = $resource['input']['key_file']['value']
-$ca_file = $resource['input']['ca_file']['value']
-$sync_db = $resource['input']['sync_db']['value']
-$mysql_module = $resource['input']['mysql_module']['value']
-$sql_idle_timeout = $resource['input']['sql_idle_timeout']['value']
-$sql_connection = $resource['input']['sql_connection']['value']
+$keystone_password = $resource['input']['keystone_password']
+$package_ensure = $resource['input']['package_ensure']
+$verbose = $resource['input']['verbose']
+$debug = $resource['input']['debug']
+$bind_host = $resource['input']['bind_host']
+$bind_port = $resource['input']['bind_port']
+$log_file = $resource['input']['log_file']
+$log_dir = $resource['input']['log_dir']
+$database_connection = $resource['input']['database_connection']
+$database_idle_timeout = $resource['input']['database_idle_timeout']
+$auth_type = $resource['input']['auth_type']
+$auth_host = $resource['input']['auth_host']
+$auth_port = $resource['input']['auth_port']
+$auth_admin_prefix = $resource['input']['auth_admin_prefix']
+$auth_uri = $resource['input']['auth_uri']
+$auth_protocol = $resource['input']['auth_protocol']
+$keystone_tenant = $resource['input']['keystone_tenant']
+$keystone_user = $resource['input']['keystone_user']
+$pipeline = $resource['input']['pipeline']
+$use_syslog = $resource['input']['use_syslog']
+$log_facility = $resource['input']['log_facility']
+$purge_config = $resource['input']['purge_config']
+$cert_file = $resource['input']['cert_file']
+$key_file = $resource['input']['key_file']
+$ca_file = $resource['input']['ca_file']
+$sync_db = $resource['input']['sync_db']
+$mysql_module = $resource['input']['mysql_module']
+$sql_idle_timeout = $resource['input']['sql_idle_timeout']
+$sql_connection = $resource['input']['sql_connection']
 
 include glance::params
 
@@ -1,42 +1,42 @@
 $resource = hiera($::resource_name)

-$ip = $resource['input']['ip']['value']
+$ip = $resource['input']['ip']

-$db_user = $resource['input']['db_user']['value']
-$db_password = $resource['input']['db_password']['value']
-$db_name = $resource['input']['db_name']['value']
-$db_host = $resource['input']['db_host']['value']
-$db_port = $resource['input']['db_port']['value']
+$db_user = $resource['input']['db_user']
+$db_password = $resource['input']['db_password']
+$db_name = $resource['input']['db_name']
+$db_host = $resource['input']['db_host']
+$db_port = $resource['input']['db_port']

-$keystone_password = $resource['input']['keystone_password']['value']
-$package_ensure = $resource['input']['package_ensure']['value']
-$verbose = $resource['input']['verbose']['value']
-$debug = $resource['input']['debug']['value']
-$bind_host = $resource['input']['bind_host']['value']
-$bind_port = $resource['input']['bind_port']['value']
-$log_file = $resource['input']['log_file']['value']
-$log_dir = $resource['input']['log_dir']['value']
-$database_connection = $resource['input']['database_connection']['value']
-$database_idle_timeout = $resource['input']['database_idle_timeout']['value']
-$auth_type = $resource['input']['auth_type']['value']
-$auth_host = $resource['input']['auth_host']['value']
-$auth_port = $resource['input']['auth_port']['value']
-$auth_admin_prefix = $resource['input']['auth_admin_prefix']['value']
-$auth_uri = $resource['input']['auth_uri']['value']
-$auth_protocol = $resource['input']['auth_protocol']['value']
-$keystone_tenant = $resource['input']['keystone_tenant']['value']
-$keystone_user = $resource['input']['keystone_user']['value']
-$pipeline = $resource['input']['pipeline']['value']
-$use_syslog = $resource['input']['use_syslog']['value']
-$log_facility = $resource['input']['log_facility']['value']
-$purge_config = $resource['input']['purge_config']['value']
-$cert_file = $resource['input']['cert_file']['value']
-$key_file = $resource['input']['key_file']['value']
-$ca_file = $resource['input']['ca_file']['value']
-$sync_db = $resource['input']['sync_db']['value']
-$mysql_module = $resource['input']['mysql_module']['value']
-$sql_idle_timeout = $resource['input']['sql_idle_timeout']['value']
-$sql_connection = $resource['input']['sql_connection']['value']
+$keystone_password = $resource['input']['keystone_password']
+$package_ensure = $resource['input']['package_ensure']
+$verbose = $resource['input']['verbose']
+$debug = $resource['input']['debug']
+$bind_host = $resource['input']['bind_host']
+$bind_port = $resource['input']['bind_port']
+$log_file = $resource['input']['log_file']
+$log_dir = $resource['input']['log_dir']
+$database_connection = $resource['input']['database_connection']
+$database_idle_timeout = $resource['input']['database_idle_timeout']
+$auth_type = $resource['input']['auth_type']
+$auth_host = $resource['input']['auth_host']
+$auth_port = $resource['input']['auth_port']
+$auth_admin_prefix = $resource['input']['auth_admin_prefix']
+$auth_uri = $resource['input']['auth_uri']
+$auth_protocol = $resource['input']['auth_protocol']
+$keystone_tenant = $resource['input']['keystone_tenant']
+$keystone_user = $resource['input']['keystone_user']
+$pipeline = $resource['input']['pipeline']
+$use_syslog = $resource['input']['use_syslog']
+$log_facility = $resource['input']['log_facility']
+$purge_config = $resource['input']['purge_config']
+$cert_file = $resource['input']['cert_file']
+$key_file = $resource['input']['key_file']
+$ca_file = $resource['input']['ca_file']
+$sync_db = $resource['input']['sync_db']
+$mysql_module = $resource['input']['mysql_module']
+$sql_idle_timeout = $resource['input']['sql_idle_timeout']
+$sql_connection = $resource['input']['sql_connection']

 include glance::params

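Every manifest hunk in this change applies the same mechanical rewrite: the trailing ['value'] key is dropped from each input lookup. A minimal Puppet sketch of the idea, assuming the data layer now publishes each input to hiera as a bare value rather than a {value: ...} wrapper (the 'port' input below is a hypothetical example, not taken from these manifests):

    # Hypothetical hiera data for one resource:
    #   old layout:              new layout:
    #     input:                   input:
    #       port:                    port: 8080
    #         value: 8080
    $resource = hiera($::resource_name)

    # Old access pattern: unwrap the nested {value: ...} hash explicitly.
    # $port = $resource['input']['port']['value']

    # New access pattern: the bare value sits directly under the input name.
    $port = $resource['input']['port']
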
@@ -10,7 +10,7 @@ input:
     value: {src: /etc/solar/haproxy, dst: /etc/haproxy}
   config:
     schema: [{backends: [{server: str!, port: int!}], listen_port: int!, protocol: str!, name: str!}]
-    value: []
+    value: [{}]
 #  ssh_user:
 #    schema: str!
 #    value:
@@ -1,14 +1,14 @@
 $resource = hiera($::resource_name)

-$ip = $resource['input']['ip']['value']
-$admin_token = $resource['input']['admin_token']['value']
-$db_user = $resource['input']['db_user']['value']
-$db_host = $resource['input']['db_host']['value']
-$db_password = $resource['input']['db_password']['value']
-$db_name = $resource['input']['db_name']['value']
-$db_port = $resource['input']['db_port']['value']
-$admin_port = $resource['input']['admin_port']['value']
-$port = $resource['input']['port']['value']
+$ip = $resource['input']['ip']
+$admin_token = $resource['input']['admin_token']
+$db_user = $resource['input']['db_user']
+$db_host = $resource['input']['db_host']
+$db_password = $resource['input']['db_password']
+$db_name = $resource['input']['db_name']
+$db_port = $resource['input']['db_port']
+$admin_port = $resource['input']['admin_port']
+$port = $resource['input']['port']

 class {'keystone':
   package_ensure => 'present',
@@ -1,14 +1,14 @@
 $resource = hiera($::resource_name)

-$ip = $resource['input']['ip']['value']
-$admin_token = $resource['input']['admin_token']['value']
-$db_user = $resource['input']['db_user']['value']
-$db_host = $resource['input']['db_host']['value']
-$db_password = $resource['input']['db_password']['value']
-$db_name = $resource['input']['db_name']['value']
-$db_port = $resource['input']['db_port']['value']
-$admin_port = $resource['input']['admin_port']['value']
-$port = $resource['input']['port']['value']
+$ip = $resource['input']['ip']
+$admin_token = $resource['input']['admin_token']
+$db_user = $resource['input']['db_user']
+$db_host = $resource['input']['db_host']
+$db_password = $resource['input']['db_password']
+$db_name = $resource['input']['db_name']
+$db_port = $resource['input']['db_port']
+$admin_port = $resource['input']['admin_port']
+$port = $resource['input']['port']

 class {'keystone':
   package_ensure => 'present',
@@ -7,7 +7,7 @@ actions:
   remove: remove.yaml
 input:
   modules:
-    schema: [str]
+    schema: [{}]
     value: []
   modules_path:
     schema: str!
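The puppet resource's modules input widens here from a list of plain strings to a list of open dicts, so a module entry can carry structured fields rather than just a name. A hedged illustration of a value that would satisfy the new schema (the keys shown are illustrative assumptions, not taken from the repo):

    modules:
      schema: [{}]
      # With schema [str] only bare names were valid, e.g. value: [stdlib].
      # An open dict per entry allows richer metadata; these keys are assumed:
      value:
        - {name: stdlib, ref: '4.6.0'}
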
@@ -1,19 +1,19 @@
 $resource = hiera($::resource_name)

-$ip = $resource['input']['ip']['value']
+$ip = $resource['input']['ip']

-$package_ensure = $resource['input']['package_ensure']['value']
-$debug = $resource['input']['debug']['value']
-$state_path = $resource['input']['state_path']['value']
-$resync_interval = $resource['input']['resync_interval']['value']
-$interface_driver = $resource['input']['interface_driver']['value']
-$dhcp_driver = $resource['input']['dhcp_driver']['value']
-$root_helper = $resource['input']['root_helper']['value']
-$use_namespaces = $resource['input']['use_namespaces']['value']
-$dnsmasq_config_file = $resource['input']['dnsmasq_config_file']['value']
-$dhcp_delete_namespaces = $resource['input']['dhcp_delete_namespaces']['value']
-$enable_isolated_metadata = $resource['input']['enable_isolated_metadata']['value']
-$enable_metadata_network = $resource['input']['enable_metadata_network']['value']
+$package_ensure = $resource['input']['package_ensure']
+$debug = $resource['input']['debug']
+$state_path = $resource['input']['state_path']
+$resync_interval = $resource['input']['resync_interval']
+$interface_driver = $resource['input']['interface_driver']
+$dhcp_driver = $resource['input']['dhcp_driver']
+$root_helper = $resource['input']['root_helper']
+$use_namespaces = $resource['input']['use_namespaces']
+$dnsmasq_config_file = $resource['input']['dnsmasq_config_file']
+$dhcp_delete_namespaces = $resource['input']['dhcp_delete_namespaces']
+$enable_isolated_metadata = $resource['input']['enable_isolated_metadata']
+$enable_metadata_network = $resource['input']['enable_metadata_network']

 class { 'neutron::agents::dhcp':
   enabled => true,
@@ -1,28 +1,28 @@
 $resource = hiera($::resource_name)

-$ip = $resource['input']['ip']['value']
+$ip = $resource['input']['ip']

-$package_ensure = $resource['input']['package_ensure']['value']
-$debug = $resource['input']['debug']['value']
-$external_network_bridge = $resource['input']['external_network_bridge']['value']
-$use_namespaces = $resource['input']['use_namespaces']['value']
-$interface_driver = $resource['input']['interface_driver']['value']
-$router_id = $resource['input']['router_id']['value']
-$gateway_external_network_id = $resource['input']['gateway_external_network_id']['value']
-$handle_internal_only_routers = $resource['input']['handle_internal_only_routers']['value']
-$metadata_port = $resource['input']['metadata_port']['value']
-$send_arp_for_ha = $resource['input']['send_arp_for_ha']['value']
-$periodic_interval = $resource['input']['periodic_interval']['value']
-$periodic_fuzzy_delay = $resource['input']['periodic_fuzzy_delay']['value']
-$enable_metadata_proxy = $resource['input']['enable_metadata_proxy']['value']
-$network_device_mtu = $resource['input']['network_device_mtu']['value']
-$router_delete_namespaces = $resource['input']['router_delete_namespaces']['value']
-$ha_enabled = $resource['input']['ha_enabled']['value']
-$ha_vrrp_auth_type = $resource['input']['ha_vrrp_auth_type']['value']
-$ha_vrrp_auth_password = $resource['input']['ha_vrrp_auth_password']['value']
-$ha_vrrp_advert_int = $resource['input']['ha_vrrp_advert_int']['value']
-$agent_mode = $resource['input']['agent_mode']['value']
-$allow_automatic_l3agent_failover = $resource['input']['allow_automatic_l3agent_failover']['value']
+$package_ensure = $resource['input']['package_ensure']
+$debug = $resource['input']['debug']
+$external_network_bridge = $resource['input']['external_network_bridge']
+$use_namespaces = $resource['input']['use_namespaces']
+$interface_driver = $resource['input']['interface_driver']
+$router_id = $resource['input']['router_id']
+$gateway_external_network_id = $resource['input']['gateway_external_network_id']
+$handle_internal_only_routers = $resource['input']['handle_internal_only_routers']
+$metadata_port = $resource['input']['metadata_port']
+$send_arp_for_ha = $resource['input']['send_arp_for_ha']
+$periodic_interval = $resource['input']['periodic_interval']
+$periodic_fuzzy_delay = $resource['input']['periodic_fuzzy_delay']
+$enable_metadata_proxy = $resource['input']['enable_metadata_proxy']
+$network_device_mtu = $resource['input']['network_device_mtu']
+$router_delete_namespaces = $resource['input']['router_delete_namespaces']
+$ha_enabled = $resource['input']['ha_enabled']
+$ha_vrrp_auth_type = $resource['input']['ha_vrrp_auth_type']
+$ha_vrrp_auth_password = $resource['input']['ha_vrrp_auth_password']
+$ha_vrrp_advert_int = $resource['input']['ha_vrrp_advert_int']
+$agent_mode = $resource['input']['agent_mode']
+$allow_automatic_l3agent_failover = $resource['input']['allow_automatic_l3agent_failover']

 class { 'neutron::agents::l3':
   enabled => true,
@@ -1,24 +1,24 @@
 $resource = hiera($::resource_name)

-$ip = $resource['input']['ip']['value']
+$ip = $resource['input']['ip']

-$auth_host = $resource['input']['auth_host']['value']
-$auth_port = $resource['input']['auth_port']['value']
+$auth_host = $resource['input']['auth_host']
+$auth_port = $resource['input']['auth_port']

-$auth_password = $resource['input']['auth_password']['value']
-$shared_secret = $resource['input']['shared_secret']['value']
-$package_ensure = $resource['input']['package_ensure']['value']
-$debug = $resource['input']['debug']['value']
-$auth_tenant = $resource['input']['auth_tenant']['value']
-$auth_user = $resource['input']['auth_user']['value']
-$auth_insecure = $resource['input']['auth_insecure']['value']
-$auth_ca_cert = $resource['input']['auth_ca_cert']['value']
-$auth_region = $resource['input']['auth_region']['value']
-$metadata_ip = $resource['input']['metadata_ip']['value']
-$metadata_port = $resource['input']['metadata_port']['value']
-$metadata_workers = $resource['input']['metadata_workers']['value']
-$metadata_backlog = $resource['input']['metadata_backlog']['value']
-$metadata_memory_cache_ttl = $resource['input']['metadata_memory_cache_ttl']['value']
+$auth_password = $resource['input']['auth_password']
+$shared_secret = $resource['input']['shared_secret']
+$package_ensure = $resource['input']['package_ensure']
+$debug = $resource['input']['debug']
+$auth_tenant = $resource['input']['auth_tenant']
+$auth_user = $resource['input']['auth_user']
+$auth_insecure = $resource['input']['auth_insecure']
+$auth_ca_cert = $resource['input']['auth_ca_cert']
+$auth_region = $resource['input']['auth_region']
+$metadata_ip = $resource['input']['metadata_ip']
+$metadata_port = $resource['input']['metadata_port']
+$metadata_workers = $resource['input']['metadata_workers']
+$metadata_backlog = $resource['input']['metadata_backlog']
+$metadata_memory_cache_ttl = $resource['input']['metadata_memory_cache_ttl']

 class { 'neutron::agents::metadata':
   enabled => true,
@@ -1,22 +1,22 @@
 $resource = hiera($::resource_name)

-$ip = $resource['input']['ip']['value']
+$ip = $resource['input']['ip']

-$package_ensure = $resource['input']['package_ensure']['value']
-$enabled = $resource['input']['enabled']['value']
-$bridge_uplinks = $resource['input']['bridge_uplinks']['value']
-$bridge_mappings = $resource['input']['bridge_mappings']['value']
-$integration_bridge = $resource['input']['integration_bridge']['value']
-$enable_tunneling = $resource['input']['enable_tunneling']['value']
-$tunnel_types = $resource['input']['tunnel_types']['value']
-$local_ip = $resource['input']['local_ip']['value']
-$tunnel_bridge = $resource['input']['tunnel_bridge']['value']
-$vxlan_udp_port = $resource['input']['vxlan_udp_port']['value']
-$polling_interval = $resource['input']['polling_interval']['value']
-$l2_population = $resource['input']['l2_population']['value']
-$arp_responder = $resource['input']['arp_responder']['value']
-$firewall_driver = $resource['input']['firewall_driver']['value']
-$enable_distributed_routing = $resource['input']['enable_distributed_routing']['value']
+$package_ensure = $resource['input']['package_ensure']
+$enabled = $resource['input']['enabled']
+$bridge_uplinks = $resource['input']['bridge_uplinks']
+$bridge_mappings = $resource['input']['bridge_mappings']
+$integration_bridge = $resource['input']['integration_bridge']
+$enable_tunneling = $resource['input']['enable_tunneling']
+$tunnel_types = $resource['input']['tunnel_types']
+$local_ip = $resource['input']['local_ip']
+$tunnel_bridge = $resource['input']['tunnel_bridge']
+$vxlan_udp_port = $resource['input']['vxlan_udp_port']
+$polling_interval = $resource['input']['polling_interval']
+$l2_population = $resource['input']['l2_population']
+$arp_responder = $resource['input']['arp_responder']
+$firewall_driver = $resource['input']['firewall_driver']
+$enable_distributed_routing = $resource['input']['enable_distributed_routing']

 class { 'neutron::agents::ml2::ovs':
   enabled => true,
@@ -1,19 +1,19 @@
 $resource = hiera($::resource_name)

-$ip = $resource['input']['ip']['value']
+$ip = $resource['input']['ip']

-$type_drivers = $resource['input']['type_drivers']['value']
-$tenant_network_types = $resource['input']['tenant_network_types']['value']
-$mechanism_drivers = $resource['input']['mechanism_drivers']['value']
-$flat_networks = $resource['input']['flat_networks']['value']
-$network_vlan_ranges = $resource['input']['network_vlan_ranges']['value']
-$tunnel_id_ranges = $resource['input']['tunnel_id_ranges']['value']
-$vxlan_group = $resource['input']['vxlan_group']['value']
-$vni_ranges = $resource['input']['vni_ranges']['value']
-$enable_security_group = $resource['input']['enable_security_group']['value']
-$package_ensure = $resource['input']['package_ensure']['value']
-$supported_pci_vendor_devs = $resource['input']['supported_pci_vendor_devs']['value']
-$sriov_agent_required = $resource['input']['sriov_agent_required']['value']
+$type_drivers = $resource['input']['type_drivers']
+$tenant_network_types = $resource['input']['tenant_network_types']
+$mechanism_drivers = $resource['input']['mechanism_drivers']
+$flat_networks = $resource['input']['flat_networks']
+$network_vlan_ranges = $resource['input']['network_vlan_ranges']
+$tunnel_id_ranges = $resource['input']['tunnel_id_ranges']
+$vxlan_group = $resource['input']['vxlan_group']
+$vni_ranges = $resource['input']['vni_ranges']
+$enable_security_group = $resource['input']['enable_security_group']
+$package_ensure = $resource['input']['package_ensure']
+$supported_pci_vendor_devs = $resource['input']['supported_pci_vendor_devs']
+$sriov_agent_required = $resource['input']['sriov_agent_required']

 # LP1490438
 file {'/etc/default/neutron-server':
@@ -1,63 +1,63 @@
 $resource = hiera($::resource_name)

-$ip = $resource['input']['ip']['value']
+$ip = $resource['input']['ip']

-$package_ensure = $resource['input']['package_ensure']['value']
-$verbose = $resource['input']['verbose']['value']
-$debug = $resource['input']['debug']['value']
-$bind_host = $resource['input']['bind_host']['value']
-$bind_port = $resource['input']['bind_port']['value']
-$core_plugin = $resource['input']['core_plugin']['value']
-$service_plugins = $resource['input']['service_plugins']['value']
-$auth_strategy = $resource['input']['auth_strategy']['value']
-$base_mac = $resource['input']['base_mac']['value']
-$mac_generation_retries = $resource['input']['mac_generation_retries']['value']
-$dhcp_lease_duration = $resource['input']['dhcp_lease_duration']['value']
-$dhcp_agents_per_network = $resource['input']['dhcp_agents_per_network']['value']
-$network_device_mtu = $resource['input']['network_device_mtu']['value']
-$dhcp_agent_notification = $resource['input']['dhcp_agent_notification']['value']
-$allow_bulk = $resource['input']['allow_bulk']['value']
-$allow_pagination = $resource['input']['allow_pagination']['value']
-$allow_sorting = $resource['input']['allow_sorting']['value']
-$allow_overlapping_ips = $resource['input']['allow_overlapping_ips']['value']
-$api_extensions_path = $resource['input']['api_extensions_path']['value']
-$root_helper = $resource['input']['root_helper']['value']
-$report_interval = $resource['input']['report_interval']['value']
-$control_exchange = $resource['input']['control_exchange']['value']
-$rpc_backend = $resource['input']['rpc_backend']['value']
-$rabbit_password = $resource['input']['rabbit_password']['value']
-$rabbit_host = $resource['input']['rabbit_host']['value']
-$rabbit_hosts = $resource['input']['rabbit_hosts']['value']
-$rabbit_port = $resource['input']['rabbit_port']['value']
-$rabbit_user = $resource['input']['rabbit_user']['value']
-$rabbit_virtual_host = $resource['input']['rabbit_virtual_host']['value']
-$rabbit_use_ssl = $resource['input']['rabbit_use_ssl']['value']
-$kombu_ssl_ca_certs = $resource['input']['kombu_ssl_ca_certs']['value']
-$kombu_ssl_certfile = $resource['input']['kombu_ssl_certfile']['value']
-$kombu_ssl_keyfile = $resource['input']['kombu_ssl_keyfile']['value']
-$kombu_ssl_version = $resource['input']['kombu_ssl_version']['value']
-$kombu_reconnect_delay = $resource['input']['kombu_reconnect_delay']['value']
-$qpid_hostname = $resource['input']['qpid_hostname']['value']
-$qpid_port = $resource['input']['qpid_port']['value']
-$qpid_username = $resource['input']['qpid_username']['value']
-$qpid_password = $resource['input']['qpid_password']['value']
-$qpid_heartbeat = $resource['input']['qpid_heartbeat']['value']
-$qpid_protocol = $resource['input']['qpid_protocol']['value']
-$qpid_tcp_nodelay = $resource['input']['qpid_tcp_nodelay']['value']
-$qpid_reconnect = $resource['input']['qpid_reconnect']['value']
-$qpid_reconnect_timeout = $resource['input']['qpid_reconnect_timeout']['value']
-$qpid_reconnect_limit = $resource['input']['qpid_reconnect_limit']['value']
-$qpid_reconnect_interval_min = $resource['input']['qpid_reconnect_interval_min']['value']
-$qpid_reconnect_interval_max = $resource['input']['qpid_reconnect_interval_max']['value']
-$qpid_reconnect_interval = $resource['input']['qpid_reconnect_interval']['value']
-$use_ssl = $resource['input']['use_ssl']['value']
-$cert_file = $resource['input']['cert_file']['value']
-$key_file = $resource['input']['key_file']['value']
-$ca_file = $resource['input']['ca_file']['value']
-$use_syslog = $resource['input']['use_syslog']['value']
-$log_facility = $resource['input']['log_facility']['value']
-$log_file = $resource['input']['log_file']['value']
-$log_dir = $resource['input']['log_dir']['value']
+$package_ensure = $resource['input']['package_ensure']
+$verbose = $resource['input']['verbose']
+$debug = $resource['input']['debug']
+$bind_host = $resource['input']['bind_host']
+$bind_port = $resource['input']['bind_port']
+$core_plugin = $resource['input']['core_plugin']
+$service_plugins = $resource['input']['service_plugins']
+$auth_strategy = $resource['input']['auth_strategy']
+$base_mac = $resource['input']['base_mac']
+$mac_generation_retries = $resource['input']['mac_generation_retries']
+$dhcp_lease_duration = $resource['input']['dhcp_lease_duration']
+$dhcp_agents_per_network = $resource['input']['dhcp_agents_per_network']
+$network_device_mtu = $resource['input']['network_device_mtu']
+$dhcp_agent_notification = $resource['input']['dhcp_agent_notification']
+$allow_bulk = $resource['input']['allow_bulk']
+$allow_pagination = $resource['input']['allow_pagination']
+$allow_sorting = $resource['input']['allow_sorting']
+$allow_overlapping_ips = $resource['input']['allow_overlapping_ips']
+$api_extensions_path = $resource['input']['api_extensions_path']
+$root_helper = $resource['input']['root_helper']
+$report_interval = $resource['input']['report_interval']
+$control_exchange = $resource['input']['control_exchange']
+$rpc_backend = $resource['input']['rpc_backend']
+$rabbit_password = $resource['input']['rabbit_password']
+$rabbit_host = $resource['input']['rabbit_host']
+$rabbit_hosts = $resource['input']['rabbit_hosts']
+$rabbit_port = $resource['input']['rabbit_port']
+$rabbit_user = $resource['input']['rabbit_user']
+$rabbit_virtual_host = $resource['input']['rabbit_virtual_host']
+$rabbit_use_ssl = $resource['input']['rabbit_use_ssl']
+$kombu_ssl_ca_certs = $resource['input']['kombu_ssl_ca_certs']
+$kombu_ssl_certfile = $resource['input']['kombu_ssl_certfile']
+$kombu_ssl_keyfile = $resource['input']['kombu_ssl_keyfile']
+$kombu_ssl_version = $resource['input']['kombu_ssl_version']
+$kombu_reconnect_delay = $resource['input']['kombu_reconnect_delay']
+$qpid_hostname = $resource['input']['qpid_hostname']
+$qpid_port = $resource['input']['qpid_port']
+$qpid_username = $resource['input']['qpid_username']
+$qpid_password = $resource['input']['qpid_password']
+$qpid_heartbeat = $resource['input']['qpid_heartbeat']
+$qpid_protocol = $resource['input']['qpid_protocol']
+$qpid_tcp_nodelay = $resource['input']['qpid_tcp_nodelay']
+$qpid_reconnect = $resource['input']['qpid_reconnect']
+$qpid_reconnect_timeout = $resource['input']['qpid_reconnect_timeout']
+$qpid_reconnect_limit = $resource['input']['qpid_reconnect_limit']
+$qpid_reconnect_interval_min = $resource['input']['qpid_reconnect_interval_min']
+$qpid_reconnect_interval_max = $resource['input']['qpid_reconnect_interval_max']
+$qpid_reconnect_interval = $resource['input']['qpid_reconnect_interval']
+$use_ssl = $resource['input']['use_ssl']
+$cert_file = $resource['input']['cert_file']
+$key_file = $resource['input']['key_file']
+$ca_file = $resource['input']['ca_file']
+$use_syslog = $resource['input']['use_syslog']
+$log_facility = $resource['input']['log_facility']
+$log_file = $resource['input']['log_file']
+$log_dir = $resource['input']['log_dir']

 class { 'neutron':
   enabled => true,
@@ -1,49 +1,49 @@
 $resource = hiera($::resource_name)

-$ip = $resource['input']['ip']['value']
+$ip = $resource['input']['ip']

-$db_user = $resource['input']['db_user']['value']
-$db_host = $resource['input']['db_host']['value']
-$db_port = $resource['input']['db_port']['value']
-$db_password = $resource['input']['db_password']['value']
-$db_name = $resource['input']['db_name']['value']
+$db_user = $resource['input']['db_user']
+$db_host = $resource['input']['db_host']
+$db_port = $resource['input']['db_port']
+$db_password = $resource['input']['db_password']
+$db_name = $resource['input']['db_name']

-$package_ensure = $resource['input']['package_ensure']['value']
-$auth_password = $resource['input']['auth_password']['value']
-$auth_type = $resource['input']['auth_type']['value']
-$auth_host = $resource['input']['auth_host']['value']
-$auth_port = $resource['input']['auth_port']['value']
-$auth_admin_prefix = $resource['input']['auth_admin_prefix']['value']
-$auth_tenant = $resource['input']['auth_tenant']['value']
-$auth_user = $resource['input']['auth_user']['value']
-$auth_protocol = $resource['input']['auth_protocol']['value']
-$auth_uri = $resource['input']['auth_uri']['value']
-$database_max_retries = $resource['input']['database_max_retries']['value']
-$database_idle_timeout = $resource['input']['database_idle_timeout']['value']
-$database_retry_interval = $resource['input']['database_retry_interval']['value']
-$database_min_pool_size = $resource['input']['database_min_pool_size']['value']
-$database_max_pool_size = $resource['input']['database_max_pool_size']['value']
-$database_max_overflow = $resource['input']['database_max_overflow']['value']
-$sync_db = $resource['input']['sync_db']['value']
-$api_workers = $resource['input']['api_workers']['value']
-$rpc_workers = $resource['input']['rpc_workers']['value']
-$agent_down_time = $resource['input']['agent_down_time']['value']
-$router_scheduler_driver = $resource['input']['router_scheduler_driver']['value']
-$router_distributed = $resource['input']['router_distributed']['value']
-$l3_ha = $resource['input']['l3_ha']['value']
-$max_l3_agents_per_router = $resource['input']['max_l3_agents_per_router']['value']
-$min_l3_agents_per_router = $resource['input']['min_l3_agents_per_router']['value']
-$l3_ha_net_cidr = $resource['input']['l3_ha_net_cidr']['value']
-$mysql_module = $resource['input']['mysql_module']['value']
-$sql_max_retries = $resource['input']['sql_max_retries']['value']
-$max_retries = $resource['input']['max_retries']['value']
-$sql_idle_timeout = $resource['input']['sql_idle_timeout']['value']
-$idle_timeout = $resource['input']['idle_timeout']['value']
-$sql_reconnect_interval = $resource['input']['sql_reconnect_interval']['value']
-$retry_interval = $resource['input']['retry_interval']['value']
-$log_dir = $resource['input']['log_dir']['value']
-$log_file = $resource['input']['log_file']['value']
-$report_interval = $resource['input']['report_interval']['value']
+$package_ensure = $resource['input']['package_ensure']
+$auth_password = $resource['input']['auth_password']
+$auth_type = $resource['input']['auth_type']
+$auth_host = $resource['input']['auth_host']
+$auth_port = $resource['input']['auth_port']
+$auth_admin_prefix = $resource['input']['auth_admin_prefix']
+$auth_tenant = $resource['input']['auth_tenant']
+$auth_user = $resource['input']['auth_user']
+$auth_protocol = $resource['input']['auth_protocol']
+$auth_uri = $resource['input']['auth_uri']
+$database_max_retries = $resource['input']['database_max_retries']
+$database_idle_timeout = $resource['input']['database_idle_timeout']
+$database_retry_interval = $resource['input']['database_retry_interval']
+$database_min_pool_size = $resource['input']['database_min_pool_size']
+$database_max_pool_size = $resource['input']['database_max_pool_size']
+$database_max_overflow = $resource['input']['database_max_overflow']
+$sync_db = $resource['input']['sync_db']
+$api_workers = $resource['input']['api_workers']
+$rpc_workers = $resource['input']['rpc_workers']
+$agent_down_time = $resource['input']['agent_down_time']
+$router_scheduler_driver = $resource['input']['router_scheduler_driver']
+$router_distributed = $resource['input']['router_distributed']
+$l3_ha = $resource['input']['l3_ha']
+$max_l3_agents_per_router = $resource['input']['max_l3_agents_per_router']
+$min_l3_agents_per_router = $resource['input']['min_l3_agents_per_router']
+$l3_ha_net_cidr = $resource['input']['l3_ha_net_cidr']
+$mysql_module = $resource['input']['mysql_module']
+$sql_max_retries = $resource['input']['sql_max_retries']
+$max_retries = $resource['input']['max_retries']
+$sql_idle_timeout = $resource['input']['sql_idle_timeout']
+$idle_timeout = $resource['input']['idle_timeout']
+$sql_reconnect_interval = $resource['input']['sql_reconnect_interval']
+$retry_interval = $resource['input']['retry_interval']
+$log_dir = $resource['input']['log_dir']
+$log_file = $resource['input']['log_file']
+$report_interval = $resource['input']['report_interval']

 class { 'neutron::server':
   enabled => true,
@@ -1,17 +1,17 @@
 $resource = hiera($::resource_name)

-$ensure_package = $resource['input']['ensure_package']['value']
-$use_lnx = $resource['input']['use_lnx']['value']
-$use_ovs = $resource['input']['use_ovs']['value']
-$install_ovs = $resource['input']['install_ovs']['value']
-$install_brtool = $resource['input']['install_brtool']['value']
-$install_ethtool = $resource['input']['install_ethtool']['value']
-$install_bondtool = $resource['input']['install_bondtool']['value']
-$install_vlantool = $resource['input']['install_vlantool']['value']
-$ovs_modname = $resource['input']['ovs_modname']['value']
-$ovs_datapath_package_name = $resource['input']['ovs_datapath_package_name']['value']
-$ovs_common_package_name = $resource['input']['ovs_common_package_name']['value']
-$network_scheme = $resource['input']['network_scheme']['value']
+$ensure_package = $resource['input']['ensure_package']
+$use_lnx = $resource['input']['use_lnx']
+$use_ovs = $resource['input']['use_ovs']
+$install_ovs = $resource['input']['install_ovs']
+$install_brtool = $resource['input']['install_brtool']
+$install_ethtool = $resource['input']['install_ethtool']
+$install_bondtool = $resource['input']['install_bondtool']
+$install_vlantool = $resource['input']['install_vlantool']
+$ovs_modname = $resource['input']['ovs_modname']
+$ovs_datapath_package_name = $resource['input']['ovs_datapath_package_name']
+$ovs_common_package_name = $resource['input']['ovs_common_package_name']
+$network_scheme = $resource['input']['network_scheme']

 class {'l23network':
   ensure_package => $ensure_package,
@@ -1,35 +1,35 @@
 $resource = hiera($::resource_name)

-$ensure_package = $resource['input']['ensure_package']['value']
-$auth_strategy = $resource['input']['auth_strategy']['value']
-$auth_host = $resource['input']['auth_host']['value']
-$auth_port = $resource['input']['auth_port']['value']
-$auth_protocol = $resource['input']['auth_protocol']['value']
-$auth_uri = $resource['input']['auth_uri']['value']
-$auth_admin_prefix = $resource['input']['auth_admin_prefix']['value']
-$auth_version = $resource['input']['auth_version']['value']
-$admin_tenant_name = $resource['input']['admin_tenant_name']['value']
-$admin_user = $resource['input']['admin_user']['value']
-$admin_password = $resource['input']['admin_password']['value']
-$api_bind_address = $resource['input']['api_bind_address']['value']
-$metadata_listen = $resource['input']['metadata_listen']['value']
-$enabled_apis = $resource['input']['enabled_apis']['value']
-$keystone_ec2_url = $resource['input']['keystone_ec2_url']['value']
-$volume_api_class = $resource['input']['volume_api_class']['value']
-$use_forwarded_for = $resource['input']['use_forwarded_for']['value']
-$osapi_compute_workers = $resource['input']['osapi_compute_workers']['value']
-$ec2_workers = $resource['input']['ec2_workers']['value']
-$metadata_workers = $resource['input']['metadata_workers']['value']
-$sync_db = $resource['input']['sync_db']['value']
-$neutron_metadata_proxy_shared_secret = $resource['input']['neutron_metadata_proxy_shared_secret']['value']
-$osapi_v3 = $resource['input']['osapi_v3']['value']
-$pci_alias = $resource['input']['pci_alias']['value']
-$ratelimits = $resource['input']['ratelimits']['value']
-$ratelimits_factory = $resource['input']['ratelimits_factory']['value']
-$validate = $resource['input']['validate']['value']
-$validation_options = $resource['input']['validation_options']['value']
-$workers = $resource['input']['workers']['value']
-$conductor_workers = $resource['input']['conductor_workers']['value']
+$ensure_package = $resource['input']['ensure_package']
+$auth_strategy = $resource['input']['auth_strategy']
+$auth_host = $resource['input']['auth_host']
+$auth_port = $resource['input']['auth_port']
+$auth_protocol = $resource['input']['auth_protocol']
+$auth_uri = $resource['input']['auth_uri']
+$auth_admin_prefix = $resource['input']['auth_admin_prefix']
+$auth_version = $resource['input']['auth_version']
+$admin_tenant_name = $resource['input']['admin_tenant_name']
+$admin_user = $resource['input']['admin_user']
+$admin_password = $resource['input']['admin_password']
+$api_bind_address = $resource['input']['api_bind_address']
+$metadata_listen = $resource['input']['metadata_listen']
+$enabled_apis = $resource['input']['enabled_apis']
+$keystone_ec2_url = $resource['input']['keystone_ec2_url']
+$volume_api_class = $resource['input']['volume_api_class']
+$use_forwarded_for = $resource['input']['use_forwarded_for']
+$osapi_compute_workers = $resource['input']['osapi_compute_workers']
+$ec2_workers = $resource['input']['ec2_workers']
+$metadata_workers = $resource['input']['metadata_workers']
+$sync_db = $resource['input']['sync_db']
+$neutron_metadata_proxy_shared_secret = $resource['input']['neutron_metadata_proxy_shared_secret']
+$osapi_v3 = $resource['input']['osapi_v3']
+$pci_alias = $resource['input']['pci_alias']
+$ratelimits = $resource['input']['ratelimits']
+$ratelimits_factory = $resource['input']['ratelimits_factory']
+$validate = $resource['input']['validate']
+$validation_options = $resource['input']['validation_options']
+$workers = $resource['input']['workers']
+$conductor_workers = $resource['input']['conductor_workers']

 exec { 'post-nova_config':
   command => '/bin/echo "Nova config has changed"',
@@ -1,35 +1,35 @@
 $resource = hiera($::resource_name)

-$ensure_package = $resource['input']['ensure_package']['value']
-$auth_strategy = $resource['input']['auth_strategy']['value']
-$auth_host = $resource['input']['auth_host']['value']
-$auth_port = $resource['input']['auth_port']['value']
-$auth_protocol = $resource['input']['auth_protocol']['value']
-$auth_uri = $resource['input']['auth_uri']['value']
-$auth_admin_prefix = $resource['input']['auth_admin_prefix']['value']
-$auth_version = $resource['input']['auth_version']['value']
-$admin_tenant_name = $resource['input']['admin_tenant_name']['value']
-$admin_user = $resource['input']['admin_user']['value']
-$admin_password = $resource['input']['admin_password']['value']
-$api_bind_address = $resource['input']['api_bind_address']['value']
-$metadata_listen = $resource['input']['metadata_listen']['value']
-$enabled_apis = $resource['input']['enabled_apis']['value']
-$keystone_ec2_url = $resource['input']['keystone_ec2_url']['value']
-$volume_api_class = $resource['input']['volume_api_class']['value']
-$use_forwarded_for = $resource['input']['use_forwarded_for']['value']
-$osapi_compute_workers = $resource['input']['osapi_compute_workers']['value']
-$ec2_workers = $resource['input']['ec2_workers']['value']
-$metadata_workers = $resource['input']['metadata_workers']['value']
-$sync_db = $resource['input']['sync_db']['value']
-$neutron_metadata_proxy_shared_secret = $resource['input']['neutron_metadata_proxy_shared_secret']['value']
-$osapi_v3 = $resource['input']['osapi_v3']['value']
-$pci_alias = $resource['input']['pci_alias']['value']
-$ratelimits = $resource['input']['ratelimits']['value']
-$ratelimits_factory = $resource['input']['ratelimits_factory']['value']
-$validate = $resource['input']['validate']['value']
-$validation_options = $resource['input']['validation_options']['value']
-$workers = $resource['input']['workers']['value']
-$conductor_workers = $resource['input']['conductor_workers']['value']
+$ensure_package = $resource['input']['ensure_package']
+$auth_strategy = $resource['input']['auth_strategy']
+$auth_host = $resource['input']['auth_host']
+$auth_port = $resource['input']['auth_port']
+$auth_protocol = $resource['input']['auth_protocol']
+$auth_uri = $resource['input']['auth_uri']
+$auth_admin_prefix = $resource['input']['auth_admin_prefix']
+$auth_version = $resource['input']['auth_version']
+$admin_tenant_name = $resource['input']['admin_tenant_name']
+$admin_user = $resource['input']['admin_user']
+$admin_password = $resource['input']['admin_password']
+$api_bind_address = $resource['input']['api_bind_address']
+$metadata_listen = $resource['input']['metadata_listen']
+$enabled_apis = $resource['input']['enabled_apis']
+$keystone_ec2_url = $resource['input']['keystone_ec2_url']
+$volume_api_class = $resource['input']['volume_api_class']
+$use_forwarded_for = $resource['input']['use_forwarded_for']
+$osapi_compute_workers = $resource['input']['osapi_compute_workers']
+$ec2_workers = $resource['input']['ec2_workers']
+$metadata_workers = $resource['input']['metadata_workers']
+$sync_db = $resource['input']['sync_db']
+$neutron_metadata_proxy_shared_secret = $resource['input']['neutron_metadata_proxy_shared_secret']
+$osapi_v3 = $resource['input']['osapi_v3']
+$pci_alias = $resource['input']['pci_alias']
+$ratelimits = $resource['input']['ratelimits']
+$ratelimits_factory = $resource['input']['ratelimits_factory']
+$validate = $resource['input']['validate']
+$validation_options = $resource['input']['validation_options']
+$workers = $resource['input']['workers']
+$conductor_workers = $resource['input']['conductor_workers']

 exec { 'post-nova_config':
   command => '/bin/echo "Nova config has changed"',
@@ -1,19 +1,19 @@
 $resource = hiera($::resource_name)

-$libvirt_virt_type = $resource['input']['libvirt_virt_type']['value']
-$vncserver_listen = $resource['input']['vncserver_listen']['value']
-$migration_support = $resource['input']['migration_support']['value']
-$libvirt_cpu_mode = $resource['input']['libvirt_cpu_mode']['value']
-$libvirt_disk_cachemodes = $resource['input']['libvirt_disk_cachemodes']['value']
-$libvirt_inject_password = $resource['input']['libvirt_inject_password']['value']
-$libvirt_inject_key = $resource['input']['libvirt_inject_key']['value']
-$libvirt_inject_partition = $resource['input']['libvirt_inject_partition']['value']
-$remove_unused_base_images = $resource['input']['remove_unused_base_images']['value']
-$remove_unused_kernels = $resource['input']['remove_unused_kernels']['value']
-$remove_unused_resized_minimum_age_seconds = $resource['input']['remove_unused_resized_minimum_age_seconds']['value']
-$remove_unused_original_minimum_age_seconds = $resource['input']['remove_unused_original_minimum_age_seconds']['value']
-$libvirt_service_name = $resource['input']['libvirt_service_name']['value']
-$libvirt_type = $resource['input']['libvirt_type']['value']
+$libvirt_virt_type = $resource['input']['libvirt_virt_type']
+$vncserver_listen = $resource['input']['vncserver_listen']
+$migration_support = $resource['input']['migration_support']
+$libvirt_cpu_mode = $resource['input']['libvirt_cpu_mode']
+$libvirt_disk_cachemodes = $resource['input']['libvirt_disk_cachemodes']
+$libvirt_inject_password = $resource['input']['libvirt_inject_password']
+$libvirt_inject_key = $resource['input']['libvirt_inject_key']
+$libvirt_inject_partition = $resource['input']['libvirt_inject_partition']
+$remove_unused_base_images = $resource['input']['remove_unused_base_images']
+$remove_unused_kernels = $resource['input']['remove_unused_kernels']
+$remove_unused_resized_minimum_age_seconds = $resource['input']['remove_unused_resized_minimum_age_seconds']
+$remove_unused_original_minimum_age_seconds = $resource['input']['remove_unused_original_minimum_age_seconds']
+$libvirt_service_name = $resource['input']['libvirt_service_name']
+$libvirt_type = $resource['input']['libvirt_type']

 class { 'nova::compute::libvirt':
   libvirt_virt_type => $libvirt_virt_type,
@@ -1,19 +1,19 @@
 $resource = hiera($::resource_name)

-$libvirt_virt_type = $resource['input']['libvirt_virt_type']['value']
-$vncserver_listen = $resource['input']['vncserver_listen']['value']
-$migration_support = $resource['input']['migration_support']['value']
-$libvirt_cpu_mode = $resource['input']['libvirt_cpu_mode']['value']
-$libvirt_disk_cachemodes = $resource['input']['libvirt_disk_cachemodes']['value']
-$libvirt_inject_password = $resource['input']['libvirt_inject_password']['value']
-$libvirt_inject_key = $resource['input']['libvirt_inject_key']['value']
-$libvirt_inject_partition = $resource['input']['libvirt_inject_partition']['value']
-$remove_unused_base_images = $resource['input']['remove_unused_base_images']['value']
-$remove_unused_kernels = $resource['input']['remove_unused_kernels']['value']
-$remove_unused_resized_minimum_age_seconds = $resource['input']['remove_unused_resized_minimum_age_seconds']['value']
-$remove_unused_original_minimum_age_seconds = $resource['input']['remove_unused_original_minimum_age_seconds']['value']
-$libvirt_service_name = $resource['input']['libvirt_service_name']['value']
-$libvirt_type = $resource['input']['libvirt_type']['value']
+$libvirt_virt_type = $resource['input']['libvirt_virt_type']
+$vncserver_listen = $resource['input']['vncserver_listen']
+$migration_support = $resource['input']['migration_support']
+$libvirt_cpu_mode = $resource['input']['libvirt_cpu_mode']
+$libvirt_disk_cachemodes = $resource['input']['libvirt_disk_cachemodes']
+$libvirt_inject_password = $resource['input']['libvirt_inject_password']
+$libvirt_inject_key = $resource['input']['libvirt_inject_key']
+$libvirt_inject_partition = $resource['input']['libvirt_inject_partition']
+$remove_unused_base_images = $resource['input']['remove_unused_base_images']
+$remove_unused_kernels = $resource['input']['remove_unused_kernels']
+$remove_unused_resized_minimum_age_seconds = $resource['input']['remove_unused_resized_minimum_age_seconds']
+$remove_unused_original_minimum_age_seconds = $resource['input']['remove_unused_original_minimum_age_seconds']
+$libvirt_service_name = $resource['input']['libvirt_service_name']
+$libvirt_type = $resource['input']['libvirt_type']

 class { 'nova::compute::libvirt':
   libvirt_virt_type => $libvirt_virt_type,
@@ -1,26 +1,26 @@
 $resource = hiera($::resource_name)

-$ensure_package = $resource['input']['ensure_package']['value']
-$vnc_enabled = $resource['input']['vnc_enabled']['value']
-$vncserver_proxyclient_address = $resource['input']['vncserver_proxyclient_address']['value']
-$vncproxy_host = $resource['input']['vncproxy_host']['value']
-$vncproxy_protocol = $resource['input']['vncproxy_protocol']['value']
-$vncproxy_port = $resource['input']['vncproxy_port']['value']
-$vncproxy_path = $resource['input']['vncproxy_path']['value']
-$vnc_keymap = $resource['input']['vnc_keymap']['value']
-$force_config_drive = $resource['input']['force_config_drive']['value']
-$virtio_nic = $resource['input']['virtio_nic']['value']
-$neutron_enabled = $resource['input']['neutron_enabled']['value']
-$network_device_mtu = $resource['input']['network_device_mtu']['value']
-$instance_usage_audit = $resource['input']['instance_usage_audit']['value']
-$instance_usage_audit_period = $resource['input']['instance_usage_audit_period']['value']
-$force_raw_images = $resource['input']['force_raw_images']['value']
-$reserved_host_memory = $resource['input']['reserved_host_memory']['value']
-$compute_manager = $resource['input']['compute_manager']['value']
-$pci_passthrough = $resource['input']['pci_passthrough']['value']
-$default_availability_zone = $resource['input']['default_availability_zone']['value']
-$default_schedule_zone = $resource['input']['default_schedule_zone']['value']
-$internal_service_availability_zone = $resource['input']['internal_service_availability_zone']['value']
+$ensure_package = $resource['input']['ensure_package']
+$vnc_enabled = $resource['input']['vnc_enabled']
+$vncserver_proxyclient_address = $resource['input']['vncserver_proxyclient_address']
+$vncproxy_host = $resource['input']['vncproxy_host']
+$vncproxy_protocol = $resource['input']['vncproxy_protocol']
+$vncproxy_port = $resource['input']['vncproxy_port']
+$vncproxy_path = $resource['input']['vncproxy_path']
+$vnc_keymap = $resource['input']['vnc_keymap']
+$force_config_drive = $resource['input']['force_config_drive']
+$virtio_nic = $resource['input']['virtio_nic']
+$neutron_enabled = $resource['input']['neutron_enabled']
+$network_device_mtu = $resource['input']['network_device_mtu']
+$instance_usage_audit = $resource['input']['instance_usage_audit']
+$instance_usage_audit_period = $resource['input']['instance_usage_audit_period']
+$force_raw_images = $resource['input']['force_raw_images']
+$reserved_host_memory = $resource['input']['reserved_host_memory']
+$compute_manager = $resource['input']['compute_manager']
+$pci_passthrough = $resource['input']['pci_passthrough']
+$default_availability_zone = $resource['input']['default_availability_zone']
+$default_schedule_zone = $resource['input']['default_schedule_zone']
+$internal_service_availability_zone = $resource['input']['internal_service_availability_zone']

 class { 'nova::compute':
   enabled => true,
@@ -1,26 +1,26 @@
 $resource = hiera($::resource_name)

-$ensure_package = $resource['input']['ensure_package']['value']
-$vnc_enabled = $resource['input']['vnc_enabled']['value']
-$vncserver_proxyclient_address = $resource['input']['vncserver_proxyclient_address']['value']
-$vncproxy_host = $resource['input']['vncproxy_host']['value']
-$vncproxy_protocol = $resource['input']['vncproxy_protocol']['value']
-$vncproxy_port = $resource['input']['vncproxy_port']['value']
-$vncproxy_path = $resource['input']['vncproxy_path']['value']
-$vnc_keymap = $resource['input']['vnc_keymap']['value']
-$force_config_drive = $resource['input']['force_config_drive']['value']
-$virtio_nic = $resource['input']['virtio_nic']['value']
-$neutron_enabled = $resource['input']['neutron_enabled']['value']
-$network_device_mtu = $resource['input']['network_device_mtu']['value']
-$instance_usage_audit = $resource['input']['instance_usage_audit']['value']
-$instance_usage_audit_period = $resource['input']['instance_usage_audit_period']['value']
-$force_raw_images = $resource['input']['force_raw_images']['value']
-$reserved_host_memory = $resource['input']['reserved_host_memory']['value']
-$compute_manager = $resource['input']['compute_manager']['value']
-$pci_passthrough = $resource['input']['pci_passthrough']['value']
-$default_availability_zone = $resource['input']['default_availability_zone']['value']
-$default_schedule_zone = $resource['input']['default_schedule_zone']['value']
-$internal_service_availability_zone = $resource['input']['internal_service_availability_zone']['value']
+$ensure_package = $resource['input']['ensure_package']
+$vnc_enabled = $resource['input']['vnc_enabled']
+$vncserver_proxyclient_address = $resource['input']['vncserver_proxyclient_address']
+$vncproxy_host = $resource['input']['vncproxy_host']
+$vncproxy_protocol = $resource['input']['vncproxy_protocol']
+$vncproxy_port = $resource['input']['vncproxy_port']
+$vncproxy_path = $resource['input']['vncproxy_path']
+$vnc_keymap = $resource['input']['vnc_keymap']
+$force_config_drive = $resource['input']['force_config_drive']
+$virtio_nic = $resource['input']['virtio_nic']
+$neutron_enabled = $resource['input']['neutron_enabled']
+$network_device_mtu = $resource['input']['network_device_mtu']
+$instance_usage_audit = $resource['input']['instance_usage_audit']
+$instance_usage_audit_period = $resource['input']['instance_usage_audit_period']
+$force_raw_images = $resource['input']['force_raw_images']
+$reserved_host_memory = $resource['input']['reserved_host_memory']
+$compute_manager = $resource['input']['compute_manager']
+$pci_passthrough = $resource['input']['pci_passthrough']
+$default_availability_zone = $resource['input']['default_availability_zone']
+$default_schedule_zone = $resource['input']['default_schedule_zone']
+$internal_service_availability_zone = $resource['input']['internal_service_availability_zone']

 class { 'nova::compute':
   enabled => true,
@@ -1,7 +1,7 @@
 $resource = hiera($::resource_name)

-$ensure_package = $resource['input']['ensure_package']['value']
-$workers = $resource['input']['workers']['value']
+$ensure_package = $resource['input']['ensure_package']
+$workers = $resource['input']['workers']

 exec { 'post-nova_config':
   command => '/bin/echo "Nova config has changed"',
@@ -1,7 +1,7 @@
 $resource = hiera($::resource_name)

-$ensure_package = $resource['input']['ensure_package']['value']
-$workers = $resource['input']['workers']['value']
+$ensure_package = $resource['input']['ensure_package']
+$workers = $resource['input']['workers']

 exec { 'post-nova_config':
   command => '/bin/echo "Nova config has changed"',
@@ -1,6 +1,6 @@
-$service_title = $resource['input']['title']['value']
-$package_name = $resource['input']['package_name']['value']
-$service_name = $resource['input']['service_name']['value']
+$service_title = $resource['input']['title']
+$package_name = $resource['input']['package_name']
+$service_name = $resource['input']['service_name']

 exec { 'post-nova_config':
   command => '/bin/echo "Nova config has changed"',
@@ -1,9 +1,9 @@
 $resource = hiera($::resource_name)

-$service_title = $resource['input']['title']['value']
-$package_name = $resource['input']['package_name']['value']
-$service_name = $resource['input']['service_name']['value']
-$ensure_package = $resource['input']['ensure_package']['value']
+$service_title = $resource['input']['title']
+$package_name = $resource['input']['package_name']
+$service_name = $resource['input']['service_name']
+$ensure_package = $resource['input']['ensure_package']

 exec { 'post-nova_config':
   command => '/bin/echo "Nova config has changed"',
@@ -1,9 +1,9 @@
 $resource = hiera($::resource_name)

-$service_title = $resource['input']['title']['value']
-$package_name = $resource['input']['package_name']['value']
-$service_name = $resource['input']['service_name']['value']
-$ensure_package = $resource['input']['ensure_package']['value']
+$service_title = $resource['input']['title']
+$package_name = $resource['input']['package_name']
+$service_name = $resource['input']['service_name']
+$ensure_package = $resource['input']['ensure_package']

 exec { 'post-nova_config':
   command => '/bin/echo "Nova config has changed"',
@@ -1,30 +1,30 @@
 $resource = hiera($::resource_name)

-$auth_host = $resource['input']['auth_host']['value']
-$auth_port = $resource['input']['auth_port']['value']
-$auth_protocol = $resource['input']['auth_protocol']['value']
-$neutron_endpoint_host = $resource['input']['neutron_endpoint_host']['value']
-$neutron_endpoint_port = $resource['input']['neutron_endpoint_port']['value']
-$neutron_endpoint_protocol = $resource['input']['neutron_endpoint_protocol']['value']
+$auth_host = $resource['input']['auth_host']
+$auth_port = $resource['input']['auth_port']
+$auth_protocol = $resource['input']['auth_protocol']
+$neutron_endpoint_host = $resource['input']['neutron_endpoint_host']
+$neutron_endpoint_port = $resource['input']['neutron_endpoint_port']
+$neutron_endpoint_protocol = $resource['input']['neutron_endpoint_protocol']

-$libvirt_vif_driver = $resource['input']['libvirt_vif_driver']['value']
-$force_snat_range = $resource['input']['force_snat_range']['value']
-$neutron_admin_password = $resource['input']['neutron_admin_password']['value']
-$neutron_auth_strategy = $resource['input']['neutron_auth_strategy']['value']
-$neutron_url_timeout = $resource['input']['neutron_url_timeout']['value']
-$neutron_admin_tenant_name = $resource['input']['neutron_admin_tenant_name']['value']
-$neutron_default_tenant_id = $resource['input']['neutron_default_tenant_id']['value']
-$neutron_region_name = $resource['input']['neutron_region_name']['value']
-$neutron_admin_username = $resource['input']['neutron_admin_username']['value']
-$neutron_ovs_bridge = $resource['input']['neutron_ovs_bridge']['value']
-$neutron_extension_sync_interval = $resource['input']['neutron_extension_sync_interval']['value']
-$neutron_ca_certificates_file = $resource['input']['neutron_ca_certificates_file']['value']
-$network_api_class = $resource['input']['network_api_class']['value']
-$security_group_api = $resource['input']['security_group_api']['value']
-$firewall_driver = $resource['input']['firewall_driver']['value']
-$vif_plugging_is_fatal = $resource['input']['vif_plugging_is_fatal']['value']
-$vif_plugging_timeout = $resource['input']['vif_plugging_timeout']['value']
-$dhcp_domain = $resource['input']['dhcp_domain']['value']
+$libvirt_vif_driver = $resource['input']['libvirt_vif_driver']
+$force_snat_range = $resource['input']['force_snat_range']
+$neutron_admin_password = $resource['input']['neutron_admin_password']
+$neutron_auth_strategy = $resource['input']['neutron_auth_strategy']
+$neutron_url_timeout = $resource['input']['neutron_url_timeout']
+$neutron_admin_tenant_name = $resource['input']['neutron_admin_tenant_name']
+$neutron_default_tenant_id = $resource['input']['neutron_default_tenant_id']
+$neutron_region_name = $resource['input']['neutron_region_name']
+$neutron_admin_username = $resource['input']['neutron_admin_username']
+$neutron_ovs_bridge = $resource['input']['neutron_ovs_bridge']
+$neutron_extension_sync_interval = $resource['input']['neutron_extension_sync_interval']
+$neutron_ca_certificates_file = $resource['input']['neutron_ca_certificates_file']
+$network_api_class = $resource['input']['network_api_class']
+$security_group_api = $resource['input']['security_group_api']
+$firewall_driver = $resource['input']['firewall_driver']
+$vif_plugging_is_fatal = $resource['input']['vif_plugging_is_fatal']
+$vif_plugging_timeout = $resource['input']['vif_plugging_timeout']
+$dhcp_domain = $resource['input']['dhcp_domain']


 class { 'nova::compute::neutron':
@@ -1,76 +1,76 @@
 $resource = hiera($::resource_name)

-$db_user = $resource['input']['db_user']['value']
-$db_password = $resource['input']['db_password']['value']
-$db_name = $resource['input']['db_name']['value']
-$db_host = $resource['input']['db_host']['value']
-$db_port = $resource['input']['db_port']['value']
-$glance_api_servers_host = $resource['input']['glance_api_servers_host']['value']
-$glance_api_servers_port = $resource['input']['glance_api_servers_port']['value']
+$db_user = $resource['input']['db_user']
+$db_password = $resource['input']['db_password']
+$db_name = $resource['input']['db_name']
+$db_host = $resource['input']['db_host']
+$db_port = $resource['input']['db_port']
+$glance_api_servers_host = $resource['input']['glance_api_servers_host']
+$glance_api_servers_port = $resource['input']['glance_api_servers_port']

-$ensure_package = $resource['input']['ensure_package']['value']
-$database_connection = $resource['input']['database_connection']['value']
-$slave_connection = $resource['input']['slave_connection']['value']
-$database_idle_timeout = $resource['input']['database_idle_timeout']['value']
-$rpc_backend = $resource['input']['rpc_backend']['value']
-$image_service = $resource['input']['image_service']['value']
-$glance_api_servers = $resource['input']['glance_api_servers']['value']
-$memcached_servers = $resource['input']['memcached_servers']['value']
-$rabbit_host = $resource['input']['rabbit_host']['value']
-$rabbit_hosts = $resource['input']['rabbit_hosts']['value']
-$rabbit_password = $resource['input']['rabbit_password']['value']
-$rabbit_port = $resource['input']['rabbit_port']['value']
-$rabbit_userid = $resource['input']['rabbit_userid']['value']
-$rabbit_virtual_host = $resource['input']['rabbit_virtual_host']['value']
-$rabbit_use_ssl = $resource['input']['rabbit_use_ssl']['value']
-$rabbit_ha_queues = $resource['input']['rabbit_ha_queues']['value']
-$kombu_ssl_ca_certs = $resource['input']['kombu_ssl_ca_certs']['value']
-$kombu_ssl_certfile = $resource['input']['kombu_ssl_certfile']['value']
-$kombu_ssl_keyfile = $resource['input']['kombu_ssl_keyfile']['value']
-$kombu_ssl_version = $resource['input']['kombu_ssl_version']['value']
-$amqp_durable_queues = $resource['input']['amqp_durable_queues']['value']
-$qpid_hostname = $resource['input']['qpid_hostname']['value']
-$qpid_port = $resource['input']['qpid_port']['value']
-$qpid_username = $resource['input']['qpid_username']['value']
-$qpid_password = $resource['input']['qpid_password']['value']
-$qpid_sasl_mechanisms = $resource['input']['qpid_sasl_mechanisms']['value']
-$qpid_heartbeat = $resource['input']['qpid_heartbeat']['value']
-$qpid_protocol = $resource['input']['qpid_protocol']['value']
-$qpid_tcp_nodelay = $resource['input']['qpid_tcp_nodelay']['value']
-$auth_strategy = $resource['input']['auth_strategy']['value']
-$service_down_time = $resource['input']['service_down_time']['value']
-$log_dir = $resource['input']['log_dir']['value']
-$state_path = $resource['input']['state_path']['value']
-$lock_path = $resource['input']['lock_path']['value']
-$verbose = $resource['input']['verbose']['value']
-$debug = $resource['input']['debug']['value']
-$periodic_interval = $resource['input']['periodic_interval']['value']
-$report_interval = $resource['input']['report_interval']['value']
-$rootwrap_config = $resource['input']['rootwrap_config']['value']
-$use_ssl = $resource['input']['use_ssl']['value']
-$enabled_ssl_apis = $resource['input']['enabled_ssl_apis']['value']
-$ca_file = $resource['input']['ca_file']['value']
-$cert_file = $resource['input']['cert_file']['value']
-$key_file = $resource['input']['key_file']['value']
-$nova_user_id = $resource['input']['nova_user_id']['value']
-$nova_group_id = $resource['input']['nova_group_id']['value']
-$nova_public_key = $resource['input']['nova_public_key']['value']
-$nova_private_key = $resource['input']['nova_private_key']['value']
-$nova_shell = $resource['input']['nova_shell']['value']
-$monitoring_notifications = $resource['input']['monitoring_notifications']['value']
-$use_syslog = $resource['input']['use_syslog']['value']
-$log_facility = $resource['input']['log_facility']['value']
-$install_utilities = $resource['input']['install_utilities']['value']
-$notification_driver = $resource['input']['notification_driver']['value']
-$notification_topics = $resource['input']['notification_topics']['value']
-$notify_api_faults = $resource['input']['notify_api_faults']['value']
-$notify_on_state_change = $resource['input']['notify_on_state_change']['value']
-$mysql_module = $resource['input']['mysql_module']['value']
-$nova_cluster_id = $resource['input']['nova_cluster_id']['value']
-$sql_connection = $resource['input']['sql_connection']['value']
-$sql_idle_timeout = $resource['input']['sql_idle_timeout']['value']
-$logdir = $resource['input']['logdir']['value']
-$os_region_name = $resource['input']['os_region_name']['value']
+$ensure_package = $resource['input']['ensure_package']
+$database_connection = $resource['input']['database_connection']
+$slave_connection = $resource['input']['slave_connection']
+$database_idle_timeout = $resource['input']['database_idle_timeout']
+$rpc_backend = $resource['input']['rpc_backend']
+$image_service = $resource['input']['image_service']
+$glance_api_servers = $resource['input']['glance_api_servers']
+$memcached_servers = $resource['input']['memcached_servers']
+$rabbit_host = $resource['input']['rabbit_host']
+$rabbit_hosts = $resource['input']['rabbit_hosts']
+$rabbit_password = $resource['input']['rabbit_password']
+$rabbit_port = $resource['input']['rabbit_port']
+$rabbit_userid = $resource['input']['rabbit_userid']
+$rabbit_virtual_host = $resource['input']['rabbit_virtual_host']
+$rabbit_use_ssl = $resource['input']['rabbit_use_ssl']
+$rabbit_ha_queues = $resource['input']['rabbit_ha_queues']
+$kombu_ssl_ca_certs = $resource['input']['kombu_ssl_ca_certs']
+$kombu_ssl_certfile = $resource['input']['kombu_ssl_certfile']
+$kombu_ssl_keyfile = $resource['input']['kombu_ssl_keyfile']
+$kombu_ssl_version = $resource['input']['kombu_ssl_version']
+$amqp_durable_queues = $resource['input']['amqp_durable_queues']
+$qpid_hostname = $resource['input']['qpid_hostname']
+$qpid_port = $resource['input']['qpid_port']
+$qpid_username = $resource['input']['qpid_username']
+$qpid_password = $resource['input']['qpid_password']
+$qpid_sasl_mechanisms = $resource['input']['qpid_sasl_mechanisms']
+$qpid_heartbeat = $resource['input']['qpid_heartbeat']
+$qpid_protocol = $resource['input']['qpid_protocol']
+$qpid_tcp_nodelay = $resource['input']['qpid_tcp_nodelay']
+$auth_strategy = $resource['input']['auth_strategy']
+$service_down_time = $resource['input']['service_down_time']
+$log_dir = $resource['input']['log_dir']
+$state_path = $resource['input']['state_path']
+$lock_path = $resource['input']['lock_path']
+$verbose = $resource['input']['verbose']
+$debug = $resource['input']['debug']
+$periodic_interval = $resource['input']['periodic_interval']
+$report_interval = $resource['input']['report_interval']
+$rootwrap_config = $resource['input']['rootwrap_config']
+$use_ssl = $resource['input']['use_ssl']
+$enabled_ssl_apis = $resource['input']['enabled_ssl_apis']
+$ca_file = $resource['input']['ca_file']
+$cert_file = $resource['input']['cert_file']
+$key_file = $resource['input']['key_file']
+$nova_user_id = $resource['input']['nova_user_id']
+$nova_group_id = $resource['input']['nova_group_id']
+$nova_public_key = $resource['input']['nova_public_key']
+$nova_private_key = $resource['input']['nova_private_key']
+$nova_shell = $resource['input']['nova_shell']
+$monitoring_notifications = $resource['input']['monitoring_notifications']
+$use_syslog = $resource['input']['use_syslog']
+$log_facility = $resource['input']['log_facility']
+$install_utilities = $resource['input']['install_utilities']
+$notification_driver = $resource['input']['notification_driver']
+$notification_topics = $resource['input']['notification_topics']
+$notify_api_faults = $resource['input']['notify_api_faults']
+$notify_on_state_change = $resource['input']['notify_on_state_change']
+$mysql_module = $resource['input']['mysql_module']
+$nova_cluster_id = $resource['input']['nova_cluster_id']
+$sql_connection = $resource['input']['sql_connection']
+$sql_idle_timeout = $resource['input']['sql_idle_timeout']
+$logdir = $resource['input']['logdir']
+$os_region_name = $resource['input']['os_region_name']

 class { 'nova':
   database_connection => "mysql://${db_user}:${db_password}@${db_host}:${db_port}/${db_name}?charset=utf8",
@@ -1,7 +1,7 @@
 $resource = hiera($::resource_name)

-$port = "${resource['input']['port']['value']}"
-$management_port = "${resource['input']['management_port']['value']}"
+$port = "${resource['input']['port']}"
+$management_port = "${resource['input']['management_port']}"

 class { '::rabbitmq':
   service_manage => true,
15
run.sh
Executable file
@@ -0,0 +1,15 @@
+#!/bin/bash
+
+# required for ease of development
+pushd /solar
+python setup.py develop
+popd
+
+pushd /solard
+python setup.py develop
+popd
+
+#used only to start celery on docker
+ansible-playbook -v -i "localhost," -c local /celery.yaml --skip-tags slave
+
+tail -f /var/run/celery/*.log
@@ -34,4 +34,4 @@ pip-accel install -r solar/test-requirements.txt

 pushd solar

-PYTHONPATH=$WORKSPACE/solar CONFIG_FILE=$CONFIG_FILE py.test --cov=solar -s solar
+SOLAR_CONFIG=../.config PYTHONPATH=$WORKSPACE/solar CONFIG_FILE=$CONFIG_FILE py.test --cov=solar -s solar/test
@@ -19,3 +19,8 @@ celery
 mock
 multipledispatch==0.4.8
 pydot
+bunch
+# if you want to use riak backend then
+riak
+# if you want to use sql backend then
+# peewee
@@ -0,0 +1 @@
+from solar.dblayer import standalone_session_wrapper
@@ -25,6 +25,7 @@ import os
 import sys
 import tabulate
 import yaml
+from collections import defaultdict

 from solar.core import actions
 from solar.core import resource as sresource
@@ -33,7 +34,6 @@ from solar.core.tags_set_parser import Expression
 from solar.core.resource import virtual_resource as vr
 from solar.core.log import log
 from solar import errors
-from solar.interfaces import orm
 from solar import utils

 from solar.cli import base
@@ -45,27 +45,24 @@ from solar.cli.resource import resource as cli_resource


 # HELPERS
-def format_resource_input(resource_input):
+def format_resource_input(resource_name, resource_input):
     return '{}::{}'.format(
-        #click.style(resource_name, fg='white', bold=True),
-        resource_input.resource.name,
-        click.style(resource_input.name, fg='yellow')
+        resource_name,
+        click.style(resource_input, fg='yellow')
     )


-def show_emitter_connections(emitter):
-    for emitter_input in emitter.resource_inputs().values():
-        click.echo(
-            '{} -> {}'.format(
-                format_resource_input(emitter_input),
-                '[{}]'.format(
-                    ', '.join(
-                        format_resource_input(r)
-                        for r in emitter_input.receivers.as_set()
-                    )
-                )
-            )
-        )
+def show_emitter_connections(res):
+    db_obj = res.db_obj
+    d = defaultdict(list)
+    for emitter, receiver, _meta in db_obj.inputs._edges():
+        d[emitter].append(receiver)
+
+    for emitter, receivers in d.iteritems():
+        click.echo("{} -> {}".format(
+            format_resource_input(*emitter),
+            '[{}]'.format(', '.join(
+                format_resource_input(*recv) for recv in receivers))))


 @click.group(cls=base.AliasedGroup)
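The new show_emitter_connections groups raw input edges by emitter before printing. A minimal standalone sketch of that grouping step, with hypothetical edge tuples standing in for db_obj.inputs._edges():

from collections import defaultdict

# hypothetical edges in the (emitter, receiver, meta) shape the new code reads;
# each endpoint is a (resource_name, input_name) pair
edges = [
    (('node1', 'ip'), ('keystone1', 'ip'), None),
    (('node1', 'ip'), ('mariadb1', 'ip'), None),
]

d = defaultdict(list)
for emitter, receiver, _meta in edges:
    d[emitter].append(receiver)

for (res, inp), receivers in d.items():  # .iteritems() in the Python 2 code above
    targets = ', '.join('{}::{}'.format(r, i) for r, i in receivers)
    print('{}::{} -> [{}]'.format(res, inp, targets))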
@@ -80,25 +77,26 @@ def init_actions():
     @click.option('-d', '--dry-run', default=False, is_flag=True)
     @click.option('-m', '--dry-run-mapping', default='{}')
     def run(dry_run_mapping, dry_run, action, tags):
-        if dry_run:
-            dry_run_executor = executors.DryRunExecutor(mapping=json.loads(dry_run_mapping))
+        raise NotImplementedError("Not yet implemented")
+        # if dry_run:
+        #     dry_run_executor = executors.DryRunExecutor(mapping=json.loads(dry_run_mapping))

-        resources = filter(
-            lambda r: Expression(tags, r.tags).evaluate(),
-            orm.DBResource.all()
-        )
+        # resources = filter(
+        #     lambda r: Expression(tags, r.tags).evaluate(),
+        #     orm.DBResource.all()
+        # )

-        for r in resources:
-            resource_obj = sresource.load(r['id'])
-            actions.resource_action(resource_obj, action)
+        # for r in resources:
+        #     resource_obj = sresource.load(r['id'])
+        #     actions.resource_action(resource_obj, action)

-        if dry_run:
-            click.echo('EXECUTED:')
-            for key in dry_run_executor.executed:
-                click.echo('{}: {}'.format(
-                    click.style(dry_run_executor.compute_hash(key), fg='green'),
-                    str(key)
-                ))
+        # if dry_run:
+        #     click.echo('EXECUTED:')
+        #     for key in dry_run_executor.executed:
+        #         click.echo('{}: {}'.format(
+        #             click.style(dry_run_executor.compute_hash(key), fg='green'),
+        #             str(key)
+        #         ))


 def init_cli_connect():
@@ -133,7 +131,7 @@ def init_cli_connect():
         receiver = sresource.load(receiver)
         click.echo(emitter)
         click.echo(receiver)
-        signals.disconnect(emitter, receiver)
+        emitter.disconnect(receiver)

         show_emitter_connections(emitter)
@@ -152,9 +150,11 @@ def init_cli_connections():
     @connections.command()
     @click.option('--start-with', default=None)
     @click.option('--end-with', default=None)
-    def graph(start_with, end_with):
+    @click.option('--details', is_flag=True, default=False)
+    def graph(start_with, end_with, details):
         g = signals.detailed_connection_graph(start_with=start_with,
-                                              end_with=end_with)
+                                              end_with=end_with,
+                                              details=details)

         nx.write_dot(g, 'graph.dot')
         fabric_api.local('dot -Tsvg graph.dot -o graph.svg')
@@ -48,13 +48,6 @@ def create(plan):
     click.echo(uid)


-@orchestration.command()
-@click.argument('uid', type=SOLARUID)
-@click.argument('plan')
-def update(uid, plan):
-    graph.update_plan(uid, plan)
-
-
 def wait_report(uid, timeout, interval=3):
     try:
         if timeout:
@@ -114,7 +107,7 @@ def filter(uid, start, end):
     errors = filters.filter(plan, start=start, end=end)
     if errors:
         raise click.ClickException('\n'.join(errors))
-    graph.save_graph(uid, plan)
+    graph.update_graph(plan)
     utils.write_graph(plan)
     click.echo('Created {name}.png'.format(name=plan.graph['name']))
@@ -25,7 +25,6 @@ from solar.core import resource as sresource
 from solar.core.resource import virtual_resource as vr
 from solar.core.log import log
 from solar import errors
-from solar.interfaces import orm
 from solar import utils

 from solar.cli import executors
@@ -82,26 +81,43 @@ def backtrack_single(i):
     return (format_input(i), backtrack_single(bi))

 @resource.command()
+@click.option('-v', '--values', default=False, is_flag=True)
+@click.option('-r', '--real_values', default=False, is_flag=True)
+@click.option('-i', '--input', default=None)
 @click.argument('resource')
-def backtrack_inputs(resource):
+def backtrack_inputs(resource, input, values, real_values):
     r = sresource.load(resource)

-    for i in r.resource_inputs().values():
-        click.echo(yaml.safe_dump({i.name: backtrack_single(i)}, default_flow_style=False))
+    db_obj = r.db_obj
+    def single(resource, name, get_val=False):
+        db_obj = sresource.load(resource).db_obj
+        se = db_obj.inputs._single_edge(name)
+        se = tuple(se)
+        if not se:
+            if get_val:
+                return dict(resource=resource, name=name, value=db_obj.inputs[name])
+            else:
+                return dict(resource=resource, name=name)
+        l = []
+        for (rname, rinput), _, meta in se:
+            l.append(dict(resource=resource, name=name))
+            val = single(rname, rinput, get_val)
+            if meta and isinstance(val, dict):
+                val['meta'] = meta
+            l.append(val)
+        return l
+
+    inps = {}
+    if input:
+        inps[input] = single(resource, input, values)
+    else:
+        for _inp in db_obj.inputs:
+            inps[_inp] = single(resource, _inp, values)

-@resource.command()
-@click.argument('resource')
-@click.argument('input_name')
-def effective_input_value(resource, input_name):
-    r = sresource.load(resource)
-    inp = r.resource_inputs()[input_name]
-    click.echo(yaml.safe_dump(backtrack_single(inp), default_flow_style=False))
-    click.echo('-' * 20)
-    val = inp.backtrack_value()
-    click.echo(val)
-    click.echo('-' * 20)
+    for name, values in inps.iteritems():
+        click.echo(yaml.safe_dump({name: values}, default_flow_style=False))
+        if real_values:
+            click.echo('! Real value: %r' % sresource.load(resource).db_obj.inputs[name], nl=True)

 @resource.command()
 def compile_all():
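The nested single() helper above follows each input's incoming edge back to the resource that originally provides the value. A simplified sketch of that backtracking idea over a plain parent-pointer dict (all names made up):

# keystone1.ip is fed by node1.ip; node1.ip is set directly
parents = {
    ('keystone1', 'ip'): ('node1', 'ip'),
    ('node1', 'ip'): None,
}

def backtrack(resource, name):
    # record this hop, then recurse into the providing resource, if any
    chain = [dict(resource=resource, name=name)]
    parent = parents.get((resource, name))
    if parent:
        chain.append(backtrack(*parent))
    return chain

print(backtrack('keystone1', 'ip'))
# [{'resource': 'keystone1', 'name': 'ip'}, [{'resource': 'node1', 'name': 'ip'}]]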
@@ -120,8 +136,10 @@ def compile_all():

 @resource.command()
 def clear_all():
+    from solar.dblayer.model import ModelMeta
     click.echo('Clearing all resources and connections')
-    orm.db.clear()
+    ModelMeta.remove_all()


 @resource.command()
 @click.argument('name')
@@ -145,23 +163,24 @@ def create(args, base_path, name):
 @resource.command()
 @click.option('--name', '-n', default=None)
 @click.option('--tag', '-t', multiple=True)
-@click.option('--json', default=False, is_flag=True)
+@click.option('--as_json', default=False, is_flag=True)
 @click.option('--color', default=True, is_flag=True)
-def show(name, tag, json, color):
+def show(name, tag, as_json, color):
+    echo = click.echo_via_pager
     if name:
         resources = [sresource.load(name)]
+        echo = click.echo
     elif tag:
         resources = sresource.load_by_tags(set(tag))
     else:
         resources = sresource.load_all()

-    echo = click.echo_via_pager
-    if json:
-        output = json.dumps([r.to_dict() for r in resources], indent=2)
+    if as_json:
+        output = json.dumps([r.to_dict(inputs=True) for r in resources], indent=2)
         echo = click.echo
     else:
         if color:
-            formatter = lambda r: r.color_repr()
+            formatter = lambda r: r.color_repr(inputs=True)
         else:
             formatter = lambda r: unicode(r)
         output = '\n'.join(formatter(r) for r in resources)
@@ -42,24 +42,25 @@ def validate():
 @changes.command()
 @click.option('-d', default=False, is_flag=True, help='detailed view')
 def stage(d):
-    log = list(change.stage_changes().reverse())
+    log = change.stage_changes()
+    log.reverse()
     for item in log:
-        click.echo(item)
+        click.echo(data.compact(item))
         if d:
-            for line in item.details:
+            for line in data.details(item.diff):
                 click.echo(' '*4+line)
     if not log:
         click.echo('No changes')

 @changes.command(name='staged-item')
-@click.argument('log_action')
-def staged_item(log_action):
-    item = data.SL().get(log_action)
+@click.argument('uid')
+def staged_item(uid):
+    item = data.LogItem.get(uid)
     if not item:
         click.echo('No staged changes for {}'.format(log_action))
     else:
-        click.echo(item)
-        for line in item.details:
+        click.echo(data.compact(item))
+        for line in data.details(item.diff):
             click.echo(' '*4+line)

 @changes.command()
@@ -80,15 +81,15 @@ def commit(uid):
 @click.option('-d', default=False, is_flag=True, help='detailed view')
 @click.option('-s', default=False, is_flag=True, help='short view, only uid')
 def history(n, d, s):
-    log = list(data.CL().collection(n))
+    log = data.CL()
     for item in log:
         if s:
             click.echo(item.uid)
             continue

-        click.echo(item)
+        click.echo(data.compact(item))
         if d:
-            for line in item.details:
+            for line in data.details(item.diff):
                 click.echo(' '*4+line)
     if not log:
         click.echo('No history')
@@ -141,8 +142,7 @@ def test(name):

 @changes.command(name='clean-history')
 def clean_history():
-    data.CL().clean()
-    data.CD().clean()
+    change.clear_history()

 @changes.command(help='USE ONLY FOR TESTING')
 def commit():
63
solar/solar/config.py
Normal file
@@ -0,0 +1,63 @@
+import os
+import yaml
+from bunch import Bunch
+
+CWD = os.getcwd()
+
+C = Bunch()
+C.redis = Bunch(port='6379', host='10.0.0.2')
+C.solar_db = Bunch(mode='riak', port='8087', host='10.0.0.2', protocol='pbc')
+
+
+def _lookup_vals(setter, config, prefix=None):
+    for key, val in config.iteritems():
+        if prefix is None:
+            sub = [key]
+        else:
+            sub = prefix + [key]
+        if isinstance(val, Bunch):
+            _lookup_vals(setter, val, sub)
+        else:
+            setter(config, sub)
+
+
+def from_configs():
+
+    paths = [
+        os.getenv('SOLAR_CONFIG', os.path.join(CWD, '.config')),
+        os.path.join(CWD, '.config.override')
+    ]
+    data = {}
+
+    def _load_from_path(data, path):
+        with open(path) as f:
+            loaded = yaml.load(f)
+        if loaded:
+            data.update(loaded)
+
+    for path in paths:
+        if not os.path.exists(path):
+            continue
+        with open(path) as f:
+            loaded = yaml.load(f)
+        if loaded:
+            data.update(loaded)
+
+    def _setter(config, path):
+        vals = data
+        for key in path:
+            vals = vals[key]
+        config[path[-1]] = vals
+    if data:
+        _lookup_vals(_setter, C)
+
+
+def from_env():
+    def _setter(config, path):
+        env_key = '_'.join(path).upper()
+        if env_key in os.environ:
+            config[path[-1]] = os.environ[env_key]
+    _lookup_vals(_setter, C)
+
+from_configs()
+from_env()
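As from_env() computes it, every nested config path maps to an upper-cased, underscore-joined environment variable: the path ['solar_db', 'host'] becomes SOLAR_DB_HOST. An illustrative sketch (value made up):

import os

os.environ['SOLAR_DB_HOST'] = '127.0.0.1'  # illustrative value

# importing the module runs from_configs() and then from_env(),
# so the environment value wins over the built-in default
from solar.config import C
assert C.solar_db.host == '127.0.0.1'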
@@ -25,7 +25,7 @@ from solar import errors
 # - puppet is installed
 class Puppet(TempFileHandler):
     def action(self, resource, action_name):
-        log.debug('Executing Puppet manifest %s %s', action_name, resource)
+        log.debug('Executing Puppet manifest %s %s', action_name, resource.name)

         action_file = self._compile_action_file(resource, action_name)
         log.debug('action_file: %s', action_file)
@@ -62,7 +62,7 @@ class Puppet(TempFileHandler):
         return cmd

     def _make_args(self, resource):
-        return {resource.name: resource.to_dict()}
+        return {resource.name: {'input': resource.args}}

     def upload_hiera_resource(self, resource):
         src = '/tmp/puppet_{}.yaml'.format(resource.name)
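This _make_args change is what the manifest diffs earlier in the commit react to: args are now plain values nested under an 'input' key, instead of to_dict()'s wrapped form. A rough sketch of the hiera data shape before and after, for a hypothetical resource 'rabbitmq1' with a single 'port' input:

# before: to_dict() wrapped every value in a {'value': ...} dict
old_args = {'rabbitmq1': {'input': {'port': {'value': 5672}}}}

# after: resource.args yields plain values
new_args = {'rabbitmq1': {'input': {'port': 5672}}}

# which is why the manifests now read
#   $port = $resource['input']['port']
# instead of
#   $port = $resource['input']['port']['value']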
@@ -12,4 +12,4 @@
 # License for the specific language governing permissions and limitations
 # under the License.

-from .resource import Resource, load, load_all, validate_resources, load_by_tags
+from .resource import Resource, load, load_all, validate_resources, load_by_tags, load_updated, RESOURCE_STATE
@@ -22,13 +22,20 @@ import os
 from solar import utils

 from solar.core import validation
-from solar.interfaces import orm
 from solar.core import signals
 from solar.events import api

 from uuid import uuid4
 from hashlib import md5
+import networkx
+
+from solar.dblayer.solar_models import CommitedResource
+
+from solar.dblayer.solar_models import Resource as DBResource
+from solar.dblayer.model import StrInt
+from solar.core.signals import get_mapping
+
+from solar.dblayer.model import StrInt


 def read_meta(base_path):
@@ -73,29 +80,29 @@ class Resource(object):
         inputs = metadata.get('input', {})

         self.auto_extend_inputs(inputs)

-        self.db_obj = orm.DBResource(**{
-            'id': name,
-            'name': name,
-            'actions_path': metadata.get('actions_path', ''),
-            'actions': metadata.get('actions', ''),
-            'base_name': metadata.get('base_name', ''),
-            'base_path': metadata.get('base_path', ''),
-            'handler': metadata.get('handler', ''),
-            'puppet_module': metadata.get('puppet_module', ''),
-            'version': metadata.get('version', ''),
-            'meta_inputs': inputs,
-            'tags': tags
-
-        })
-        self.db_obj.state = RESOURCE_STATE.created.name
-        self.db_obj.tags = tags or []
-        self.db_obj.save()
+        self.db_obj = DBResource.from_dict(
+            name,
+            {
+                'id': name,
+                'name': name,
+                'actions_path': metadata.get('actions_path', ''),
+                'actions': metadata.get('actions', {}),
+                'base_name': metadata.get('base_name', ''),
+                'base_path': metadata.get('base_path', ''),
+                'handler': metadata.get('handler', ''),
+                'puppet_module': metadata.get('puppet_module', ''),
+                'version': metadata.get('version', ''),
+                'meta_inputs': inputs,
+                'tags': tags,
+                'state': RESOURCE_STATE.created.name
+            })
         self.create_inputs(args)
+
+        self.db_obj.save()

     # Load
-    @dispatch(orm.DBResource)
+    @dispatch(DBResource)
     def __init__(self, resource_db):
         self.db_obj = resource_db
         self.name = resource_db.name
@@ -116,14 +123,12 @@ class Resource(object):
             inputs[inp]['value'] = md5(self.name + uuid4().hex).hexdigest()

     def transports(self):
-        inputs = self.resource_inputs()
-        transports_id = inputs['transports_id']
-        return transports_id.backtrack_value(other_val='transports')
+        db_obj = self.db_obj
+        return db_obj.inputs._get_field_val('transports_id', other='transports')

     def ip(self):
-        inputs = self.resource_inputs()
-        transports_id = inputs['location_id']
-        return transports_id.backtrack_value(other_val='ip')
+        db_obj = self.db_obj
+        return db_obj.inputs._get_field_val('location_id', other='ip')

     @property
     def actions(self):
@@ -147,15 +152,15 @@ class Resource(object):
         args = args or {}
         for name, v in self.db_obj.meta_inputs.items():
             value = args.get(name, v.get('value'))

-            self.db_obj.add_input(name, v['schema'], value)
+            self.db_obj.inputs[name] = value

     @property
     def args(self):
-        ret = {}
-        for i in self.resource_inputs().values():
-            ret[i.name] = i.backtrack_value()
-        return ret
+        return self.db_obj.inputs.as_dict()
+        # ret = {}
+        # for i in self.resource_inputs().values():
+        #     ret[i.name] = i.backtrack_value()
+        # return ret

     def update(self, args):
         # TODO: disconnect input when it is updated and end_node
@@ -164,9 +169,8 @@ class Resource(object):
         resource_inputs = self.resource_inputs()

         for k, v in args.items():
-            i = resource_inputs[k]
-            i.value = v
-            i.save()
+            self.db_obj.inputs[k] = v
+        self.db_obj.save_lazy()

     def delete(self):
         return self.db_obj.delete()
@@ -176,19 +180,19 @@ class Resource(object):
             self.delete()
         else:
             self.db_obj.state = RESOURCE_STATE.removed.name
-            self.db_obj.save()
+            self.db_obj.save_lazy()

     def set_operational(self):
         self.db_obj.state = RESOURCE_STATE.operational.name
-        self.db_obj.save()
+        self.db_obj.save_lazy()

     def set_error(self):
         self.db_obj.state = RESOURCE_STATE.error.name
-        self.db_obj.save()
+        self.db_obj.save_lazy()

     def set_created(self):
         self.db_obj.state = RESOURCE_STATE.created.name
-        self.db_obj.save()
+        self.db_obj.save_lazy()

     def to_be_removed(self):
         return self.db_obj.state == RESOURCE_STATE.removed.name
@@ -198,10 +202,14 @@ class Resource(object):
         return self.db_obj.tags

     def add_tags(self, *tags):
-        self.db_obj.add_tags(*tags)
+        for tag in tags:
+            self.db_obj.tags.set(tag)
+        self.db_obj.save_lazy()

     def remove_tags(self, *tags):
-        self.db_obj.remove_tags(*tags)
+        for tag in tags:
+            self.db_obj.tags.remove(tag)
+        self.db_obj.save_lazy()

     @property
     def connections(self):
@@ -210,55 +218,64 @@ class Resource(object):
         stored as:
         [(emitter, emitter_input, receiver, receiver_input), ...]
         """
-        rst = []
-        for emitter, receiver, meta in self.db_obj.graph().edges(data=True):
+        rst = set()
+        for (emitter_resource, emitter_input), (receiver_resource, receiver_input), meta in self.graph().edges(data=True):
             if meta:
-                receiver_input = '{}:{}|{}'.format(receiver.name,
+                receiver_input = '{}:{}|{}'.format(receiver_input,
                     meta['destination_key'], meta['tag'])
-            else:
-                receiver_input = receiver.name
-
-            rst.append(
-                [emitter.resource.name, emitter.name,
-                 receiver.resource.name, receiver_input])
-        return rst
+
+            rst.add(
+                (emitter_resource, emitter_input,
+                 receiver_resource, receiver_input))
+        return [list(i) for i in rst]
+
+    def graph(self):
+        mdg = networkx.MultiDiGraph()
+        for u, v, data in self.db_obj.inputs._edges():
+            mdg.add_edge(u, v, attr_dict=data)
+        return mdg

     def resource_inputs(self):
-        return {
-            i.name: i for i in self.db_obj.inputs.as_set()
-        }
+        return self.db_obj.inputs

-    def to_dict(self):
+    def to_dict(self, inputs=False):
         ret = self.db_obj.to_dict()
-        ret['input'] = {}
-        for k, v in self.args.items():
-            ret['input'][k] = {
-                'value': v,
-            }
-
+        if inputs:
+            ret['inputs'] = self.db_obj.inputs.as_dict()
         return ret

-    def color_repr(self):
+    def color_repr(self, inputs=False):
         import click

         arg_color = 'yellow'

-        return ("{resource_s}({name_s}='{id}', {base_path_s}={base_path} "
-                "{args_s}={input}, {tags_s}={tags})").format(
+        return ("{resource_s}({name_s}='{key}', {base_path_s}={base_path} "
+                "{args_s}={inputs}, {tags_s}={tags})").format(
             resource_s=click.style('Resource', fg='white', bold=True),
             name_s=click.style('name', fg=arg_color, bold=True),
             base_path_s=click.style('base_path', fg=arg_color, bold=True),
             args_s=click.style('args', fg=arg_color, bold=True),
             tags_s=click.style('tags', fg=arg_color, bold=True),
-            **self.to_dict()
+            **self.to_dict(inputs)
         )

     def load_commited(self):
-        return orm.DBCommitedState.get_or_create(self.name)
+        return CommitedResource.get_or_create(self.name)

+    def _connect_inputs(self, receiver, mapping):
+        if isinstance(mapping, set):
+            mapping = dict((x, x) for x in mapping)
+        self.db_obj.connect(receiver.db_obj, mapping=mapping)
+        self.db_obj.save_lazy()
+        receiver.db_obj.save_lazy()
+
     def connect_with_events(self, receiver, mapping=None, events=None,
             use_defaults=False):
-        signals.connect(self, receiver, mapping=mapping)
+        mapping = get_mapping(self, receiver, mapping)
+        self._connect_inputs(receiver, mapping)
+        # signals.connect(self, receiver, mapping=mapping)
+        # TODO: implement events
         if use_defaults:
             api.add_default_events(self, receiver)
         if events:
@@ -268,10 +285,16 @@ class Resource(object):
         return self.connect_with_events(
             receiver, mapping=mapping, events=events, use_defaults=True)

+    def disconnect(self, receiver):
+        inputs = self.db_obj.inputs.keys()
+        self.db_obj.disconnect(other=receiver.db_obj, inputs=inputs)
+        receiver.db_obj.save_lazy()
+        self.db_obj.save_lazy()
+

 def load(name):
-    r = orm.DBResource.load(name)
+    r = DBResource.get(name)

     if not r:
         raise Exception('Resource {} does not exist in DB'.format(name))
@@ -279,14 +302,29 @@ def load(name):
     return Resource(r)


+def load_updated(since=None, with_childs=True):
+    if since is None:
+        startkey = StrInt.p_min()
+    else:
+        startkey = since
+    candids = DBResource.updated.filter(startkey, StrInt.p_max())
+    if with_childs:
+        candids = DBResource.childs(candids)
+    return [Resource(r) for r in DBResource.multi_get(candids)]
+
+# TODO
 def load_all():
-    return [Resource(r) for r in orm.DBResource.load_all()]
+    candids = DBResource.updated.filter(StrInt.p_min(), StrInt.p_max())
+    return [Resource(r) for r in DBResource.multi_get(candids)]


 def load_by_tags(tags):
-    tags = set(tags)
-    return [Resource(r) for r in orm.DBResource.load_all()
-            if tags.issubset(set(r.tags))]
+    candids_all = set()
+    for tag in tags:
+        candids = DBResource.tags.filter(tag)
+        candids_all.update(set(candids))
+    return [Resource(r) for r in DBResource.multi_get(candids_all)]


 def validate_resources():
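Illustrative usage of the new index-backed loaders (resource names and tags are made up; these calls assume a populated database):

from solar.core import resource as sresource

everything = sresource.load_all()              # walks the 'updated' index end to end
changed = sresource.load_updated(since=None)   # same index, optionally plus child resources
tagged = sresource.load_by_tags({'env/test'})  # union of per-tag index lookups

for r in tagged:
    print(r.name)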
@@ -16,7 +16,7 @@
 import networkx

 from solar.core.log import log
-from solar.interfaces import orm
+from solar.dblayer.solar_models import Resource as DBResource


 def guess_mapping(emitter, receiver):
@@ -38,10 +38,10 @@ def guess_mapping(emitter, receiver):
     :return:
     """
     guessed = {}
-    for key in emitter.args:
-        if key in receiver.args:
-            guessed[key] = key
+    for key in emitter.db_obj.meta_inputs:
+        if key in receiver.db_obj.meta_inputs:
+            guessed[key] = key

     return guessed
@@ -68,10 +68,12 @@ def location_and_transports(emitter, receiver, orig_mapping):
             # will be deleted too
             if inps_emitter and inps_receiver:
                 if not inps_emitter == inps_receiver:
-                    log.warning("Different %r defined %r => %r", single, emitter.name, receiver.name)
+                    if not '::' in inps_receiver:
+                        pass
+                        # log.warning("Different %r defined %r => %r", single, emitter.name, receiver.name)
                     return
                 else:
-                    log.debug("The same %r defined for %r => %r, skipping", single, emitter.name, receiver.name)
+                    # log.debug("The same %r defined for %r => %r, skipping", single, emitter.name, receiver.name)
                     return
             emitter_single = emitter.db_obj.meta_inputs[single]
             receiver_single = receiver.db_obj.meta_inputs[single]
@@ -92,7 +94,7 @@ def location_and_transports(emitter, receiver, orig_mapping):
             # like adding ssh_transport for solard_transport and we don't want then
             # transports_id to be messed
             # it forbids passing this value around
-            log.debug("Disabled %r mapping for %r", single, emitter.name)
+            # log.debug("Disabled %r mapping for %r", single, emitter.name)
             return
         if receiver_single.get('is_own') is False:
             # this case is when we connect resource which has location_id but that is
@@ -102,11 +104,13 @@ def location_and_transports(emitter, receiver, orig_mapping):
         # connect in other direction
         if emitter_single_reverse:
             if receiver_single_reverse:
-                connect_single(receiver, single, emitter, single)
+                # TODO: this should be moved to other place
+                receiver._connect_inputs(emitter, {single: single})
                 _remove_from_mapping(single)
                 return
         if receiver_single_reverse:
-            connect_single(receiver, single, emitter, single)
+            # TODO: this should be moved to other place
+            receiver._connect_inputs(emitter, {single: single})
             _remove_from_mapping(single)
             return
         if isinstance(orig_mapping, dict):
@@ -114,11 +118,12 @@ def location_and_transports(emitter, receiver, orig_mapping):

     # XXX: that .args is slow on current backend
     # would be faster or another
-    inps_emitter = emitter.args
-    inps_receiver = receiver.args
+    inps_emitter = emitter.db_obj.inputs
+    inps_receiver = receiver.db_obj.inputs
     # XXX: should be somehow parametrized (input attribute?)
+    # with dirty_state_ok(DBResource, ('index', )):
     for single in ('transports_id', 'location_id'):
-        if single in inps_emitter and inps_receiver:
+        if single in inps_emitter and single in inps_receiver:
             _single(single, emitter, receiver, inps_emitter[single], inps_receiver[single])
         else:
             log.warning('Unable to create connection for %s with'
@@ -127,136 +132,58 @@ def location_and_transports(emitter, receiver, orig_mapping):
     return


-def connect(emitter, receiver, mapping=None):
+def get_mapping(emitter, receiver, mapping=None):
     if mapping is None:
         mapping = guess_mapping(emitter, receiver)

     # XXX: we didn't agree on that "reverse" there
     location_and_transports(emitter, receiver, mapping)

     if isinstance(mapping, set):
         mapping = {src: src for src in mapping}
-
-    for src, dst in mapping.items():
-        if not isinstance(dst, list):
-            dst = [dst]
-
-        for d in dst:
-            connect_single(emitter, src, receiver, d)
-
-
-def connect_single(emitter, src, receiver, dst):
-    if ':' in dst:
-        return connect_multi(emitter, src, receiver, dst)
-
-    # Disconnect all receiver inputs
-    # Check if receiver input is of list type first
-    emitter_input = emitter.resource_inputs()[src]
-    receiver_input = receiver.resource_inputs()[dst]
-
-    if emitter_input.id == receiver_input.id:
-        raise Exception(
-            'Trying to connect {} to itself, this is not possible'.format(
-                emitter_input.id)
-        )
-
-    if not receiver_input.is_list:
-        receiver_input.receivers.delete_all_incoming(receiver_input)
-
-    # Check for cycles
-    # TODO: change to get_paths after it is implemented in drivers
-    if emitter_input in receiver_input.receivers.as_set():
-        raise Exception('Prevented creating a cycle on %s::%s' % (emitter.name,
-                                                                  emitter_input.name))
-
-    log.debug('Connecting {}::{} -> {}::{}'.format(
-        emitter.name, emitter_input.name, receiver.name, receiver_input.name
-    ))
-    emitter_input.receivers.add(receiver_input)
-
-
-def connect_multi(emitter, src, receiver, dst):
-    receiver_input_name, receiver_input_key = dst.split(':')
-    if '|' in receiver_input_key:
-        receiver_input_key, receiver_input_tag = receiver_input_key.split('|')
-    else:
-        receiver_input_tag = None
-
-    emitter_input = emitter.resource_inputs()[src]
-    receiver_input = receiver.resource_inputs()[receiver_input_name]
-
-    if not receiver_input.is_list or receiver_input_tag:
-        receiver_input.receivers.delete_all_incoming(
-            receiver_input,
-            destination_key=receiver_input_key,
-            tag=receiver_input_tag
-        )
-
-    # We can add default tag now
-    receiver_input_tag = receiver_input_tag or emitter.name
-
-    # NOTE: make sure that receiver.args[receiver_input] is of dict type
-    if not receiver_input.is_hash:
-        raise Exception(
-            'Receiver input {} must be a hash or a list of hashes'.format(receiver_input_name)
-        )
-
-    log.debug('Connecting {}::{} -> {}::{}[{}], tag={}'.format(
-        emitter.name, emitter_input.name, receiver.name, receiver_input.name,
-        receiver_input_key,
-        receiver_input_tag
-    ))
-    emitter_input.receivers.add_hash(
-        receiver_input,
-        receiver_input_key,
-        tag=receiver_input_tag
-    )
+    return mapping
+
+
+def connect(emitter, receiver, mapping=None):
+    emitter.connect(receiver, mapping)


 def disconnect_receiver_by_input(receiver, input_name):
-    input_node = receiver.resource_inputs()[input_name]
-
-    input_node.receivers.delete_all_incoming(input_node)
+    # input_node = receiver.resource_inputs()[input_name]
+
+    # input_node.receivers.delete_all_incoming(input_node)
+    receiver.db_obj.inputs.disconnect(input_name)


-def disconnect(emitter, receiver):
-    for emitter_input in emitter.resource_inputs().values():
-        for receiver_input in receiver.resource_inputs().values():
-            emitter_input.receivers.remove(receiver_input)
-
-
-def detailed_connection_graph(start_with=None, end_with=None):
-    resource_inputs_graph = orm.DBResource.inputs.graph()
-    inputs_graph = orm.DBResourceInput.receivers.graph()
-
-    def node_attrs(n):
-        if isinstance(n, orm.DBResource):
-            return {
-                'color': 'yellowgreen',
-                'style': 'filled',
-            }
-        elif isinstance(n, orm.DBResourceInput):
-            return {
-                'color': 'lightskyblue',
-                'style': 'filled, rounded',
-            }
-
-    def format_name(i):
-        if isinstance(i, orm.DBResource):
-            return '"{}"'.format(i.name)
-        elif isinstance(i, orm.DBResourceInput):
-            return '{}/{}'.format(i.resource.name, i.name)
-
-    for r, i in resource_inputs_graph.edges():
-        inputs_graph.add_edge(r, i)
-
-    ret = networkx.MultiDiGraph()
-
-    for u, v in inputs_graph.edges():
-        u_n = format_name(u)
-        v_n = format_name(v)
-        ret.add_edge(u_n, v_n)
-        ret.node[u_n] = node_attrs(u)
-        ret.node[v_n] = node_attrs(v)
-
-    return ret
+def detailed_connection_graph(start_with=None, end_with=None, details=False):
+    from solar.core.resource import Resource, load_all
+
+    if details:
+        def format_for_edge(resource, input):
+            return '"{}/{}"'.format(resource, input)
+    else:
+        def format_for_edge(resource, input):
+            input = input.split(':', 1)[0]
+            return '"{}/{}"'.format(resource, input)
+
+    res_props = {'color': 'yellowgreen',
+                 'style': 'filled'}
+    inp_props = {'color': 'lightskyblue',
+                 'style': 'filled, rounded'}
+
+    graph = networkx.DiGraph()
+
+    resources = load_all()
+
+    for resource in resources:
+        res_node = '{}'.format(resource.name)
+        for name in resource.db_obj.meta_inputs:
+            resource_input = format_for_edge(resource.name, name)
+            graph.add_edge(resource.name, resource_input)
+            graph.node[resource_input] = inp_props
+        conns = resource.connections
+        for (emitter_resource, emitter_input, receiver_resource, receiver_input) in conns:
+            e = format_for_edge(emitter_resource, emitter_input)
+            r = format_for_edge(receiver_resource, receiver_input)
+            graph.add_edge(emitter_resource, e)
+            graph.add_edge(receiver_resource, r)
+            graph.add_edge(e, r)
+            graph.node[e] = inp_props
+            graph.node[r] = inp_props
+        graph.node[res_node] = res_props
+    return graph
@@ -174,11 +174,11 @@ def validate_resource(r):
     inputs = r.resource_inputs()
     args = r.args

-    for input_name, input_definition in inputs.items():
+    for input_name, _ in inputs.items():
         errors = validate_input(
             args.get(input_name),
             #jsonschema=input_definition.get('jsonschema'),
-            schema=input_definition.schema
+            schema=r.db_obj.meta_inputs[input_name]['schema']
         )
         if errors:
             ret[input_name] = errors
33
solar/solar/dblayer/__init__.py
Normal file
@@ -0,0 +1,33 @@
+from solar.dblayer.model import ModelMeta
+from solar.dblayer.riak_client import RiakClient
+from solar.config import C
+
+
+if C.solar_db.mode == 'sqlite':
+    from solar.dblayer.sql_client import SqlClient
+    if C.solar_db.backend == 'memory':
+        client = SqlClient(C.solar_db.location, threadlocals=False, autocommit=False)
+    elif C.solar_db.backend == 'file':
+        client = SqlClient(C.solar_db.location, threadlocals=True,
+                           autocommit=False, pragmas=(('journal_mode', 'WAL'),
+                                                      ('synchronous', 'NORMAL')))
+    else:
+        raise Exception('Unknown sqlite backend %s', C.solar_db.backend)
+
+elif C.solar_db.mode == 'riak':
+    from solar.dblayer.riak_client import RiakClient
+    if C.solar_db.protocol == 'pbc':
+        client = RiakClient(
+            protocol=C.solar_db.protocol, host=C.solar_db.host, pb_port=C.solar_db.port)
+    elif C.solar_db.protocol == 'http':
+        client = RiakClient(
+            protocol=C.solar_db.protocol, host=C.solar_db.host, http_port=C.solar_db.port)
+    else:
+        raise Exception('Unknown riak protocol %s', C.solar_db.protocol)
+else:
+    raise Exception('Unknown dblayer backend %s', C.dblayer)
+
+ModelMeta.setup(client)
+
+from solar.dblayer import standalone_session_wrapper
+standalone_session_wrapper.create_all()
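A sketch (values illustrative) of switching the dblayer to the sqlite backend for local runs by writing a .config.override next to the working directory; the module above reads C.solar_db.mode, backend and location, and 'location' here is an assumed value:

import yaml

override = {'solar_db': {'mode': 'sqlite',
                         'backend': 'memory',
                         'location': ':memory:'}}  # hypothetical location value
with open('.config.override', 'w') as f:
    yaml.safe_dump(override, f, default_flow_style=False)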
19
solar/solar/dblayer/conflict_resolution.py
Normal file
@@ -0,0 +1,19 @@
+from collections import Counter
+
+
+def naive_resolver(riak_object):
+    # for now we support deleted vs existing object
+    siblings = riak_object.siblings
+    siblings_len = map(lambda sibling: (len(sibling._get_encoded_data()), sibling), siblings)
+    siblings_len.sort()
+    c = Counter((x[0] for x in siblings_len))
+    if len(c) > 2:
+        raise RuntimeError("Too many different siblings, not sure what to do with siblings")
+    if not 0 in c:
+        raise RuntimeError("No empty object for resolution, not sure what to do with siblings")
+    selected = max(siblings_len)
+    # TODO: pass info to obj save_lazy too
+    riak_object.siblings = [selected[1]]
+
+
+dblayer_conflict_resolver = naive_resolver
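The resolver only handles the "deleted vs existing" case: it expects an empty sibling and a non-empty one, and keeps the larger. A standalone sketch of that rule against the Python 2 module above, with stand-in sibling objects exposing the same _get_encoded_data() accessor:

from solar.dblayer.conflict_resolution import naive_resolver

class FakeSibling(object):
    def __init__(self, data):
        self._data = data
    def _get_encoded_data(self):
        return self._data

class FakeObject(object):
    def __init__(self, siblings):
        self.siblings = siblings

obj = FakeObject([FakeSibling(''), FakeSibling('{"name": "node1"}')])
naive_resolver(obj)  # keeps the non-empty sibling
assert len(obj.siblings) == 1
assert obj.siblings[0]._get_encoded_data() == '{"name": "node1"}'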
47
solar/solar/dblayer/gevent_helpers.py
Normal file
@@ -0,0 +1,47 @@
+# Copyright 2015 Mirantis, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+from gevent.pool import Pool
+import gevent
+from solar.dblayer.solar_models import Resource
+
+
+class DBLayerPool(Pool):
+
+    def __init__(self, *args, **kwargs):
+        super(DBLayerPool, self).__init__(*args, **kwargs)
+        self.parent = gevent.getcurrent()
+
+    def spawn(self, *args, **kwargs):
+        greenlet = self.greenlet_class(*args, **kwargs)
+        greenlet._nested_parent = self.parent
+        self.start(greenlet)
+        return greenlet
+
+
+@classmethod
+def multi_get(obj, keys):
+    pool = DBLayerPool(5)
+    return pool.map(obj.get, keys)
+
+
+def solar_map(funct, args, concurrency=5):
+    dp = DBLayerPool(concurrency)
+    return dp.map(funct, args)
+
+
+def get_local():
+    from solar.dblayer.gevent_local import local
+    return local
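Illustrative use of solar_map: fan a function out over a gevent pool, where each greenlet carries a _nested_parent link back to the spawning greenlet:

from solar.dblayer.gevent_helpers import solar_map

def square(x):
    return x * x

# Pool.map collects results in input order
print(solar_map(square, [1, 2, 3], concurrency=2))  # [1, 4, 9]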
172
solar/solar/dblayer/gevent_local.py
Normal file
@@ -0,0 +1,172 @@
+"""
+This code is slight modification of gevent.local
+
+Original file is MIT licensed.
+
+For details please refer for gevent license
+"""
+
+from copy import copy
+from weakref import ref
+from contextlib import contextmanager
+from gevent.hub import getcurrent, PYPY
+from gevent.lock import RLock
+
+__all__ = ["local"]
+
+
+class _wrefdict(dict):
+    """A dict that can be weak referenced"""
+
+
+class _localimpl(object):
+    """A class managing thread-local dicts"""
+    __slots__ = 'key', 'dicts', 'localargs', 'locallock', '__weakref__'
+
+    def __init__(self):
+        # The key used in the Thread objects' attribute dicts.
+        # We keep it a string for speed but make it unlikely to clash with
+        # a "real" attribute.
+        self.key = '_threading_local._localimpl.' + str(id(self))
+        # { id(Thread) -> (ref(Thread), thread-local dict) }
+        self.dicts = _wrefdict()
+
+    def find_parent(self):
+        """
+        Iterate to top most parent, and use it as a base
+        """
+        c = getcurrent()
+        while 1:
+            tmp_c = getattr(c, '_nested_parent', c.parent)
+            if not tmp_c:
+                return c
+            c = tmp_c
+
+    def get_dict(self):
+        """Return the dict for the current thread. Raises KeyError if none
+        defined."""
+        # thread = getcurrent()
+        thread = self.find_parent()
+        return self.dicts[id(thread)][1]
+
+    def create_dict(self):
+        """Create a new dict for the current thread, and return it."""
+        localdict = {}
+        key = self.key
+        thread = self.find_parent()
+        idt = id(thread)
+
+        # If we are working with a gevent.greenlet.Greenlet, we can
+        # pro-actively clear out with a link. Use rawlink to avoid
+        # spawning any more greenlets
+        try:
+            rawlink = thread.rawlink
+        except AttributeError:
+            # Otherwise we need to do it with weak refs
+            def local_deleted(_, key=key):
+                # When the localimpl is deleted, remove the thread attribute.
+                thread = wrthread()
+                if thread is not None:
+                    del thread.__dict__[key]
+
+            def thread_deleted(_, idt=idt):
+                # When the thread is deleted, remove the local dict.
+                # Note that this is suboptimal if the thread object gets
+                # caught in a reference loop. We would like to be called
+                # as soon as the OS-level thread ends instead.
+                _local = wrlocal()
+                if _local is not None:
+                    _local.dicts.pop(idt, None)
+            wrlocal = ref(self, local_deleted)
+            wrthread = ref(thread, thread_deleted)
+            thread.__dict__[key] = wrlocal
+        else:
+            wrdicts = ref(self.dicts)
+
+            def clear(_):
+                dicts = wrdicts()
+                if dicts:
+                    dicts.pop(idt, None)
+            rawlink(clear)
+            wrthread = None
+
+        self.dicts[idt] = wrthread, localdict
+        return localdict
+
+
+@contextmanager
+def _patch(self):
+    impl = object.__getattribute__(self, '_local__impl')
+    orig_dct = object.__getattribute__(self, '__dict__')
+    try:
+        dct = impl.get_dict()
+    except KeyError:
+        # it's OK to acquire the lock here and not earlier, because the above code won't switch out
+        # however, subclassed __init__ might switch, so we do need to acquire the lock here
+        dct = impl.create_dict()
+        args, kw = impl.localargs
+        with impl.locallock:
+            self.__init__(*args, **kw)
+    with impl.locallock:
+        object.__setattr__(self, '__dict__', dct)
+        yield
+        object.__setattr__(self, '__dict__', orig_dct)
+
+
+class local(object):
+    __slots__ = '_local__impl', '__dict__'
+
+    def __new__(cls, *args, **kw):
+        if args or kw:
+            if (PYPY and cls.__init__ == object.__init__) or (not PYPY and cls.__init__ is object.__init__):
+                raise TypeError("Initialization arguments are not supported")
+        self = object.__new__(cls)
+        impl = _localimpl()
+        impl.localargs = (args, kw)
+        impl.locallock = RLock()
+        object.__setattr__(self, '_local__impl', impl)
+        # We need to create the thread dict in anticipation of
+        # __init__ being called, to make sure we don't call it
+        # again ourselves.
+        impl.create_dict()
+        return self
+
+    def __getattribute__(self, name):
+        with _patch(self):
+            return object.__getattribute__(self, name)
+
+    def __setattr__(self, name, value):
+        if name == '__dict__':
+            raise AttributeError(
+                "%r object attribute '__dict__' is read-only"
+                % self.__class__.__name__)
+        with _patch(self):
+            return object.__setattr__(self, name, value)
+
+    def __delattr__(self, name):
+        if name == '__dict__':
+            raise AttributeError(
+                "%r object attribute '__dict__' is read-only"
+                % self.__class__.__name__)
+        with _patch(self):
+            return object.__delattr__(self, name)
+
+    def __copy__(self):
+        impl = object.__getattribute__(self, '_local__impl')
+        current = impl.find_parent()
+        currentId = id(current)
+        d = impl.get_dict()
+        duplicate = copy(d)
+
+        cls = type(self)
+        if (PYPY and cls.__init__ != object.__init__) or (not PYPY and cls.__init__ is not object.__init__):
+            args, kw = impl.localargs
+            instance = cls(*args, **kw)
+        else:
+            instance = cls()
+
+        new_impl = object.__getattribute__(instance, '_local__impl')
+        tpl = new_impl.dicts[currentId]
+        new_impl.dicts[currentId] = (tpl[0], duplicate)
+
+        return instance
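Unlike stock gevent.local, this variant resolves storage through find_parent(), walking _nested_parent links up to the topmost greenlet. A sketch of the intended behaviour (not verified against all greenlet setups): a child spawned through DBLayerPool should see the storage of the greenlet that created the pool rather than a fresh dict.

import gevent
from solar.dblayer.gevent_local import local
from solar.dblayer.gevent_helpers import DBLayerPool

store = local()
store.session = 'parent-session'  # set in the spawning greenlet

def child():
    # find_parent() follows _nested_parent set by DBLayerPool.spawn,
    # so both greenlets land on the same local dict
    return store.session

pool = DBLayerPool(2)
g = pool.spawn(child)
g.join()
assert g.value == 'parent-session'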
40
solar/solar/dblayer/gevent_patches.py
Normal file
40
solar/solar/dblayer/gevent_patches.py
Normal file
@ -0,0 +1,40 @@
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


def _patch(obj, name, target):
    orig = getattr(obj, name)
    setattr(obj, '_orig_%s' % name, orig)
    setattr(obj, name, target)


def patch_all():
    from solar.dblayer.model import ModelMeta
    if ModelMeta._defined_models:
        raise RuntimeError("You should run patch_multi_get before defining models")
    from solar.dblayer.model import Model
    from solar.dblayer.solar_models import InputsFieldWrp

    from solar.dblayer.gevent_helpers import (multi_get,
                                              solar_map,
                                              get_local)
    from solar import utils

    _patch(Model, 'multi_get', multi_get)

    _patch(utils, 'solar_map', solar_map)
    _patch(utils, 'get_local', get_local)
    _patch(Model, '_local', get_local()())
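The guard above makes call order significant; a sketch of the intended bootstrap (call site assumed, not shown in the commit):

    from solar.dblayer.gevent_patches import patch_all

    patch_all()    # must run before any Model subclass is defined
    from solar.dblayer.solar_models import Resource    # models defined after patching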
910
solar/solar/dblayer/model.py
Normal file
910
solar/solar/dblayer/model.py
Normal file
@ -0,0 +1,910 @@
from solar.utils import get_local
from random import getrandbits
import uuid
from functools import wraps, total_ordering
from operator import itemgetter
import time
from contextlib import contextmanager
from threading import RLock
from solar.dblayer.conflict_resolution import dblayer_conflict_resolver


class DBLayerException(Exception):
    pass


class DBLayerNotFound(DBLayerException):
    pass


class DBLayerNoRiakObj(DBLayerException):
    pass


class NONE:
    """A None like type"""
    pass


class SingleIndexCache(object):

    def __init__(self):
        self.lock = RLock()
        self.cached_vals = []

    def __enter__(self):
        self.lock.acquire()
        return self

    def fill(self, values):
        self.cached_vals = values

    def wipe(self):
        self.cached_vals = []

    def get_index(self, real_funct, ind_name, **kwargs):
        kwargs.setdefault('max_results', 999999)
        if not self.cached_vals:
            recvs = real_funct(ind_name, **kwargs).results
            self.fill(recvs)

    def filter(self, startkey, endkey, max_results=1):
        c = self.cached_vals
        for (curr_val, obj_key) in c:
            if max_results == 0:
                break
            if curr_val >= startkey:
                if curr_val <= endkey:
                    max_results -= 1
                    yield (curr_val, obj_key)
                else:
                    break

    def __exit__(self, *args, **kwargs):
        self.lock.release()


class SingleClassCache(object):

    __slots__ = ['obj_cache', 'db_ch_state',
                 'lazy_save', 'origin_class']

    def __init__(self, origin_class):
        self.obj_cache = {}
        self.db_ch_state = {'index': set()}
        self.lazy_save = set()
        self.origin_class = origin_class


class ClassCache(object):

    def __init__(self, *args, **kwargs):
        self._l = RLock()

    def __get__(self, inst, owner):
        # th = current_thread()
        with self._l:
            l = Model._local
            # better don't duplicate class names
            cache_name = owner.__name__
            try:
                cache_id = l.cache_id
            except AttributeError:
                cache_id = uuid.UUID(int=getrandbits(128), version=4).hex
                setattr(l, 'cache_id', cache_id)
            if getattr(l, 'cache_id_cmp', None) != cache_id:
                # new cache
                setattr(l, 'cache_id_cmp', cache_id)
                c = SingleClassCache(owner)
                setattr(l, '_model_caches', {})
                l._model_caches[cache_name] = c
            try:
                # already had this owner in cache
                return l._model_caches[cache_name]
            except KeyError:
                # old cache but first time this owner
                c = SingleClassCache(owner)
                l._model_caches[cache_name] = c
                return c


def clear_cache():
    # th = current_thread()
    l = Model._local
    cache_id = uuid.UUID(int=getrandbits(128), version=4).hex
    setattr(l, 'cache_id_cmp', cache_id)


def get_bucket(_, owner, mcs):
    name = owner.get_bucket_name()
    bucket = mcs.riak_client.bucket(name)
    bucket.resolver = dblayer_conflict_resolver
    return bucket


def changes_state_for(_type):
    def _inner1(f):
        @wraps(f)
        def _inner2(obj, *args, **kwargs):
            obj._c.db_ch_state['index'].add(obj.key)
            obj.save_lazy()
            return f(obj, *args, **kwargs)
        return _inner2
    return _inner1


def clears_state_for(_type):
    def _inner1(f):
        @wraps(f)
        def _inner2(obj, *args, **kwargs):
            try:
                obj._c.db_ch_state[_type].remove(obj.key)
            except KeyError:
                pass
            return f(obj, *args, **kwargs)
        return _inner2
    return _inner1


def requires_clean_state(_type):
    def _inner1(f):
        @wraps(f)
        def _inner2(obj, *args, **kwargs):
            check_state_for(_type, obj)
            return f(obj, *args, **kwargs)
        return _inner2
    return _inner1


def check_state_for(_type, obj):
    state = obj._c.db_ch_state.get(_type)
    if state:
        if True:
            # TODO: solve it
            orig_state = state
            obj.save_all_lazy()
            state = obj._c.db_ch_state.get(_type)
            if not state:
                return
        raise Exception("Dirty state, save all %r objects first" % obj.__class__)


@total_ordering
class StrInt(object):

    precision = 3
    positive_char = 'p'
    negative_char = 'n'
    format_size = 10 + precision

    def __init__(self, val=None):
        self._val = self._make_val(val)

    def __str__(self):
        return self._val.__str__()

    def __repr__(self):
        return "<%s: %r>" % (self.__class__.__name__, self._val)

    @classmethod
    def p_max(cls):
        return cls(int('9' * cls.format_size))

    @classmethod
    def p_min(cls):
        return cls(1)

    @classmethod
    def n_max(cls):
        return -cls.p_max()

    @classmethod
    def n_min(cls):
        return -cls.p_min()

    def __neg__(self):
        time_ = self.int_val()
        ret = self.__class__(-time_)
        return ret

    @classmethod
    def greater(cls, inst):
        if isinstance(inst, cls):
            return cls(inst._val + 'g')
        return cls(inst + 'g')

    @classmethod
    def to_hex(cls, value):
        char = cls.positive_char
        if value < 0:
            value = int('9' * cls.format_size) + value
            char = cls.negative_char
        f = '%s%%.%dx' % (char, cls.format_size - 2)
        value = f % value
        if value[-1] == 'L':
            value = value[:-1]
        return value
    @classmethod
    def from_hex(cls, value):
        v = int(value[1:], 16)
        if value[0] == cls.negative_char:
            v -= int('9' * cls.format_size)
        return v

    def int_val(self):
        return self.from_hex(self._val)
    @classmethod
    def from_simple(cls, value):
        return cls(value)

    @classmethod
    def to_simple(cls, value):
        return value._val

    @classmethod
    def _make_val(cls, val):
        if val is None:
            val = time.time()
        if isinstance(val, (long, int, float)):
            if isinstance(val, float):
                val = int(val * (10 ** cls.precision))
            val = cls.to_hex(val)
        elif isinstance(val, cls):
            val = val._val
        return val
    def __eq__(self, other):
        if isinstance(other, basestring):
            first_ch = other[0]
            if first_ch not in (self.positive_char, self.negative_char):
                raise Exception("Cannot compare %r with %r" % (self, other))
            else:
                other = self.from_simple(other)
        if not isinstance(other, self.__class__):
            raise Exception("Cannot compare %r with %r" % (self, other))
        so = other._val[0]
        ss = self._val[0]
        son = so == other.negative_char
        ssn = ss == self.negative_char
        if son != ssn:
            return False
        return self._val[1:] == other._val[1:]

    def __gt__(self, other):
        if isinstance(other, basestring):
            first_ch = other[0]
            if first_ch not in (self.positive_char, self.negative_char):
                raise Exception("Cannot compare %r with %r" % (self, other))
            else:
                other = self.from_simple(other)
        if not isinstance(other, self.__class__):
            raise Exception("Cannot compare %r with %r" % (self, other))
        so = other._val[0]
        ss = self._val[0]
        if ss == self.positive_char and so == other.negative_char:
            return True
        elif ss == self.negative_char and so == other.positive_char:
            return False
        else:
            return other._val[1:] < self._val[1:]
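A minimal sketch of the ordering property (example values assumed, not from the commit): the fixed-width, sign-prefixed hex strings compare lexicographically in the same order as the numbers they encode, which is what lets string range scans over a Riak index behave numerically.

    vals = [3, -7, 0, 42]
    encoded = sorted(StrInt.to_simple(StrInt(v)) for v in vals)
    assert [StrInt.from_simple(e).int_val() for e in encoded] == sorted(vals)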
class Replacer(object):

    def __init__(self, name, fget, *args):
        self.name = name
        self.fget = fget
        self.args = args

    def __get__(self, instance, owner):
        val = self.fget(instance, owner, *self.args)
        if instance is not None:
            setattr(instance, self.name, val)
        else:
            setattr(owner, self.name, val)
        return val


class FieldBase(object):

    def __init__(self, fname, default):
        self._fname = fname
        self._default = default

    @property
    def fname(self):
        return self._fname

    @fname.setter
    def fname(self, value):
        if self._fname is None:
            self._fname = value
        else:
            raise Exception("fname already set")

    @property
    def default(self):
        if self._default is NONE:
            return self._default
        if callable(self._default):
            return self._default()
        return self._default


class Field(FieldBase):

    # in from_dict, when a value is None, types that are *not* in
    # _not_nullable_types are coerced to NONE
    _not_nullable_types = {int, float, long, str, unicode, basestring}
    _simple_types = {int, float, long, str, unicode, basestring, list, tuple, dict}

    def __init__(self, _type, fname=None, default=NONE):
        if _type == str:
            _type = basestring
        self._type = _type
        super(Field, self).__init__(fname=fname, default=default)

    def __get__(self, instance, owner):
        if instance is None:
            return self
        val = instance._data_container[self.fname]
        if self._type in self._simple_types:
            return val
        else:
            return self._type.from_simple(val)

    def __set__(self, instance, value):
        if not isinstance(value, self._type):
            raise Exception("Invalid type %r for %r, expected %r"
                            % (type(value), self.fname, self._type))
        if self._type not in self._simple_types:
            value = self._type.to_simple(value)
        instance._field_changed(self)
        instance._data_container[self.fname] = value
        return value

    def __str__(self):
        return "<%s:%r>" % (self.__class__.__name__, self.fname)

    __repr__ = __str__
class IndexedField(Field):

    def __set__(self, instance, value):
        value = super(IndexedField, self).__set__(instance, value)
        instance._set_index('{}_bin'.format(self.fname), value)
        return value

    def _filter(self, startkey, endkey=None, **kwargs):
        if isinstance(startkey, self._type) and self._type not in self._simple_types:
            startkey = self._type.to_simple(startkey)
        if isinstance(endkey, self._type) and self._type not in self._simple_types:
            endkey = self._type.to_simple(endkey)
        kwargs.setdefault('max_results', 1000000)
        res = self._declared_in._get_index('{}_bin'.format(self.fname),
                                           startkey=startkey,
                                           endkey=endkey,
                                           **kwargs).results
        return res

    def filter(self, *args, **kwargs):
        kwargs['return_terms'] = False
        res = self._filter(*args, **kwargs)
        return res


class IndexFieldWrp(object):

    def __init__(self, field_obj, instance):
        self._field_obj = field_obj
        self._instance = instance
        self._c = self._instance._c

    @property
    def fname(self):
        return self._field_obj.fname

    def __str__(self):
        return "<%s for field %s>" % (self.__class__.__name__, self._field_obj)

    def _get_field_val(self, name):
        return self._instance._data_container[self.fname][name]

    def __getitem__(self, name):
        return self._get_field_val(name)

    def __setitem__(self, name, value):
        inst = self._instance
        inst._add_index('%s_bin' % self.fname, '{}|{}'.format(name, value))

    def __delitem__(self, name):
        inst = self._instance
        del inst._data_container[self.fname][name]
        indexes = inst._riak_object.indexes

        # TODO: move this to backend layer
        for ind_name, ind_value in indexes:
            if ind_name == ('%s_bin' % self.fname):
                if ind_value.startswith('{}|'.format(name)):
                    inst._remove_index(ind_name, ind_value)
                    break


class IndexField(FieldBase):

    _wrp_class = IndexFieldWrp

    def __init__(self, fname=None, default=NONE):
        super(IndexField, self).__init__(fname, default)

    def _on_no_inst(self, instance, owner):
        return self

    def __get__(self, instance, owner):
        if instance is None:
            return self._on_no_inst(instance, owner)
        cached = getattr(instance, '_real_obj_%s' % self.fname, None)
        if cached:
            return cached
        obj = self._wrp_class(self, instance)
        setattr(instance, '_real_obj_%s' % self.fname, obj)
        return obj

    def __set__(self, instance, value):
        wrp = getattr(instance, self.fname)
        instance._data_container[self.fname] = self.default
        for f_name, f_value in value.iteritems():
            wrp[f_name] = f_value

    def _parse_key(self, k):
        if '=' in k:
            val, subval = k.split('=', 1)
            if subval is None:
                subval = ''
            if not isinstance(subval, basestring):
                subval = str(subval)
            return '{}|{}'.format(val, subval)

    def filter(self, startkey, endkey=None, **kwargs):
        startkey = self._parse_key(startkey)
        if endkey is None:
            if startkey.endswith('*'):
                startkey = startkey[:-1]
                endkey = startkey + '~'
            else:
                endkey = startkey + ' '
        kwargs.setdefault('max_results', 1000000)
        kwargs['return_terms'] = False
        res = self._declared_in._get_index('{}_bin'.format(self.fname),
                                           startkey=startkey,
                                           endkey=endkey,
                                           **kwargs).results
        return list(res)
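A sketch of the '<name>|<sub-value>' key convention handled by _parse_key and filter above (model and index names invented for illustration):

    SomeModel.an_index.filter('role=web')     # exact: scans 'role|web' .. 'role|web '
    SomeModel.an_index.filter('role=web*')    # prefix: scans 'role|web' .. 'role|web~'

The trailing space and '~' work as range sentinels because ' ' sorts below and '~' above every other printable key character.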
class CompositeIndexFieldWrp(IndexFieldWrp):

    def reset(self):
        index = []
        for f in self._field_obj.fields:
            index.append(self._instance._data_container.get(f, ''))
        index = '|'.join(index)

        index_to_del = []
        for index_name, index_val in self._instance._riak_object.indexes:
            if index_name == '%s_bin' % self.fname:
                if index != index_val:
                    index_to_del.append((index_name, index_val))

        for index_name, index_val in index_to_del:
            self._instance._remove_index(index_name, index_val)

        self._instance._add_index('%s_bin' % self.fname, index)


class CompositeIndexField(IndexField):

    _wrp_class = CompositeIndexFieldWrp

    def __init__(self, fields=(), *args, **kwargs):
        super(CompositeIndexField, self).__init__(*args, **kwargs)
        self.fields = fields

    def _parse_key(self, startkey):
        vals = [startkey[f] for f in self.fields if f in startkey]
        return '|'.join(vals) + '*'
class ModelMeta(type):

    _defined_models = set()

    def __new__(mcs, name, bases, attrs):
        cls = super(ModelMeta, mcs).__new__(mcs, name, bases, attrs)
        model_fields = set((name for (name, attr) in attrs.items()
                            if isinstance(attr, FieldBase) and not name.startswith('_')))
        for f in model_fields:
            field = getattr(cls, f)
            if hasattr(field, 'fname') and field.fname is None:
                setattr(field, 'fname', f)
            setattr(field, 'gname', f)
            # `_declared_in` is needed by `with_tag`-style class-level lookups;
            # no need to wrap the descriptor with another object then
            setattr(field, '_declared_in', cls)

        for base in bases:
            try:
                model_fields_base = base._model_fields
            except AttributeError:
                continue
            else:
                for given in model_fields_base:
                    model_fields.add(given.gname)

        cls._model_fields = [getattr(cls, x) for x in model_fields]

        if bases == (object, ):
            return cls

        if issubclass(cls, NestedModel):
            return cls

        cls.bucket = Replacer('bucket', get_bucket, mcs)
        mcs._defined_models.add(cls)
        return cls

    @classmethod
    def setup(mcs, riak_client):
        if hasattr(mcs, 'riak_client'):
            raise DBLayerException("Setup already done")
        mcs.riak_client = riak_client

    @classmethod
    def remove_all(mcs):
        for model in mcs._defined_models:
            model.delete_all()

    @classmethod
    def save_all_lazy(mcs, result=True):
        for cls in mcs._defined_models:
            for to_save in cls._c.lazy_save:
                try:
                    to_save.save()
                except DBLayerException:
                    continue
            cls._c.lazy_save.clear()

    @classmethod
    def session_end(mcs, result=True):
        mcs.save_all_lazy()
        mcs.riak_client.session_end(result)

    @classmethod
    def session_start(mcs):
        clear_cache()
        mcs.riak_client.session_start()
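A sketch of the session contract defined above (resource key assumed; Resource is defined in solar_models.py further down):

    ModelMeta.session_start()                 # fresh cache id
    r = Resource.get('node1')                 # first hit goes to Riak
    assert Resource.get('node1') is r         # second hit is the per-session cache
    ModelMeta.session_end()                   # saves lazy objects, drops the cache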
class NestedField(FieldBase):

    def __init__(self, _class, fname=None, default=NONE, hash_key=None):
        self._class = _class
        self._hash_key = hash_key
        super(NestedField, self).__init__(fname=fname, default=default)

    def __get__(self, instance, owner):
        if instance is None:
            return self
        cached = getattr(instance, '_real_obj_%s' % self.fname, None)
        if cached:
            return cached
        if self._hash_key is not None:
            obj = NestedModelHash(self, instance, self._class, self._hash_key)
        else:
            obj = self._class(self, instance)
        setattr(instance, '_real_obj_%s' % self.fname, obj)
        return obj

    def __set__(self, instance, value):
        obj = getattr(instance, self.fname)
        obj.from_dict(value)

    def __delete__(self, instance):
        obj = getattr(instance, self.fname)
        obj.delete()


class NestedModel(object):

    __metaclass__ = ModelMeta

    _nested_value = None

    def __init__(self, field, parent):
        self._field = field
        self._parent = parent

    def from_dict(self, data):
        for field in self._model_fields:
            fname = field.fname
            gname = field.gname
            val = data.get(fname, NONE)
            default = field.default
            if val is NONE and default is not NONE:
                setattr(self, gname, default)
            elif val is not NONE:
                setattr(self, gname, val)
        return

    @property
    def _data_container(self):
        pdc = self._parent._data_container
        try:
            ppdc = pdc[self._field.fname]
        except KeyError:
            ppdc = pdc[self._field.fname] = {}
        if self._field._hash_key is None:
            return ppdc
        else:
            try:
                ret = ppdc[self._nested_value]
            except KeyError:
                ret = ppdc[self._nested_value] = {}
            return ret

    def _field_changed(self, field):
        return self._parent._modified_fields.add(self._field.fname)

    def delete(self):
        if self._field._hash_key is None:
            del self._parent._data_container[self._field.fname]


class NestedModelHash(object):

    def __init__(self, field, parent, _class, hash_key):
        self._field = field
        self._parent = parent
        self._class = _class
        self._hash_key = hash_key
        self._cache = {}

    def __getitem__(self, name):
        try:
            return self._cache[name]
        except KeyError:
            obj = self._class(self._field, self._parent)
            obj._nested_value = name
            self._cache[name] = obj
            return obj

    def __setitem__(self, name, value):
        obj = self[name]
        return obj.from_dict(value)

    def __delitem__(self, name):
        obj = self[name]
        obj.delete()
        del self._cache[name]

    def from_dict(self, data):
        hk = data[self._hash_key]
        self[hk] = data
class Model(object):

    __metaclass__ = ModelMeta

    _c = ClassCache()

    _key = None
    _new = None
    _real_riak_object = None

    _changed = False

    _local = get_local()()

    def __init__(self, key=None):
        self._modified_fields = set()
        # TODO: that _indexes_changed should be smarter
        self._indexes_changed = False
        self.key = key

    @property
    def key(self):
        return self._key

    @key.setter
    def key(self, value):
        if self._key is None:
            self._key = value
        else:
            raise Exception("Can't set key again")

    @property
    def _riak_object(self):
        if self._real_riak_object is None:
            raise DBLayerNoRiakObj("You cannot access _riak_object now")
        return self._real_riak_object

    @_riak_object.setter
    def _riak_object(self, value):
        if self._real_riak_object is not None:
            raise DBLayerException("Already have _riak_object")
        self._real_riak_object = value

    @property
    def _data_container(self):
        return self._riak_object.data

    @changes_state_for('index')
    def _set_index(self, name, value):
        self._indexes_changed = True
        return self._riak_object.set_index(name, value)

    @changes_state_for('index')
    def _add_index(self, *args, **kwargs):
        self._indexes_changed = True
        return self._riak_object.add_index(*args, **kwargs)

    @changes_state_for('index')
    def _remove_index(self, *args, **kwargs):
        self._indexes_changed = True
        return self._riak_object.remove_index(*args, **kwargs)

    @classmethod
    def _get_index(cls, *args, **kwargs):
        return cls.bucket.get_index(*args, **kwargs)

    @property
    def _bucket(self):
        return self._riak_object.bucket

    @classmethod
    def get_bucket_name(cls):
        # XXX: should be changed and more smart
        return cls.__name__

    def _field_changed(self, field):
        self._modified_fields.add(field.fname)

    def changed(self):
        if self._modified_fields:
            return True
        return self._indexes_changed

    def to_dict(self):
        d = dict(self._riak_object.data)
        d['key'] = self.key
        return d

    def __str__(self):
        if self._real_riak_object is None:
            return "<%s not initialized>" % (self.__class__.__name__)
        return "<%s %s:%s>" % (self.__class__.__name__,
                               self._riak_object.bucket.name, self.key)

    @classmethod
    def new(cls, key, data):
        return cls.from_dict(key, data)

    @classmethod
    def get_or_create(cls, key):
        try:
            return cls.get(key)
        except DBLayerNotFound:
            return cls.new(key, {})

    @classmethod
    def from_riakobj(cls, riak_obj):
        obj = cls(riak_obj.key)
        obj._riak_object = riak_obj
        if obj._new is None:
            obj._new = False
        cls._c.obj_cache[riak_obj.key] = obj
        return obj

    @classmethod
    def from_dict(cls, key, data=None):
        if isinstance(key, dict) and data is None:
            data = key
            try:
                key = data['key']
            except KeyError:
                raise DBLayerException("No key specified")
        if key and 'key' in data and data['key'] != key:
            raise DBLayerException("Different key values detected")
        data['key'] = key
        riak_obj = cls.bucket.new(key, data={})
        obj = cls.from_riakobj(riak_obj)
        obj._new = True

        for field in cls._model_fields:
            # if field is cls._pkey_field:
            #     continue  # pkey already set
            fname = field.fname
            gname = field.gname
            val = data.get(fname, NONE)
            default = field.default
            if val is None and field._type not in field._not_nullable_types:
                val = NONE
            if val is NONE and default is not NONE:
                setattr(obj, gname, default)
            elif val is not NONE:
                setattr(obj, gname, val)
        return obj

    def __hash__(self):
        return hash(self.key)

    @classmethod
    def get(cls, key):
        try:
            return cls._c.obj_cache[key]
        except KeyError:
            riak_object = cls.bucket.get(key)
            if not riak_object.exists:
                raise DBLayerNotFound(key)
            else:
                obj = cls.from_riakobj(riak_object)
                return obj

    @classmethod
    def multi_get(cls, keys):
        # TODO: parallel execution
        ret = map(cls.get, keys)
        return ret

    def _reset_state(self):
        self._new = False
        self._modified_fields.clear()
        self._indexes_changed = False

    @classmethod
    def save_all_lazy(cls):
        for to_save in set(cls._c.lazy_save):
            try:
                to_save.save()
            except DBLayerException:
                continue
        cls._c.lazy_save.clear()

    @clears_state_for('index')
    def save(self, force=False):
        if self.changed() or force or self._new:
            res = self._riak_object.store()
            self._reset_state()
            return res
        else:
            raise DBLayerException("No changes")

    def save_lazy(self):
        self._c.lazy_save.add(self)

    @classmethod
    def delete_all(cls):
        cls.riak_client.delete_all(cls)

    def delete(self):
        ls = self._c.lazy_save
        try:
            ls.remove(self)
        except KeyError:
            pass
        try:
            del self._c.obj_cache[self.key]
        except KeyError:
            pass
        self._riak_object.delete()
        return self
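A minimal end-to-end sketch of the model API above (model name and values invented; assumes ModelMeta.setup() was already called with a connected client):

    class Server(Model):
        hostname = Field(str)
        updated = IndexedField(StrInt)

    srv = Server.from_dict('srv1', {'hostname': 'db-01', 'updated': StrInt()})
    srv.save()                                # writes data plus the updated_bin index
    assert Server.get('srv1') is srv          # satisfied from the session cache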
25
solar/solar/dblayer/riak_client.py
Normal file
25
solar/solar/dblayer/riak_client.py
Normal file
@ -0,0 +1,25 @@
from riak import RiakClient as OrigRiakClient
import time

from solar.dblayer.model import clear_cache


class RiakClient(OrigRiakClient):

    def session_start(self):
        clear_cache()

    def session_end(self, result=True):
        # ignore result
        clear_cache()

    def delete_all(self, cls):
        for _ in xrange(10):
            # riak dislikes deletes without dvv
            rst = cls.bucket.get_index('$bucket',
                                       startkey='_',
                                       max_results=100000).results
            if not rst:
                return
            for key in rst:
                cls.bucket.delete(key)
            time.sleep(0.5)
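A wiring sketch (host and port taken from the repository .config; the exact call site is assumed, not shown in this commit):

    from solar.dblayer.model import ModelMeta
    from solar.dblayer.riak_client import RiakClient

    ModelMeta.setup(RiakClient(protocol='pbc', host='localhost', pb_port=8087))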
964
solar/solar/dblayer/solar_models.py
Normal file
964
solar/solar/dblayer/solar_models.py
Normal file
@ -0,0 +1,964 @@
# -*- coding: utf-8 -*-
from solar.dblayer.model import (Model, Field, IndexField,
                                 IndexFieldWrp,
                                 DBLayerException,
                                 requires_clean_state, check_state_for,
                                 StrInt, SingleIndexCache,
                                 IndexedField, CompositeIndexField)
from types import NoneType
from operator import itemgetter
from enum import Enum
from uuid import uuid4
from collections import defaultdict

from solar.utils import solar_map

InputTypes = Enum('InputTypes',
                  'simple list hash list_hash')


class DBLayerSolarException(DBLayerException):
    pass


class InputsFieldWrp(IndexFieldWrp):

    _simple_types = (NoneType, int, float, basestring, str, unicode)

    def __init__(self, *args, **kwargs):
        super(InputsFieldWrp, self).__init__(*args, **kwargs)
        # TODO: add cache for lookup
        self.inputs_index_cache = SingleIndexCache()
        self._cache = {}

    def _input_type(self, resource, name):
        # XXX: it could be worth to precalculate it
        if ':' in name:
            name = name.split(":", 1)[0]
        schema = resource.meta_inputs[name]['schema']
        if isinstance(schema, self._simple_types):
            return InputTypes.simple
        if isinstance(schema, list):
            if len(schema) > 0 and isinstance(schema[0], dict):
                return InputTypes.list_hash
            return InputTypes.list
        if isinstance(schema, dict):
            return InputTypes.hash
        raise Exception("Unknown type")

    def _edges_fmt(self, vals):
        for val in vals:
            data = val.split('|')
            dlen = len(data)
            my_resource = data[0]
            my_input = data[1]
            other_resource = data[2]
            other_input = data[3]
            if dlen == 5:
                meta = None
            elif dlen == 7:
                meta = {'destination_key': data[5],
                        'tag': data[4]}
            else:
                raise Exception("Unsupported case")
            yield (other_resource, other_input), (my_resource, my_input), meta

    def _edges(self):
        inst = self._instance
        start = inst.key
        my_ind_name = '{}_recv_bin'.format(self.fname)
        res = inst._get_index(my_ind_name,
                              startkey=start + '|',
                              endkey=start + '|~',
                              return_terms=True,
                              max_results=99999).results
        vals = map(itemgetter(0), res)
        return self._edges_fmt(vals)

    def _single_edge(self, name):
        inst = self._instance
        self._has_own_input(name)
        start = '{}|{}'.format(inst.key, name)
        my_ind_name = '{}_recv_bin'.format(self.fname)
        res = inst._get_index(my_ind_name,
                              startkey=start + '|',
                              endkey=start + '|~',
                              return_terms=True,
                              max_results=99999).results
        vals = map(itemgetter(0), res)
        return self._edges_fmt(vals)

    def __contains__(self, name):
        try:
            self._has_own_input(name)
        except Exception:
            return False
        else:
            return True

    def __iter__(self):
        for name in self._instance._data_container[self.fname]:
            yield name

    def keys(self):
        return list(self.__iter__())

    def as_dict(self):
        items = solar_map(lambda x: (x, self._get_field_val(x)),
                          [x for x in self], concurrency=3)
        return dict(items)

    def _connect_my_simple(self, my_resource, my_inp_name, other_resource,
                           other_inp_name, my_type, other_type):
        types_mapping = '|{}_{}'.format(my_type.value, other_type.value)
        my_ind_name = '{}_recv_bin'.format(self.fname)
        my_ind_val = '{}|{}|{}|{}'.format(my_resource.key,
                                          my_inp_name,
                                          other_resource.key,
                                          other_inp_name)
        my_ind_val += types_mapping

        real_my_type = self._input_type(my_resource, my_inp_name)
        if real_my_type == InputTypes.simple:
            for ind_name, ind_value in my_resource._riak_object.indexes:
                if ind_name == my_ind_name:
                    mr, mn, _ = ind_value.split('|', 2)
                    if mr == my_resource.key and mn == my_inp_name:
                        my_resource._remove_index(ind_name, ind_value)
                        break

        my_resource._add_index(my_ind_name, my_ind_val)
        return my_inp_name

    def _connect_other_simple(self, my_resource, my_inp_name, other_resource,
                              other_inp_name, my_type, other_type):
        other_ind_name = '{}_emit_bin'.format(self.fname)

        real_my_type = self._input_type(my_resource, my_inp_name)
        if real_my_type == InputTypes.simple or ':' not in my_inp_name:
            other_ind_val = '{}|{}|{}|{}'.format(other_resource.key,
                                                 other_inp_name,
                                                 my_resource.key,
                                                 my_inp_name)
            for ind_name, ind_value in my_resource._riak_object.indexes:
                if ind_name == other_ind_name:
                    try:
                        mr, mn = ind_value.rsplit('|')[2:]
                    except ValueError:
                        if len(ind_value.split('|')) == 6:
                            continue
                        else:
                            raise
                    if mr == my_resource.key and mn == my_inp_name:
                        my_resource._remove_index(ind_name, ind_value)
                        break

        elif real_my_type in (InputTypes.list_hash, InputTypes.hash, InputTypes.list):
            my_key, my_val = my_inp_name.split(':', 1)
            if '|' in my_val:
                my_val, my_tag = my_val.split('|', 1)
            else:
                my_tag = other_resource.name
            my_inp_name = my_key
            other_ind_val = '{}|{}|{}|{}|{}|{}'.format(other_resource.key,
                                                       other_inp_name,
                                                       my_resource.key,
                                                       my_inp_name,
                                                       my_tag,
                                                       my_val)
            for ind_name, ind_value in my_resource._riak_object.indexes:
                if ind_name == other_ind_name:
                    try:
                        mr, mn, mt, mv = ind_value.rsplit('|')[2:]
                    except ValueError:
                        if len(ind_value.split('|')) == 4:
                            continue
                        else:
                            raise
                    if mr == my_resource.key and mn == my_inp_name \
                       and mt == my_tag and mv == my_val:
                        my_resource._remove_index(ind_name, ind_value)
                        break
        else:
            raise Exception("Unsupported connection type")
        my_resource._add_index(other_ind_name,
                               other_ind_val)
        return other_inp_name

    def _connect_other_hash(self, my_resource, my_inp_name, other_resource,
                            other_inp_name, my_type, other_type):
        return self._connect_other_simple(my_resource, my_inp_name,
                                          other_resource, other_inp_name,
                                          my_type, other_type)

    def _connect_other_list_hash(self, my_resource, my_inp_name, other_resource,
                                 other_inp_name, my_type, other_type):
        return self._connect_other_simple(my_resource, my_inp_name,
                                          other_resource, other_inp_name,
                                          my_type, other_type)

    def _connect_my_list(self, my_resource, my_inp_name, other_resource,
                         other_inp_name, my_type, other_type):
        ret = self._connect_my_simple(my_resource, my_inp_name,
                                      other_resource, other_inp_name,
                                      my_type, other_type)
        return ret

    def _connect_my_hash(self, my_resource, my_inp_name, other_resource,
                         other_inp_name, my_type, other_type):

        my_key, my_val = my_inp_name.split(':', 1)
        if '|' in my_val:
            my_val, my_tag = my_val.split('|', 1)
        else:
            my_tag = other_resource.name
        types_mapping = '|{}_{}'.format(my_type.value, other_type.value)
        my_ind_name = '{}_recv_bin'.format(self.fname)
        my_ind_val = '{}|{}|{}|{}|{}|{}'.format(my_resource.key,
                                                my_key,
                                                other_resource.key,
                                                other_inp_name,
                                                my_tag,
                                                my_val)
        my_ind_val += types_mapping

        my_resource._add_index(my_ind_name, my_ind_val)
        return my_key

    def _connect_my_list_hash(self, my_resource, my_inp_name, other_resource,
                              other_inp_name, my_type, other_type):
        return self._connect_my_hash(my_resource, my_inp_name,
                                     other_resource, other_inp_name,
                                     my_type, other_type)

    def connect(self, my_inp_name, other_resource, other_inp_name):
        my_resource = self._instance
        other_type = self._input_type(other_resource, other_inp_name)
        my_type = self._input_type(my_resource, my_inp_name)

        if my_type == other_type and ':' not in my_inp_name:
            # if the type is the same map 1:1, and flat
            my_type = InputTypes.simple
            other_type = InputTypes.simple
        elif my_type == InputTypes.list_hash and other_type == InputTypes.hash:
            # whole dict to list with dicts
            # TODO: solve this problem
            if ':' in my_inp_name:
                my_type = InputTypes.hash
            else:
                my_type = InputTypes.list

        # set my side
        my_meth = getattr(self, '_connect_my_{}'.format(my_type.name))
        my_affected = my_meth(my_resource, my_inp_name,
                              other_resource, other_inp_name,
                              my_type, other_type)

        # set other side
        other_meth = getattr(self, '_connect_other_{}'.format(other_type.name))
        other_meth(my_resource, my_inp_name,
                   other_resource, other_inp_name,
                   my_type, other_type)

        try:
            del self._cache[my_affected]
        except KeyError:
            pass

        with self.inputs_index_cache as c:
            c.wipe()

        return True

    def disconnect(self, name):
        # ind_name = '{}_recv_bin'.format(self.fname)
        if ':' in name:
            # disconnect from hash with tag
            normalized_name, tag_and_target = name.split(':', 1)
            my_val, my_tag = tag_and_target.split('|', 1)
            emit_name = None
            # emit_name = '{}|{}'.format(my_tag, my_val)
            full_name = '{}|{}|{}'.format(normalized_name, my_tag, my_val)
            name = normalized_name
        elif '|' in name:
            # disconnect everything from given input|resource
            my_input, other_resource, other_input = name.split('|', 2)
            full_name = my_input
            emit_name = '{}|{}'.format(other_resource, other_input)
            normalized_name = "{}|{}".format(my_input, other_resource)
            name = name.split('|', 1)[0]
            my_val, my_tag = None, None
        else:
            # disconnect everything from given input
            full_name = name
            emit_name = None
            normalized_name = name
            my_val, my_tag = None, None
        indexes = self._instance._riak_object.indexes
        to_dels = []
        recvs = filter(lambda x: x[0] == '{}_recv_bin'.format(self.fname),
                       indexes)
        for recv in recvs:
            _, ind_value = recv
            if ind_value.startswith('{}|{}|'.format(self._instance.key,
                                                    normalized_name)):
                spl = ind_value.split('|')
                if len(spl) == 7 and my_tag and my_val:
                    if spl[-3] == my_tag and spl[-2] == my_val:
                        to_dels.append(recv)
                else:
                    to_dels.append(recv)
        emits = filter(lambda x: x[0] == '{}_emit_bin'.format(self.fname),
                       indexes)
        for emit in emits:
            _, ind_value = emit
            if ind_value.endswith('|{}|{}'.format(self._instance.key,
                                                  full_name)):
                if emit_name:
                    if ind_value.startswith(emit_name):
                        to_dels.append(emit)
                else:
                    to_dels.append(emit)

        for to_del in to_dels:
            self._instance._remove_index(*to_del)

        try:
            del self._cache[name]
        except KeyError:
            pass

        with self.inputs_index_cache as c:
            c.wipe()
    def _has_own_input(self, name):
        try:
            return self._cache[name]
        except KeyError:
            pass
        my_name = self._instance.key
        try:
            self._get_raw_field_val(name)
        except KeyError:
            raise DBLayerSolarException('No input {} for {}'.format(name,
                                                                    my_name))
        else:
            return True

    def _get_field_val(self, name, other=None):
        # maybe it should be tco
        if other:
            full_name = '{}_other_{}'.format(name, other)
        else:
            full_name = name
        try:
            return self._cache[full_name]
        except KeyError:
            pass
        check_state_for('index', self._instance)
        fname = self.fname
        my_name = self._instance.key
        self._has_own_input(name)
        ind_name = '{}_recv_bin'.format(fname)
        with self.inputs_index_cache as c:
            kwargs = dict(startkey='{}|'.format(my_name),
                          endkey='{}|~'.format(my_name),
                          return_terms=True)
            my_type = self._input_type(self._instance, name)
            if my_type == InputTypes.simple:
                max_results = 1
            else:
                max_results = 99999
            c.get_index(self._instance._get_index, ind_name, **kwargs)
            recvs = tuple(c.filter(startkey="{}|{}|".format(my_name, name),
                                   endkey="{}|{}|~".format(my_name, name),
                                   max_results=max_results))
        if not recvs:
            _res = self._get_raw_field_val(name)
            self._cache[name] = _res
            if other:
                other_res = self._get_field_val(other)
                self._cache[full_name] = other_res
                return other_res
            return _res
        my_meth = getattr(self, '_map_field_val_{}'.format(my_type.name))
        return my_meth(recvs, name, my_name, other=other)

    def _map_field_val_simple(self, recvs, input_name, name, other=None):
        recvs = recvs[0]
        index_val, obj_key = recvs
        _, inp, emitter_key, emitter_inp, _mapping_type = index_val.split('|', 4)
        res = Resource.get(emitter_key).inputs._get_field_val(emitter_inp,
                                                              other)
        self._cache[name] = res
        return res

    def _map_field_val_list(self, recvs, input_name, name, other=None):
        if len(recvs) == 1:
            recv = recvs[0]
            index_val, obj_key = recv
            _, inp, emitter_key, emitter_inp, mapping_type = index_val.split('|', 4)
            res = Resource.get(emitter_key).inputs._get_field_val(emitter_inp,
                                                                  other)
            if mapping_type != "{}_{}".format(InputTypes.simple.value,
                                              InputTypes.simple.value):
                res = [res]
        else:
            res = []
            for recv in recvs:
                index_val, obj_key = recv
                _, _, emitter_key, emitter_inp, mapping_type = index_val.split('|', 4)
                cres = Resource.get(emitter_key).inputs._get_field_val(emitter_inp,
                                                                       other)
                res.append(cres)
        self._cache[name] = res
        return res

    def _map_field_val_hash_single(self, recvs, input_name, other):
        items = []
        tags = set()
        for recv in recvs:
            index_val, obj_key = recv
            _, _, emitter_key, emitter_inp, my_tag, my_val, mapping_type = index_val.split('|', 6)
            cres = Resource.get(emitter_key).inputs._get_field_val(emitter_inp,
                                                                   other)
            items.append((my_tag, my_val, cres))
            tags.add(my_tag)
        return items, tags

    def _map_field_val_hash(self, recvs, input_name, name, other=None):
        if len(recvs) == 1:
            # just one connected
            recv = recvs[0]
            index_val, obj_key = recv
            splitted = index_val.split('|')
            splen = len(splitted)
            if splen == 5:
                # 1:1
                _, inp, emitter_key, emitter_inp, mapping_type = splitted
                if mapping_type != "{}_{}".format(InputTypes.simple.value,
                                                  InputTypes.simple.value):
                    raise NotImplementedError()
                res = Resource.get(emitter_key).inputs._get_field_val(emitter_inp,
                                                                      other)
            elif splen == 7:
                # partial
                _, _, emitter_key, emitter_inp, my_tag, my_val, mapping_type = splitted
                cres = Resource.get(emitter_key).inputs._get_field_val(emitter_inp,
                                                                       other)
                res = {my_val: cres}
                my_resource = self._instance
                my_resource_value = my_resource.inputs._get_raw_field_val(input_name)
                if my_resource_value:
                    for my_val, cres in my_resource_value.iteritems():
                        res[my_val] = cres
            else:
                raise Exception("Not supported splen %s" % splen)
        else:
            items, tags = self._map_field_val_hash_single(recvs, input_name,
                                                          other)
            my_resource = self._instance
            my_resource_value = my_resource.inputs._get_raw_field_val(input_name)
            if my_resource_value:
                res = my_resource_value
            else:
                res = {}
            if len(tags) != 1:
                # TODO: add it also for during connecting
                raise Exception("Detected dict with different tags")
            for _, my_val, value in items:
                res[my_val] = value
        self._cache[name] = res
        return res

    def _map_field_val_list_hash(self, recvs, input_name, name, other=None):
        items = []
        tags = set()
        for recv in recvs:
            index_val, obj_key = recv
            splitted_val = index_val.split('|', 6)
            if len(splitted_val) == 5:
                # it was list hash but with whole dict mapping
                _, _, emitter_key, emitter_inp, mapping_type = splitted_val
                cres = Resource.get(emitter_key).inputs._get_field_val(emitter_inp,
                                                                       other)
                items.append((emitter_key, None, cres))
            else:
                _, _, emitter_key, emitter_inp, my_tag, my_val, mapping_type = splitted_val
                cres = Resource.get(emitter_key).inputs._get_field_val(emitter_inp,
                                                                       other)
                mapping_type = splitted_val[-1]
                items.append((my_tag, my_val, cres))
        tmp_res = {}
        for first, my_val, value in items:
            if my_val is None:
                tmp_res[first] = value
            else:
                try:
                    tmp_res[first][my_val] = value
                except KeyError:
                    tmp_res[first] = {my_val: value}
        res = tmp_res.values()
        self._cache[name] = res
        return res

    def _get_raw_field_val(self, name):
        return self._instance._data_container[self.fname][name]

    def __getitem__(self, name):
        return self._get_field_val(name)

    def __delitem__(self, name):
        self._has_own_input(name)
        self._instance._field_changed(self)
        try:
            del self._cache[name]
        except KeyError:
            pass
        inst = self._instance
        inst._riak_object.remove_index('%s_bin' % self.fname,
                                       '{}|{}'.format(self._instance.key, name))
        del inst._data_container[self.fname][name]

    def __setitem__(self, name, value):
        self._instance._field_changed(self)
        return self._set_field_value(name, value)

    def items(self):
        return self._instance._data_container[self.fname].items()

    def get(self, name, default=None):
        try:
            self._has_own_input(name)
        except DBLayerSolarException:
            return default
        return self[name]

    def _set_field_value(self, name, value):
        fname = self.fname
        my_name = self._instance.key
        ind_name = '{}_recv_bin'.format(fname)
        recvs = self._instance._get_index(ind_name,
                                          startkey='{}|{}|'.format(my_name, name),
                                          endkey='{}|{}|~'.format(my_name, name),
                                          max_results=1,
                                          return_terms=True).results
        if recvs:
            recvs = recvs[0]
            res, inp, emitter_name, emitter_inp = recvs[0].split('|')[:4]
            raise Exception("%s:%s is connected with resource %s:%s"
                            % (res, inp, emitter_name, emitter_inp))
        # inst = self._instance
        robj = self._instance._riak_object
        if name not in robj.data[self.fname]:
            self._instance._add_index('%s_bin' % self.fname,
                                      '{}|{}'.format(my_name, name))
        robj.data[self.fname][name] = value

        with self.inputs_index_cache as c:
            c.wipe()
        self._cache[name] = value
        return True

    def to_dict(self):
        rst = {}
        for key in self._instance._data_container[self.fname].keys():
            rst[key] = self[key]
        return rst
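A connection sketch (resource keys and input names invented): links live purely in the _recv/_emit 2i entries written above, not in the document bodies.

    emitter = Resource.get('keys1')
    receiver = Resource.get('node1')
    receiver.inputs.connect('ip', emitter, 'ip')   # writes both _recv and _emit index entries
    receiver.inputs['ip']                          # resolved through the _recv index
    receiver.inputs.disconnect('ip')               # removes the index entries again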
class InputsField(IndexField):
    _wrp_class = InputsFieldWrp

    def __set__(self, instance, value):
        wrp = getattr(instance, self.fname)
        instance._data_container[self.fname] = self.default
        for inp_name, inp_value in value.iteritems():
            wrp[inp_name] = inp_value


class TagsFieldWrp(IndexFieldWrp):

    def __getitem__(self, name):
        raise TypeError('You cannot get tags like this')

    def __setitem__(self, name, value):
        raise TypeError('You cannot set tags like this')

    def __delitem__(self, name):
        raise TypeError('You cannot delete tags like this')

    def __iter__(self):
        return iter(self._instance._data_container[self.fname])

    def as_list(self):
        try:
            return self._instance._data_container[self.fname][:]
        except KeyError:
            return []

    def set(self, name, value=None):
        if '=' in name and value is None:
            name, value = name.split('=', 1)
        if value is None:
            value = ''
        full_value = '{}={}'.format(name, value)
        inst = self._instance
        try:
            fld = inst._data_container[self.fname]
        except KeyError:
            fld = inst._data_container[self.fname] = []
        if full_value in fld:
            return
        # indexes = inst._riak_object.indexes.copy()  # copy it
        inst._add_index('{}_bin'.format(self.fname),
                        '{}~{}'.format(name, value))
        fld.append(full_value)
        return True
    def has_tag(self, name, subval=None):
        fld = self._instance._data_container[self.fname]
        if subval is not None:
            if not isinstance(subval, basestring):
                subval = str(subval)
            return '{}={}'.format(name, subval) in fld
        return any(t == name or t.startswith('{}='.format(name))
                   for t in fld)
    def remove(self, name, value=None):
        if '=' in name and value is None:
            name, value = name.split('=', 1)
        if value is None:
            value = ''
        inst = self._instance
        fld = inst._data_container[self.fname]
        full_value = '{}={}'.format(name, value)
        try:
            fld.remove(full_value)
        except ValueError:
            pass
        else:
            inst._remove_index('{}_bin'.format(self.fname),
                               '{}~{}'.format(name, value))
        return True


class TagsField(IndexField):
    _wrp_class = TagsFieldWrp

    def __set__(self, instance, value):
        wrp = getattr(instance, self.fname)
        instance._data_container[self.fname] = self.default
        for val in value:
            wrp.set(val)

    def filter(self, name, subval=None):
        check_state_for('index', self._declared_in)
        if '=' in name and subval is None:
            name, subval = name.split('=', 1)
        if subval is None:
            subval = ''
        if not isinstance(subval, basestring):
            subval = str(subval)
        # big max_results as a workaround for a riak bug with a small
        # number of results: https://github.com/basho/riak/issues/608
        if not subval.endswith('*'):
            res = self._declared_in._get_index('{}_bin'.format(self.fname),
                                               startkey='{}~{}'.format(name, subval),
                                               endkey='{}~{} '.format(name, subval),  # space required
                                               max_results=100000,
                                               return_terms=True).results
        else:
            subval = subval.replace('*', '')
            res = self._declared_in._get_index('{}_bin'.format(self.fname),
                                               startkey='{}~{}'.format(name, subval),
                                               endkey='{}~{}~'.format(name, subval),
                                               max_results=100000,
                                               return_terms=True).results
        return set(map(itemgetter(1), res))
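A tag sketch (values invented): each tag is stored as a 'name=value' string plus a 'name~value' index term, so lookup by tag is a single range scan.

    r = Resource.get('node1')
    r.tags.set('env', 'prod')
    r.save()
    Resource.tags.filter('env=prod')     # -> set of matching resource keys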
# class MetaInput(NestedModel):

#     name = Field(str)
#     schema = Field(str)
#     value = None  # TODO: implement it
#     is_list = Field(bool)
#     is_hash = Field(bool)


class Resource(Model):

    name = Field(str)

    version = Field(str)
    base_name = Field(str)
    base_path = Field(str)
    actions_path = Field(str)
    actions = Field(dict)
    handler = Field(str)
    puppet_module = Field(str)  # remove
    meta_inputs = Field(dict, default=dict)
    state = Field(str)  # on_set/on_get would be useful
    events = Field(list, default=list)

    inputs = InputsField(default=dict)
    tags = TagsField(default=list)

    updated = IndexedField(StrInt)

    def _connect_single(self, other_inputs, other_name, my_name):
        if isinstance(other_name, (list, tuple)):
            # XXX: could be parallelized
            for other in other_name:
                other_inputs.connect(other, self, my_name)
        else:
            other_inputs.connect(other_name, self, my_name)

    def connect(self, other, mapping):
        my_inputs = self.inputs
        other_inputs = other.inputs
        if mapping is None:
            return
        if self == other:
            raise Exception('Trying to connect value-.* to itself')
        solar_map(lambda (my_name, other_name): self._connect_single(other_inputs,
                                                                     other_name,
                                                                     my_name),
                  mapping.iteritems(), concurrency=2)

    def disconnect(self, other, inputs):
        def _to_disconnect((emitter, receiver, meta)):
            if not receiver[0] == other_key:
                return False
            # name there?
            if not emitter[0] == self.key:
                return False
            key = emitter[1]
            if key not in converted:
                return False
            convs = converted[key]
            for conv in convs:
                if conv:
                    if meta['tag'] == conv['tag'] \
                       and meta['destination_key'] == conv['destination_key']:
                        return True
                else:
                    return True
            return False

        def _convert_input(input):
            spl = input.split('|')
            spl_len = len(spl)
            if spl_len == 1:
                # normal input
                return input, None
            elif spl_len == 3:
                return spl[0], {'tag': spl[1],
                                'destination_key': spl[2]}
            else:
                raise Exception("Cannot convert input %r" % input)

        def _format_for_disconnect((emitter, receiver, meta)):
            input = receiver[1]
            if not meta:
                return "{}|{}|{}".format(receiver[1], emitter[0], emitter[1])
            dest_key = meta['destination_key']
            tag = meta.get('tag', other.name)
            return '{}:{}|{}'.format(input, dest_key, tag)

        converted = defaultdict(list)
        for k, v in map(_convert_input, inputs):
            converted[k].append(v)
        other_key = other.key
        edges = other.inputs._edges()
        edges = filter(_to_disconnect, edges)
        inputs = map(_format_for_disconnect, edges)
        solar_map(other.inputs.disconnect, inputs, concurrency=2)

    def save(self, *args, **kwargs):
        if self.changed():
            self.updated = StrInt()
        return super(Resource, self).save(*args, **kwargs)

    @classmethod
    def childs(cls, parents):

        all_indexes = cls.bucket.get_index(
            'inputs_recv_bin',
            startkey='',
            endkey='~',
            return_terms=True,
            max_results=999999)

        tmp = defaultdict(set)
        to_visit = parents[:]
        visited = set()

        for item in all_indexes.results:
            data = item[0].split('|')
            em, rcv = data[0], data[2]
            tmp[rcv].add(em)

        while to_visit:
            n = to_visit.pop()
            for child in tmp[n]:
                if child not in visited:
                    to_visit.append(child)
            visited.add(n)
        return visited

    def delete(self):
        inputs_index = self.bucket.get_index(
            'inputs_emit_bin',
            startkey=self.key,
            endkey=self.key + '~',
            return_terms=True,
            max_results=999999)

        to_disconnect_all = defaultdict(list)
        for emit_bin in inputs_index.results:
            index_vals = emit_bin[0].split('|')
            index_vals_len = len(index_vals)
            if index_vals_len == 6:  # hash
                _, my_input, other_res, other_input, my_tag, my_val = index_vals
                to_disconnect_all[other_res].append("{}|{}|{}".format(my_input,
                                                                      my_tag,
                                                                      my_val))
            elif index_vals_len == 4:
                _, my_input, other_res, other_input = index_vals
                to_disconnect_all[other_res].append(other_input)
            else:
                raise Exception("Unknown input %r" % index_vals)
        for other_obj_key, to_disconnect in to_disconnect_all.items():
            other_obj = Resource.get(other_obj_key)
            self.disconnect(other_obj, to_disconnect)
        super(Resource, self).delete()
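A traversal sketch (keys invented): childs() loads the whole inputs_recv index once and walks it iteratively, so one call yields every resource transitively fed by the given parents.

    affected = Resource.childs(['node1'])   # includes 'node1' itself plus all transitive consumers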
class CommitedResource(Model):
|
||||
|
||||
inputs = Field(dict, default=dict)
|
||||
connections = Field(list, default=list)
|
||||
base_path = Field(str)
|
||||
tags = Field(list, default=list)
|
||||
state = Field(str, default=lambda: 'removed')
|
||||
|
||||
|
||||
"""
|
||||
Type of operations:
|
||||
|
||||
- load all tasks for execution
|
||||
- load single task + childs + all parents of childs (and transitions between them)
|
||||
"""
|
||||
|
||||
class TasksFieldWrp(IndexFieldWrp):
|
||||
|
||||
def add(self, task):
|
||||
return True
|
||||
|
||||
def __iter__(self):
|
||||
return iter(self._instance._data_container[self.fname])
|
||||
|
||||
def all(self, postprocessor=None):
|
||||
if postprocessor:
|
||||
return map(postprocessor, self)
|
||||
return list(self)
|
||||
|
||||
def all_names(self):
|
||||
return self.all(lambda key: key.split('~')[1])
|
||||
|
||||
def all_tasks(self):
|
||||
return self.all(Task.get)
|
||||
|
||||
def _add(self, parent, child):
|
||||
parent._data_container['childs'].append(child.key)
|
||||
child._data_container['parents'].append(parent.key)
|
||||
|
||||
child._add_index('childs_bin', parent.key)
|
||||
parent._add_index('parents_bin', child.key)
|
||||
return True
|


class TasksField(IndexField):

    _wrp_class = TasksFieldWrp

    def __set__(self, obj, value):
        wrp = getattr(obj, self.fname)
        obj._data_container[self.fname] = self.default
        for val in value:
            wrp.add(val)

    def _parse_key(self, startkey):
        return startkey


class ChildFieldWrp(TasksFieldWrp):

    def add(self, task):
        return self._add(self._instance, task)


class ChildField(TasksField):

    _wrp_class = ChildFieldWrp


class ParentFieldWrp(TasksFieldWrp):

    def add(self, task):
        return self._add(task, self._instance)


class ParentField(TasksField):

    _wrp_class = ParentFieldWrp


class Task(Model):
    """Node object"""

    name = Field(basestring)
    status = Field(basestring)
    target = Field(basestring, default=str)
    task_type = Field(basestring)
    args = Field(list)
    errmsg = Field(basestring, default=str)

    execution = IndexedField(basestring)
    parents = ParentField(default=list)
    childs = ChildField(default=list)

    @classmethod
    def new(cls, data):
        key = '%s~%s' % (data['execution'], data['name'])
        return Task.from_dict(key, data)


"""
system log

1. one bucket for all log items
2. separate logs for stage/history (using index)
3. last log item for resource in history
4. log item in staged log for resource|action
5. keep order of history
"""

class NegativeCounter(Model):

    count = Field(int, default=int)

    def next(self):
        self.count -= 1
        self.save()
        return self.count
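
# Editor's sketch (not part of the commit): the counter only ever decreases,
# and StrInt preserves numeric order under string comparison, so the newest
# history item always gets the smallest index value; that is why
# LogItem.history below is read from StrInt.n_max() down to StrInt.n_min():
#
#   nc = NegativeCounter.get_or_create('history')
#   first = StrInt(next(nc))    # -1
#   second = StrInt(next(nc))   # -2
#   assert second < first       # newer sorts first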


class LogItem(Model):

    uid = IndexedField(basestring, default=lambda: str(uuid4()))
    resource = Field(basestring)
    action = Field(basestring)
    diff = Field(list)
    connections_diff = Field(list)
    state = Field(basestring)
    base_path = Field(basestring)  # remove me
    updated = Field(StrInt)

    history = IndexedField(StrInt)
    log = Field(basestring)  # staged/history

    composite = CompositeIndexField(fields=('log', 'resource', 'action'))

    @property
    def log_action(self):
        return '.'.join((self.resource, self.action))

    @classmethod
    def history_last(cls):
        items = cls.history.filter(StrInt.n_max(), StrInt.n_min(), max_results=1)
        if not items:
            return None
        return cls.get(items[0])

    def save(self):
        if any(f in self._modified_fields for f in LogItem.composite.fields):
            self.composite.reset()

        if 'log' in self._modified_fields and self.log == 'history':
            self.history = StrInt(next(NegativeCounter.get_or_create('history')))
        return super(LogItem, self).save()

    @classmethod
    def new(cls, data):
        # generate a uid unless the caller provided one in `data`
        vals = {'uid': cls.uid.default}
        vals.update(data)
        return LogItem.from_dict(vals['uid'], vals)
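
# Editor's sketch of the staged -> history promotion implemented by save()
# above (mirrors test_log.py further down):
#
#   li = LogItem.new({'log': 'staged', 'resource': 'r1', 'action': 'run'})
#   li.save()            # reachable via the composite 'staged' index
#   li.log = 'history'
#   li.save()            # composite index reset, history counter assigned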
436
solar/solar/dblayer/sql_client.py
Normal file
@ -0,0 +1,436 @@
# -*- coding: utf-8 -*-
from collections import deque
import inspect
import os
import uuid
import sys

from peewee import CharField, BlobField, IntegerField, \
    ForeignKeyField, Model, BooleanField, TextField, Field, Database

from solar.dblayer.model import clear_cache
from threading import RLock


# msgpack is way faster but less readable
# using json for easier debug
import json
encoder = json.dumps

def wrapped_loads(data, *args, **kwargs):
    if not isinstance(data, basestring):
        data = str(data)
    return json.loads(data, *args, **kwargs)

decoder = wrapped_loads
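
# Editor's sketch of the msgpack variant mentioned above (assumes the
# msgpack-python package is installed; not part of this commit):
#
#   import msgpack
#   encoder = msgpack.packb
#   decoder = lambda data, *args, **kwargs: msgpack.unpackb(data)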


class _DataField(BlobField):

    def db_value(self, value):
        return super(_DataField, self).db_value(encoder(value))

    def python_value(self, value):
        return decoder(super(_DataField, self).python_value(value))


class _LinksField(_DataField):

    def db_value(self, value):
        return super(_LinksField, self).db_value(list(value))

    def python_value(self, value):
        ret = super(_LinksField, self).python_value(value)
        return [tuple(e) for e in ret]


class _SqlBucket(Model):

    def __init__(self, *args, **kwargs):
        self._new = kwargs.pop('_new', False)
        ed = kwargs.pop('encoded_data', None)
        if ed:
            self.encoded_data = ed
        if 'data' not in kwargs:
            kwargs['data'] = {}
        super(_SqlBucket, self).__init__(*args, **kwargs)

    key = CharField(primary_key=True, null=False)
    data = _DataField(null=False)
    vclock = CharField(max_length=32, null=False)
    links = _LinksField(null=False, default=list)

    @property
    def encoded_data(self):
        return self.data.get('_encoded_data')

    @encoded_data.setter
    def encoded_data(self, value):
        self.data['_encoded_data'] = value

    def save(self, force_insert=False, only=None):
        if self._new:
            force_insert = True
            self._new = False
        ret = super(_SqlBucket, self).save(force_insert, only)
        return ret

    @property
    def sql_session(self):
        return self.bucket.sql_session


class FieldWrp(object):

    def __init__(self, name):
        self.name = name

    def __get__(self, instance, owner):
        return getattr(instance._sql_bucket_obj, self.name)

    def __set__(self, instance, value):
        setattr(instance._sql_bucket_obj, self.name, value)

class _SqlIdx(Model):
    name = CharField(null=False, index=True)
    value = CharField(null=False, index=True)


class RiakObj(object):
    key = FieldWrp('key')
    data = FieldWrp('data')
    vclock = FieldWrp('vclock')
    links = FieldWrp('links')
    encoded_data = FieldWrp('encoded_data')

    def __init__(self, sql_bucket_obj, new=False):
        self._sql_bucket_obj = sql_bucket_obj
        self.new = sql_bucket_obj._new
        self.fetch_indexes()

    @property
    def sql_session(self):
        return self._sql_bucket_obj.sql_session

    @property
    def bucket(self):
        return self._sql_bucket_obj.bucket

    @property
    def indexes(self):
        self.fetch_indexes()
        return self._indexes

    def fetch_indexes(self):
        if not hasattr(self, '_indexes'):
            idxes = self.bucket._sql_idx.select().where(
                self.bucket._sql_idx.key == self.key)
            self._indexes = set((idx.name, idx.value) for idx in idxes)

    @indexes.setter
    def indexes(self, value):
        assert isinstance(value, set)
        self._indexes = value

    def _save_indexes(self):
        # TODO: possible optimization
        # update only what's needed
        # don't delete all at first
        q = self.bucket._sql_idx.delete().where(
            self.bucket._sql_idx.key == self.key)
        q.execute()

        for iname, ival in self.indexes:
            idx = self.bucket._sql_idx(key=self.key, name=iname, value=ival)
            idx.save()

    def add_index(self, field, value):
        self.indexes.add((field, value))
        return self

    def set_index(self, field, value):
        to_rem = set((x for x in self.indexes if x[0] == field))
        self.indexes.difference_update(to_rem)
        return self.add_index(field, value)

    def remove_index(self, field=None, value=None):
        # only the in-memory set is touched here; the actual rows are
        # rewritten by _save_indexes() on store()
        if field is None and value is None:
            self.indexes.clear()
        elif field is not None and value is None:
            to_rem = set((x for x in self.indexes if x[0] == field))
            self.indexes.difference_update(to_rem)
        elif field is not None and value is not None:
            to_rem = set((x for x in self.indexes
                          if x[0] == field and x[1] == value))
            self.indexes.difference_update(to_rem)
        return self

    def store(self, return_body=True):
        self.vclock = uuid.uuid4().hex
        assert self._sql_bucket_obj is not None
        self._sql_bucket_obj.save()
        self._save_indexes()
        return self

    def delete(self):
        # delete_instance() removes this row; a bare Model.delete() would
        # only build an (unexecuted) table-wide delete query
        self._sql_bucket_obj.delete_instance()
        return self

    @property
    def exists(self):
        return not self.new

    def get_link(self, tag):
        return next(x[1] for x in self.links if x[2] == tag)

    def set_link(self, obj, tag=None):
        if isinstance(obj, tuple):
            newlink = obj
        else:
            newlink = (obj.bucket.name, obj.key, tag)

        # replace any existing link to the same (bucket, key) pair
        multi = [x for x in self.links if x[0:2] == newlink[0:2]]
        for item in multi:
            self.links.remove(item)

        self.links.append(newlink)
        return self

    def del_link(self, obj=None, tag=None):
        assert obj is not None or tag is not None
        if tag is not None:
            links = [x for x in self.links if x[2] != tag]
        else:
            links = self.links
        if obj is not None:
            if not isinstance(obj, tuple):
                obj = (obj.bucket.name, obj.key, tag)
            # keep only links that do not point at obj
            links = [x for x in links if x[0:2] != obj[0:2]]
        self.links = links
        return self
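
    # Editor's usage sketch for the link helpers above (bucket, key and tag
    # values are made up); link tuples are (bucket_name, key, tag):
    #
    #   obj.set_link(('bucket_other', 'some_key', 'dep'))
    #   obj.get_link('dep')        # -> 'some_key'
    #   obj.del_link(tag='dep')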


class IndexPage(object):

    def __init__(self, index, results, return_terms, max_results, continuation):
        self.max_results = max_results
        self.index = index
        if not return_terms:
            self.results = tuple(x[0] for x in results)
        else:
            self.results = tuple(results)

        if not max_results or not self.results:
            self.continuation = None
        else:
            self.continuation = str(continuation + len(self.results))
        self.return_terms = return_terms

    def __len__(self):
        return len(self.results)

    def __getitem__(self, item):
        return self.results[item]


class Bucket(object):

    def __init__(self, name, client):
        self.client = client
        table_name = "bucket_%s" % name.lower()
        self.name = table_name
        idx_table_name = 'idx_%s' % name.lower()

        class ModelMeta:
            db_table = table_name
            database = self.client.sql_session

        self._sql_model = type(table_name, (_SqlBucket,),
                               {'Meta': ModelMeta,
                                'bucket': self})
        _idx_key = ForeignKeyField(self._sql_model, null=False, index=True)

        class IdxMeta:
            db_table = idx_table_name
            database = self.client.sql_session

        self._sql_idx = type(idx_table_name, (_SqlIdx,),
                             {'Meta': IdxMeta,
                              'bucket': self,
                              'key': _idx_key})

    def search(self, q, rows=10, start=0, sort=''):
        raise NotImplementedError()

    def create_search(self, index):
        raise NotImplementedError()

    def set_property(self, name, value):
        return

    def get_properties(self):
        return {'search_index': False}

    def get(self, key):
        try:
            ret = self._sql_model.get(self._sql_model.key == key)
        except self._sql_model.DoesNotExist:
            ret = None
        new = ret is None
        if new:
            ret = self._sql_model(key=key, _new=new)
        return RiakObj(ret, new)

    def delete(self, data, *args, **kwargs):
        if isinstance(data, basestring):
            key = data
        else:
            key = data.key
        self._sql_model.delete().where(self._sql_model.key == key).execute()
        self._sql_idx.delete().where(self._sql_idx.key == key).execute()
        return self

    def new(self, key, data=None, encoded_data=None, **kwargs):
        if key is not None:
            try:
                ret = self._sql_model.get(self._sql_model.key == key)
            except self._sql_model.DoesNotExist:
                ret = None
            new = ret is None
        else:
            key = uuid.uuid4().hex
            new = True
        if new:
            ret = self._sql_model(key=key, _new=new)
        ret.key = key
        ret.data = data if data is not None else {}
        if encoded_data:
            ret.encoded_data = encoded_data
        ret.links = []
        ret.vclock = "new"
        return RiakObj(ret, new)

    def get_index(self, index, startkey, endkey=None, return_terms=None,
                  max_results=None, continuation=None, timeout=None, fmt=None,
                  term_regex=None):
        if startkey and endkey is None:
            endkey = startkey
        if startkey > endkey:
            startkey, endkey = endkey, startkey

        if index == '$key':
            if return_terms:
                # for the special $key index the term and the key coincide
                q = self._sql_model.select(
                    self._sql_model.key, self._sql_model.key)
            else:
                q = self._sql_model.select(self._sql_model.key)
            q = q.where(
                self._sql_model.key >= startkey, self._sql_model.key <= endkey
            ).order_by(self._sql_model.key)
        elif index == '$bucket':
            if return_terms:
                q = self._sql_model.select(
                    self._sql_model.key, self._sql_model.key)
            else:
                q = self._sql_model.select(self._sql_model.key)
            if not (startkey == '_' and endkey == '_'):
                q = q.where(
                    self._sql_model.key >= startkey, self._sql_model.key <= endkey
                )
        else:
            if return_terms:
                q = self._sql_idx.select(
                    self._sql_idx.value, self._sql_idx.key)
            else:
                q = self._sql_idx.select(self._sql_idx.key)
            q = q.where(
                self._sql_idx.name == index,
                self._sql_idx.value >= startkey, self._sql_idx.value <= endkey
            ).order_by(self._sql_idx.value)

        max_results = int(max_results or 0)
        continuation = int(continuation or 0)
        if max_results:
            q = q.limit(max_results)
        if continuation:
            q = q.offset(continuation)

        q = q.tuples()

        return IndexPage(index, q, return_terms, max_results, continuation)
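
    # Editor's usage sketch, mirroring the Riak 2i API this method emulates
    # (index name and keys are made up):
    #
    #   page = bucket.get_index('tags_bin', startkey='a', endkey='z',
    #                           return_terms=True, max_results=10)
    #   for value, key in page:      # with return_terms, items are
    #       print value, key         # (index value, object key) tuples
    #   page.continuation            # offset of the next page, or None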

    def multiget(self, keys):
        if not keys:
            return []
        else:
            q = self._sql_model.select().where(self._sql_model.key << list(keys))
            return map(RiakObj, list(q))

    @property
    def sql_session(self):
        return self.client.sql_session


class SqlClient(object):
    block = RLock()

    search_dir = None

    def __init__(self, *args, **kwargs):
        db_class_str = kwargs.pop("db_class", 'SqliteDatabase')
        try:
            mod, fromlist = db_class_str.split('.')
        except ValueError:
            mod = 'peewee'
            fromlist = db_class_str
        __import__(mod, fromlist=[fromlist])
        db_class = getattr(sys.modules[mod], fromlist)
        session = db_class(*args, **kwargs)
        self._sql_session = session
        self.buckets = {}

    def bucket(self, name):
        with self.block:
            if name not in self.buckets:
                b = Bucket(name, self)
                b._sql_model.create_table(fail_silently=True)
                b._sql_idx.create_table(fail_silently=True)
                self.buckets[name] = b
        return self.buckets[name]

    @property
    def sql_session(self):
        return self._sql_session

    def session_start(self):
        clear_cache()
        sess = self._sql_session
        sess.begin()

    def session_end(self, result=True):
        sess = self._sql_session
        if result:
            sess.commit()
        else:
            sess.rollback()
        clear_cache()

    def delete_all(self, cls):
        # naive way for SQL, we could delete whole table contents
        rst = cls.bucket.get_index('$bucket', startkey='_', max_results=100000).results
        for key in rst:
            cls.bucket.delete(key)
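
# Editor's usage sketch (assumptions: peewee is installed and ':memory:' is
# the usual SQLite target; not part of this commit):
#
#   client = SqlClient(':memory:', db_class='SqliteDatabase')
#   b = client.bucket('resource')
#   obj = b.new('key1', data={'a': 1})
#   obj.store()
#   assert b.get('key1').data['a'] == 1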
30
solar/solar/dblayer/standalone_session_wrapper.py
Normal file
@ -0,0 +1,30 @@
"""
Starts a single session, and ends it with `atexit`;
can be used from cli / examples,
shouldn't be used from long running processes (workers etc)

"""

try:
    from gevent import monkey
except ImportError:
    pass
else:
    monkey.patch_all()
    from solar.dblayer.gevent_patches import patch_all
    patch_all()

def create_all():

    import sys
    if sys.executable.startswith(('python', )):
        # auto add session only for standalone python runs
        return

    from solar.dblayer.model import ModelMeta

    import atexit

    ModelMeta.session_start()

    atexit.register(ModelMeta.session_end)
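
# Editor's usage sketch (assumption: imported once at interpreter startup,
# e.g. from a CLI entry point):
#
#   from solar.dblayer import standalone_session_wrapper
#   standalone_session_wrapper.create_all()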
58
solar/solar/dblayer/test/conftest.py
Normal file
@ -0,0 +1,58 @@
from solar.dblayer.model import Model, ModelMeta, get_bucket
import pytest
import time
import string
import random

def patched_get_bucket_name(cls):
    return cls.__name__ + str(time.time())

class RndObj(object):

    def __init__(self, name):
        self.rnd = name + ''.join((random.choice(string.ascii_lowercase) for x in xrange(8)))
        self.calls = 0

    def next(self):
        num = self.calls
        self.calls += 1
        return (self.rnd + str(num))

    def __iter__(self):
        return self

@pytest.fixture(scope='function')
def rk(request):

    name = request.module.__name__ + request.function.__name__

    obj = RndObj(name)

    return obj

@pytest.fixture(scope='function')
def rt(request):

    name = request.module.__name__ + request.function.__name__

    obj = RndObj(name)

    return obj


@pytest.fixture(autouse=True)
def setup(request):

    for model in ModelMeta._defined_models:
        model.bucket = get_bucket(None, model, ModelMeta)


def pytest_runtest_teardown(item, nextitem):
    ModelMeta.session_end(result=True)
    return nextitem

def pytest_runtest_call(item):
    ModelMeta.session_start()


Model.get_bucket_name = classmethod(patched_get_bucket_name)
247
solar/solar/dblayer/test/test_basic.py
Normal file
@ -0,0 +1,247 @@
import pytest
from solar.dblayer.model import (Field, IndexField,
                                 clear_cache, Model,
                                 StrInt,
                                 DBLayerNotFound,
                                 DBLayerNoRiakObj,
                                 DBLayerException)

class M1(Model):

    f1 = Field(str)
    f2 = Field(int)
    f3 = Field(int, fname='some_field')

    ind = IndexField(default=dict)


class M2(Model):
    f1 = Field(str)

    ind = IndexField(default=dict)


class M3(Model):
    f1 = Field(str)

    ind = IndexField(default=dict)


def test_from_dict(rk):
    key = next(rk)

    with pytest.raises(DBLayerException):
        M1.from_dict({'f1': 'blah', 'f2': 150, 'some_field': 250})

    m1 = M1.from_dict({'key': key, 'f1': 'blah', 'f2': 150, 'some_field': 250})

    m1.save()
    m11 = M1.get(key)
    assert m1.key == key
    assert m1.f3 == 250
    assert m1 is m11


def test_not_exists(rk):
    key = next(rk)
    with pytest.raises(DBLayerNotFound):
        M1.get(key)

    m1 = M1.from_dict(key, {'f1': 'blah', 'f2': 150})
    m1.save()
    M1.get(key)


def test_update(rk):
    k = next(rk)
    m1 = M1.from_dict(k, {'f1': 'blah', 'f2': 150})
    m1.save()
    m1.f1 = 'blub'
    assert m1.f1 == 'blub'
    m1.save()
    assert m1.f1 == 'blub'
    m11 = M1.get(k)
    assert m11.f1 == 'blub'

    clear_cache()
    m12 = M1.get(k)
    assert m12.f1 == 'blub'


def test_lazy(rk):
    k = next(rk)
    m1 = M1.from_dict(k, {'f1': 'blah', 'f2': 150})
    m1.save()
    clear_cache()

    m1 = M1(k)
    with pytest.raises(DBLayerNoRiakObj):
        assert m1.f1 == 'blah'


def test_cache_logic(rk):
    k = next(rk)
    M1.session_start()
    assert M1._c.obj_cache == {}

    m1 = M1.from_dict(k, {'f1': 'blah', 'f2': 150})
    m1.save()
    M1.session_end()

    M1.session_start()
    assert M1._c.obj_cache == {}
    m11 = M1.get(k)
    pid = id(M1._c)
    assert M1._c.obj_cache == {k: m11}
    M1.session_end()

    M1.session_start()
    assert M1._c.obj_cache == {}
    m12 = M1.get(k)
    aid = id(M1._c)

    assert pid != aid


def test_normal_index(rk):
    key = next(rk)
    key2 = next(rk)

    m1 = M1.from_dict(key, {'f1': 'blah', 'f2': 150,
                            'ind': {'blah': 'something'}})
    m1.save()

    m2 = M1.from_dict(key2, {'f1': 'blah', 'f2': 150,
                             'ind': {'blah': 'something2'}})
    m2.save()
    assert set(M1.ind.filter('blah=somethi*')) == set([key, key2])
    assert set(M1.ind.filter('blah=something')) == set([key])
    assert set(M1.ind.filter('blah=something2')) == set([key2])


def test_update_and_changed(rk):
    # renamed from a second `test_update`, which shadowed the one above
    key = next(rk)

    m1 = M1.from_dict(key, {'f1': 'blah', 'f2': 150})
    assert m1.changed() is True
    m1.save()

    assert m1.changed() is False
    with pytest.raises(DBLayerException):
        m1.save()

    m1.f1 = 'updated'
    assert m1.changed() is True

    m1.save()

    assert m1.f1 == 'updated'

    clear_cache()
    m11 = M1.get(key)
    assert m11.f1 == 'updated'


def test_different_models(rk):
    key = next(rk)

    m2 = M2.from_dict(key, {'f1': 'm2', 'ind': {'blah': 'blub'}})
    m3 = M3.from_dict(key, {'f1': 'm3', 'ind': {'blah': 'blub'}})

    m2.save()
    m3.save()

    assert M2.get(key).f1 == 'm2'
    assert M3.get(key).f1 == 'm3'


def test_cache_behaviour(rk):
    key1 = next(rk)

    m1 = M1.from_dict(key1, {'f1': 'm1'})

    m11 = M1.get(key1)
    assert m1 is m11
    m1.save()
    assert m1 is m11

    m12 = M1.get(key1)
    assert m1 is m12

    clear_cache()
    m13 = M1.get(key1)
    assert m1 is not m13


def test_save_lazy(rk):
    key1 = next(rk)
    key2 = next(rk)

    m1 = M1.from_dict(key1, {'f1': 'm1'})
    m2 = M1.from_dict(key2, {'f1': 'm2'})
    m1.save_lazy()
    m2.save_lazy()

    m1g = M1.get(key1)
    m2g = M1.get(key2)

    assert m1 is m1g
    assert m2 is m2g

    assert M1._c.lazy_save == {m1, m2}
    M1.session_end()
    assert M1._c.lazy_save == set()

    clear_cache()
    m1g2 = M1.get(key1)
    m2g2 = M1.get(key2)

    assert m1g is not m1g2
    assert m2g is not m2g2


def test_changed_index(rk):
    key1 = next(rk)

    m1 = M1.from_dict(key1, {'f1': 'm1'})

    m1.save()
    # don't use _add_index directly
    m1._add_index('test_bin', 'blah')
    m1.save()


def test_strint_comparisons():
    a = StrInt(-1)
    b = StrInt(-2)
    c = StrInt.to_simple(b)
    assert isinstance(c, basestring)
    assert a > b
    assert a > c


def test_delete_cache_behaviour(rk):
    key1 = next(rk)

    m1 = M1.from_dict(key1, {'f1': 'm1'})

    m1.save()

    clear_cache()

    M1.get(key1).delete()
    with pytest.raises(DBLayerNotFound):
        m12 = M1.get(key1)


def test_fast_delete(rk):
    key1 = next(rk)

    m1 = M1.from_dict(key1, {'f1': 'm1'})
    m1.save()
    m1.delete()
    M1.session_start()
    m12 = M1.from_dict(key1, {'f1': 'm12'})
    m12.save()
    assert m12.f1 == 'm12'
45
solar/solar/dblayer/test/test_execution_models.py
Normal file
@ -0,0 +1,45 @@
import pytest

from solar.dblayer.solar_models import Task


def test_tasks_selected_by_execution_id(rk):
    execution = next(rk)

    for i in range(2):
        t = Task.new(
            {'name': str(i),
             'execution': execution})
        t.save()
    another_execution = next(rk)

    for i in range(2):
        t = Task.new(
            {'name': str(i),
             'execution': another_execution})
        t.save()

    assert len(Task.execution.filter(execution)) == 2
    assert len(Task.execution.filter(another_execution)) == 2


def test_parent_child(rk):
    execution = next(rk)

    t1 = Task.new(
        {'name': '1',
         'execution': execution})

    t2 = Task.new(
        {'name': '2',
         'execution': execution})
    t1.childs.add(t2)
    t1.save()
    t2.save()

    assert Task.childs.filter(t1.key) == [t2.key]
    assert Task.parents.filter(t2.key) == [t1.key]
    assert t1.childs.all_tasks() == [t2]
    assert t2.parents.all_names() == [t1.name]
93
solar/solar/dblayer/test/test_log.py
Normal file
@ -0,0 +1,93 @@
import pytest

from solar.dblayer.solar_models import LogItem, NegativeCounter
from solar.dblayer.model import StrInt


def test_separate_logs():

    history = 'history'
    staged = 'staged'
    history_uids = set()
    staged_uids = set()
    for i in range(2):
        l = LogItem.new({'log': history})
        l.save()
        history_uids.add(l.key)
    for i in range(3):
        l = LogItem.new({'log': staged})
        l.save()
        staged_uids.add(l.key)

    assert set(LogItem.composite.filter({'log': history})) == history_uids
    assert set(LogItem.composite.filter({'log': staged})) == staged_uids


def test_multiple_filter():

    l1 = LogItem.new({'log': 'history', 'resource': 'a'})
    l2 = LogItem.new({'log': 'history', 'resource': 'b'})

    l1.save()
    l2.save()
    assert LogItem.composite.filter({'log': 'history', 'resource': 'a'}) == [l1.key]
    assert LogItem.composite.filter({'log': 'history', 'resource': 'b'}) == [l2.key]


def test_changed_index():

    l = LogItem.new({'log': 'staged', 'resource': 'a', 'action': 'run'})
    l.save()

    assert LogItem.composite.filter({'log': 'staged'}) == [l.key]

    l.log = 'history'
    l.save()

    assert LogItem.composite.filter({'log': 'staged'}) == []
    assert LogItem.composite.filter({'log': 'history'}) == [l.key]


def test_negative_counter():
    nc = NegativeCounter.get_or_create('non_exist')
    assert nc.count == 0


def test_reversed_order_is_preserved():
    added = []
    for i in range(4):
        li = LogItem.new({'log': 'history'})
        li.save()
        added.append(li.key)
    added.reverse()
    assert list(LogItem.history.filter(
        StrInt.n_max(), StrInt.n_min(), max_results=2)) == added[:2]


def test_staged_not_indexed():
    added = []
    for i in range(3):
        li = LogItem.new({'log': 'staged'})
        li.save()
        added.append(li)

    for li in added[:2]:
        li.log = 'history'
        li.save()

    assert set(LogItem.history.filter(
        StrInt.n_max(), StrInt.n_min())) == {li.key for li in added[:2]}


def test_history_last_filter():
    for i in range(4):
        li = LogItem.new({'log': 'history'})
        li.save()
        last = li

    assert LogItem.history_last() == last


def test_history_last_returns_none():
    assert LogItem.history_last() is None
66
solar/solar/dblayer/test/test_nested.py
Normal file
@ -0,0 +1,66 @@
import pytest
from solar.dblayer.model import (Field, IndexField,
                                 clear_cache, Model,
                                 NestedField,
                                 NestedModel,
                                 DBLayerNotFound,
                                 DBLayerNoRiakObj,
                                 DBLayerException)


class N1(NestedModel):

    f_nested1 = Field(str)
    f_nested2 = Field(int, default=150)


class M1(Model):

    f1 = Field(str)
    f2 = NestedField(N1)
    f3 = NestedField(N1, hash_key='f_nested1')


def test_nested_simple(rk):

    key = next(rk)

    m1 = M1.from_dict(key, {'f1': 'blah',
                            'f2': {'f_nested1': 'foo'}})

    assert m1.f2.f_nested1 == 'foo'
    assert m1.f2.f_nested2 == 150
    assert m1._modified_fields == set(['f1', 'f2'])
    assert m1._data_container == {'f1': 'blah', 'f2': {'f_nested1': 'foo', 'f_nested2': 150}}
    del m1.f2
    assert m1._data_container == {'f1': 'blah'}


def test_nested(rk):
    key = next(rk)

    m1 = M1.from_dict(key, {'f1': 'blah',
                            'f2': {'f_nested1': 'foo'},
                            'f3': {'f_nested1': 'foo', 'f_nested2': 150}})

    assert m1.f2.f_nested1 == 'foo'
    assert m1.f2.f_nested2 == 150
    assert m1.f3['foo'].f_nested2 == 150

    m1.f3['blah'].f_nested2 = 250

    assert m1.f3['foo'].f_nested2 == 150
    assert m1.f3['blah'].f_nested2 == 250
    assert m1._modified_fields == set(['f1', 'f2', 'f3'])

    exp = {'f1': 'blah',
           'f2': {'f_nested1': 'foo', 'f_nested2': 150},
           'f3': {'blah': {'f_nested2': 250},
                  'foo': {'f_nested1': 'foo', 'f_nested2': 150}}}
    assert m1._data_container == exp

    del m1.f2
    exp.pop('f2')
    assert m1._data_container == exp

    assert m1._modified_fields == set(['f1', 'f2', 'f3'])
624
solar/solar/dblayer/test/test_real.py
Normal file
@ -0,0 +1,624 @@
import pytest
import random

from solar.dblayer.model import Model, Field, IndexField, clear_cache, check_state_for, StrInt
from solar.dblayer.solar_models import Resource, DBLayerSolarException


def create_resource(key, data):
    mi = data.get('meta_inputs', {})
    for inp_name, inp_value in data.get('inputs', {}).items():
        if isinstance(inp_value, list):
            if len(inp_value) == 1 and isinstance(inp_value[0], dict):
                schema = [{}]
            else:
                schema = ['str!']
        elif isinstance(inp_value, dict):
            schema = {}
        else:
            schema = '%s!' % type(inp_value).__name__
        mi.setdefault(inp_name, {"schema": schema})
    data['meta_inputs'] = mi
    return Resource.from_dict(key, data)
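
# Editor's illustration of the schema inference above (derived from the
# helper; values made up): inputs of {'port': 8080, 'hosts': ['h1'], 'cfg': {}}
# yield meta_inputs of
#   {'port': {'schema': 'int!'},
#    'hosts': {'schema': ['str!']},
#    'cfg': {'schema': {}}}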

@pytest.mark.xfail(reason="Not YET decided how it should work")
def test_changes_state(rk):
    key = next(rk)
    r = create_resource(key, {'name': 'a name'})
    r.inputs['a'] = 1
    with pytest.raises(Exception):
        # raise exception when something is changed
        val = r.inputs['a']
    r.save()
    check_state_for('index', r)


def test_basic_input(rk):
    key = next(rk)
    r = create_resource(key, {'name': 'a name'})
    r.inputs['a'] = 1
    r.save()
    assert r.inputs['a'] == 1
    assert len(r._riak_object.indexes) == 2
    del r.inputs['a']
    r.save()
    with pytest.raises(DBLayerSolarException):
        assert r.inputs['a'] == 1
    assert len(r._riak_object.indexes) == 1

def test_input_in_dict(rk):
    key = next(rk)
    r = create_resource(key, {'name': 'a name',
                              'inputs': {'input1': 15,
                                         'input2': None}})
    r.save()
    assert r._riak_object.data['inputs']['input1'] == 15
    assert r.inputs['input1'] == 15

    assert r._riak_object.data['inputs']['input2'] is None
    assert r.inputs['input2'] is None


def test_basic_connect(rk):
    k1 = next(rk)
    k2 = next(rk)

    r1 = create_resource(k1, {'name': 'first',
                              'inputs': {'input1': 10,
                                         'input2': 15}})
    r2 = create_resource(k2, {'name': 'second',
                              'inputs': {'input1': None,
                                         'input2': None}})

    r1.connect(r2, {'input1': 'input1', 'input2': 'input2'})
    r1.save()
    r2.save()

    assert r1._riak_object.data['inputs']['input1'] == 10
    assert r1.inputs['input1'] == 10

    assert r2._riak_object.data['inputs']['input1'] is None
    assert r2.inputs['input1'] == 10

    assert r1._riak_object.data['inputs']['input2'] == 15
    assert r1.inputs['input2'] == 15

    assert r2._riak_object.data['inputs']['input2'] is None
    assert r2.inputs['input2'] == 15

@pytest.mark.parametrize('depth', (3, 4, 5, 10, 25, 50))
def test_adv_connect(rk, depth):
    k1 = next(rk)
    k2 = next(rk)

    r1 = create_resource(k1, {'name': 'first',
                              'inputs': {'input1': 10,
                                         'input2': 15}})
    prev = create_resource(k2, {'name': 'second',
                                'inputs': {'input1': None,
                                           'input2': None,
                                           'input3': 0}})
    conn = {'input1': 'input1', 'input2': 'input2'}
    r1.save()
    r1.connect(prev, conn)
    prev.save()
    created = [prev]

    for x in xrange(depth - 1):
        k = next(rk)
        res = create_resource(k, {'name': 'next %d' % (x + 1),
                                  'inputs': {'input1': None,
                                             'input2': None,
                                             'input3': x + 1}})
        created.append(res)
        prev.connect(res, conn)
        res.save()
        prev = res

    for i, c in enumerate(created):
        assert c.inputs['input1'] == 10
        assert c.inputs['input2'] == 15
        assert c.inputs['input3'] == i


@pytest.mark.parametrize('depth', (1, 3, 5, 10, 50, 100))
def test_perf_inputs(rk, depth):
    k1 = next(rk)
    r1 = create_resource(k1, {'name': 'first',
                              'inputs': {'input1': 'target'}})

    r1.save()
    prev = r1
    for x in xrange(depth):
        k = next(rk)
        res = create_resource(k, {'name': 'next %d' % (x + 1),
                                  'inputs': {'input1': None}})
        prev.connect(res, {'input1': 'input1'})
        res.save()
        prev = res

    import time
    st = time.time()
    assert res.inputs['input1'] == 'target'
    end = time.time()
    print end - st

def test_change_connect(rk):
    k1 = next(rk)
    k2 = next(rk)
    k3 = next(rk)

    r1 = create_resource(k1, {'name': 'first',
                              'inputs': {'input1': 10,
                                         'input2': 15}})
    r2 = create_resource(k2, {'name': 'second',
                              'inputs': {'input1': None,
                                         'input2': None,
                                         'input3': 0}})
    r3 = create_resource(k3, {'name': 'first',
                              'inputs': {'input1': 30,
                                         'input2': 35}})

    r1.connect(r2, {'input1': 'input1', 'input2': 'input2'})
    r3.connect(r2, {'input1': 'input1'})

    r1.save()
    r2.save()
    r3.save()

    assert r2.inputs['input1'] == 30
    assert r2.inputs['input2'] == 15


def test_simple_tag(rk, rt):
    k1 = next(rk)
    tag = next(rt)

    r1 = create_resource(k1, {'name': 'first',
                              'tags': ['%s' % tag, '%s=10' % tag]})

    r1.save()
    assert list(r1.tags) == ['%s=' % tag, '%s=10' % tag]


def test_list_by_tag(rk, rt):
    k1 = next(rk)
    k2 = next(rk)
    tag1 = next(rt)
    tag2 = next(rt)
    r1 = create_resource(k1, {'name': 'first',
                              'tags': [tag1, '%s=10' % tag1]})
    r1.save()

    r2 = create_resource(k2, {'name': 'first',
                              'tags': [tag1, '%s=10' % tag2]})
    r2.save()

    assert len(Resource.tags.filter(tag1)) == 2
    assert Resource.tags.filter(tag1) == set([k1, k2])
    assert len(Resource.tags.filter('other_tag')) == 0

    assert len(Resource.tags.filter(tag2)) == 0
    assert len(Resource.tags.filter(tag2, 10)) == 1
    assert Resource.tags.filter(tag2, 10) == set([k2])

    assert len(Resource.tags.filter(tag2, '*')) == 1

def test_updated_behaviour(rk):
    k1 = next(rk)

    _cmp = StrInt()
    r1 = create_resource(k1, {'name': 'blah'})
    r1.save()
    assert isinstance(r1._riak_object.data['updated'], basestring)
    assert not isinstance(r1.updated, basestring)
    assert r1.updated >= _cmp
    assert k1 in Resource.updated.filter(StrInt.p_min(), StrInt.p_max())


def test_updated_only_last(rk):

    for i in range(3):
        r = create_resource(next(rk), {'name': str(i)})
        r.save()
        assert Resource.updated.filter(r.updated, StrInt.p_max()) == [r.key]


def test_list_inputs(rk):
    k1 = next(rk)
    k2 = next(rk)

    r1 = create_resource(k1, {'name': 'first',
                              'inputs': {'input1': 10,
                                         'input2': 15}})
    r2 = create_resource(k2, {'name': 'second',
                              'inputs': {'input': []}})

    r1.connect(r2, {'input1': 'input'})
    r1.connect(r2, {'input2': 'input'})

    r1.save()
    r2.save()

    assert r2.inputs['input'] == [10, 15]

def test_dict_to_dict_inputs(rk):
    k1 = next(rk)
    k2 = next(rk)

    r1 = create_resource(k1, {'name': 'first',
                              'inputs': {'input': {'input1': 10,
                                                   'input2': 15}
                                         }})
    r2 = create_resource(k2, {'name': 'second',
                              'inputs': {'input': {'input1': None,
                                                   'input2': None,
                                                   'input3': None}}})

    r1.connect(r2, {'input': 'input'})
    r1.save()
    r2.save()

    assert r2.inputs['input']['input1'] == 10
    assert r2.inputs['input']['input2'] == 15
    assert 'input3' not in r2.inputs['input']


def test_list_to_list_inputs(rk):
    k1 = next(rk)
    k2 = next(rk)

    r1 = create_resource(k1, {'name': 'first',
                              'inputs': {'input': [10, 15]}})
    r2 = create_resource(k2, {'name': 'second',
                              'inputs': {'input': []}})

    r1.connect(r2, {'input': 'input'})

    r1.save()
    r2.save()

    assert r2.inputs['input'] == [10, 15]


def test_simple_to_dict_inputs(rk):
    k1 = next(rk)
    k2 = next(rk)

    r1 = create_resource(k1, {'name': 'first',
                              'inputs': {'input1': 10,
                                         'input2': 15}})
    r2 = create_resource(k2, {'name': 'second',
                              'inputs': {'input': {'input1': None,
                                                   'input2': None}}})

    r1.connect(r2, {'input1': 'input:input1',
                    'input2': 'input:input2'})

    r1.save()
    r2.save()

    assert r2.inputs['input']['input1'] == 10
    assert r2.inputs['input']['input2'] == 15

def test_simple_to_dict_inputs_with_tag(rk):
    k1 = next(rk)
    k2 = next(rk)
    k3 = next(rk)

    r1 = create_resource(k1, {'name': 'first',
                              'inputs': {'input1': 10,
                                         'input2': 15}})
    r3 = create_resource(k3, {'name': 'first',
                              'inputs': {'input1': 110,
                                         'input2': 115}})
    r2 = create_resource(k2, {'name': 'second',
                              'inputs': {'input': {'input1': None,
                                                   'input2': None}}})

    r1.connect(r2, {'input1': 'input:input1|tag'})
    r3.connect(r2, {'input2': 'input:input2|tag'})

    r1.save()
    r2.save()
    r3.save()

    assert r2.inputs['input']['input1'] == 10
    assert r2.inputs['input']['input2'] == 115


def test_simple_to_listdict_inputs(rk):

    k1 = next(rk)
    k2 = next(rk)
    k3 = next(rk)
    k4 = next(rk)

    r1 = create_resource(k1, {'name': 'first',
                              'inputs': {'input1': 10,
                                         'input2': 15}})
    r3 = create_resource(k3, {'name': 'first',
                              'inputs': {'input1': 110,
                                         'input2': 115}})
    r4 = create_resource(k4, {'name': 'first',
                              'inputs': {'input1': 1110,
                                         'input2': 1115}})
    r2 = create_resource(k2, {'name': 'second',
                              'inputs': {'input': [{'input1': None,
                                                    'input2': None}]}})

    r1.connect(r2, {'input1': 'input:input1',
                    'input2': 'input:input2'})
    r3.connect(r2, {'input2': 'input:input2|tag2',
                    'input1': 'input:input1|tag1'})
    r4.connect(r2, {'input2': 'input:input2|tag1',
                    'input1': 'input:input1|tag2'})

    r1.save()
    r2.save()
    r3.save()
    r4.save()

    assert r2.inputs['input'] == [{u'input2': 1115, u'input1': 110},
                                  {u'input2': 115, u'input1': 1110},
                                  {u'input2': 15, u'input1': 10}]

def test_dict_to_list_inputs(rk):

    k1 = next(rk)
    k2 = next(rk)
    k3 = next(rk)

    r1 = create_resource(k1, {'name': 'first',
                              'inputs': {'modules': [{}]}})
    r2 = create_resource(k2, {'name': 'second',
                              'inputs': {'module': {'name': 'blah2'}}})
    r3 = create_resource(k3, {'name': 'third',
                              'inputs': {'module': {'name': 'blah3'}}})

    r2.connect(r1, {'module': 'modules'})
    r3.connect(r1, {'module': 'modules'})
    r1.save()
    r2.save()
    r3.save()

    assert sorted(r1.inputs['modules']) == sorted([{'name': 'blah2'}, {'name': 'blah3'}])


def test_passthrough_inputs(rk):

    k1 = next(rk)
    k2 = next(rk)
    k3 = next(rk)

    r1 = create_resource(k1, {'name': 'first',
                              'inputs': {'input1': 10,
                                         'input2': 15}})
    r2 = create_resource(k2, {'name': 'first',
                              'inputs': {'input1': None,
                                         'input2': None}})
    r3 = create_resource(k3, {'name': 'first',
                              'inputs': {'input1': None,
                                         'input2': None}})

    r2.connect(r3, {'input1': 'input1',
                    'input2': 'input2'})
    r1.connect(r2, {'input1': 'input1',
                    'input2': 'input2'})

    r1.save()
    r2.save()
    r3.save()

    assert r3.inputs['input1'] == 10
    assert r3.inputs['input2'] == 15

def test_disconnect_by_input(rk):
    k1 = next(rk)
    k2 = next(rk)
    k3 = next(rk)

    r1 = create_resource(k1, {'name': 'first',
                              'inputs': {'input1': 10,
                                         'input2': 15}})
    r2 = create_resource(k2, {'name': 'first',
                              'inputs': {'input1': None,
                                         'input2': None}})
    r3 = create_resource(k3, {'name': 'first',
                              'inputs': {'input1': None,
                                         'input2': None}})

    r2.connect(r3, {'input1': 'input1',
                    'input2': 'input2'})
    r1.connect(r2, {'input1': 'input1',
                    'input2': 'input2'})

    r1.save()
    r2.save()
    r3.save()

    with pytest.raises(Exception):
        r2.inputs['input1'] = 150

    r2.inputs.disconnect('input1')

    r2.save()

    assert r2.inputs['input1'] is None

    r2.inputs['input1'] = 150

    r2.save()

    assert r2.inputs['input1'] == 150
    assert r2.inputs['input2'] == 15

    assert r3.inputs['input1'] == 150


def test_resource_childs(rk):
    k1 = next(rk)
    k2 = next(rk)
    k3 = next(rk)

    r1 = create_resource(k1, {'name': 'first',
                              'inputs': {'input1': 10,
                                         'input2': 15}})
    r2 = create_resource(k2, {'name': 'first',
                              'inputs': {'input1': None,
                                         'input2': None}})
    r3 = create_resource(k3, {'name': 'first',
                              'inputs': {'input1': None,
                                         'input2': None}})

    r2.connect(r3, {'input1': 'input1'})
    r1.connect(r2, {'input1': 'input1'})

    r1.save()
    r2.save()
    r3.save()

    assert set(Resource.childs([r1.key])) == {r1.key, r2.key, r3.key}


def test_events(rk):
    k = next(rk)
    r1 = Resource.from_dict(k, {'events': ['event1', 'event2']})
    r1.save()
    assert r1.events == ['event1', 'event2']
    r1.events.pop()

    assert r1.events == ['event1']

def test_delete(rk):
    k1 = next(rk)
    k2 = next(rk)

    r1 = create_resource(k1, {'name': 'first',
                              'inputs': {'input1': 10,
                                         'input2': 15}})
    r2 = create_resource(k2, {'name': 'first',
                              'inputs': {'input1': None,
                                         'input2': None}})

    r1.connect(r2, {'input1': 'input1'})
    r1.save()
    r2.save()

    r1.delete()

    recv_emit_bin = []
    for index in r2._riak_object.indexes:
        if 'recv' in index[0] or 'emit' in index[0]:
            recv_emit_bin.append(index)
    assert recv_emit_bin == []


def test_delete_hash(rk):
    k1 = next(rk)
    k2 = next(rk)

    r1 = create_resource(k1, {'name': 'first',
                              'inputs': {'input1': 10,
                                         'input2': 15}})
    r2 = create_resource(k2, {'name': 'second',
                              'inputs': {'input': {'input1': None,
                                                   'input2': None}}})

    r1.connect(r2, {'input1': 'input:input1',
                    'input2': 'input:input2'})

    r1.save()
    r2.save()

    r1.delete()
    recv_emit_bin = []
    for index in r2._riak_object.indexes:
        if 'recv' in index[0] or 'emit' in index[0]:
            recv_emit_bin.append(index)
    assert recv_emit_bin == []

def test_nested_simple_listdict(rk):
    k1 = next(rk)
    k2 = next(rk)
    k3 = next(rk)
    k4 = next(rk)
    k5 = next(rk)

    r1 = create_resource(k1, {'name': 'first',
                              'inputs': {'config': [{"backends": [{}],
                                                     'listen_port': 1}]}})
    r2 = create_resource(k2, {'name': 'second',
                              'inputs': {'backend': {}}})
    r3 = create_resource(k3, {'name': 'third',
                              'inputs': {'backend': {}}})
    r5 = create_resource(k5, {'name': 'fifth',
                              'inputs': {"port": 5,
                                         "host": "fifth_host"}})
    r4 = create_resource(k4, {'name': 'fourth',
                              'inputs': {"port": 4,
                                         "host": "fourth_host"}})

    r4.connect(r2, {'port': "backend:port",
                    'host': 'backend:host'})
    r5.connect(r3, {'port': "backend:port",
                    'host': 'backend:host'})

    assert r2.inputs['backend'] == {'host': 'fourth_host', 'port': 4}
    assert r3.inputs['backend'] == {'host': 'fifth_host', 'port': 5}

    r2.connect(r1, {'backend': 'config:backends'})
    r3.connect(r1, {'backend': 'config:backends'})

    Resource.save_all_lazy()

    backends = next(x['backends'] for x in r1.inputs['config'] if 'backends' in x)
    assert len(backends) == 2


def test_nested_two_listdict(rk):
    k1 = next(rk)
    k2 = next(rk)
    k3 = next(rk)

    r1 = create_resource(k1, {'name': 'first',
                              'inputs': {'config': [{"backends": [{}],
                                                     'something': 0}]}})
    r2 = create_resource(k2, {'name': 'second',
                              'inputs': {"backends": [{"host": "second_host", "port": 2}],
                                         'something': 1}})
    r3 = create_resource(k3, {'name': 'third',
                              'inputs': {"backends": [{"host": "third_host", "port": 3}],
                                         'something': 2}})

    r2.connect(r1, {'backends': 'config:backends',
                    'something': 'config:something'})
    r3.connect(r1, {'backends': 'config:backends',
                    'something': 'config:something'})

    Resource.save_all_lazy()

    for sc in r1.inputs['config']:
        assert 'something' in sc
        assert 'backends' in sc
        assert isinstance(sc['backends'], list)
        assert isinstance(sc['something'], int)
@ -18,9 +18,9 @@ __all__ = ['add_dep', 'add_react', 'Dep', 'React', 'add_event']
import networkx as nx

from solar.core.log import log
from solar.interfaces import orm
from solar.events.controls import Dep, React, StateChange

from solar.dblayer.solar_models import Resource

def create_event(event_dict):
    etype = event_dict['etype']
@ -52,11 +52,7 @@ def add_event(ev):
        if ev == rev:
            break
    else:
        rst.append(ev)
        resource_events = orm.DBResourceEvents.get_or_create(ev.parent)
        event_db = orm.DBEvent(**ev.to_dict())
        event_db.save()
        resource_events.events.add(event_db)
        add_events(ev.parent, [ev])


def add_dep(parent, dep, actions, state='success'):
@ -76,34 +72,28 @@ def add_react(parent, dep, actions, state='success'):


def add_events(resource, lst):
    resource_events = orm.DBResourceEvents.get_or_create(resource)
    for ev in lst:
        event_db = orm.DBEvent(**ev.to_dict())
        event_db.save()
        resource_events.events.add(event_db)


def set_events(resource, lst):
    resource_events = orm.DBResourceEvents.get_or_create(resource)
    for ev in resource_events.events.as_set():
        ev.delete()
    for ev in lst:
        event_db = orm.DBEvent(**ev.to_dict())
        event_db.save()
        resource_events.events.add(event_db)
    resource = Resource.get(resource)
    events = resource.events
    # TODO: currently we don't track mutable objects
    events.extend([ev.to_dict() for ev in lst])
    resource.events = events
    resource.save_lazy()


def remove_event(ev):
    event_db = orm.DBEvent(**ev.to_dict())
    event_db.delete()
    to_remove = ev.to_dict()
    resource = ev.parent
    resource = Resource.get(resource)
    # TODO: currently we don't track mutable objects
    events = resource.events
    events.remove(to_remove)
    resource.events = events
    resource.save_lazy()


def all_events(resource):
    events = orm.DBResourceEvents.get_or_create(resource).events.as_set()

    if not events:
        return []
    return [create_event(i.to_dict()) for i in events]
    return [create_event(e) for e in Resource.get(resource).events]


def bft_events_graph(start):
@ -161,6 +151,5 @@ def build_edges(changes_graph, events):
        for parent, child, data in events_graph.edges(event_name, data=True):
            succ_ev = data['event']
            succ_ev.insert(stack, changes_graph)

        visited.add(event_name)
    return changes_graph

@ -31,6 +31,8 @@ trigger action even if no changes noticed on dependent resource.
- parent:update -> ok -> dependent:update
"""

from solar.dblayer.solar_models import Resource
from solar.dblayer.model import DBLayerNotFound

class Event(object):

@ -96,19 +98,14 @@ class React(Event):

        if self.parent_node in changes_graph:
            if self.child_node not in changes_graph:
                # TODO: solve this circular import problem
                from solar.core import resource
                try:
                    loaded_resource = resource.load(self.child)
                except KeyError:
                    # orm throws this error when we're NOT using resource there
                    location_id = Resource.get(self.child).inputs['location_id']
                except DBLayerNotFound:
                    location_id = None
                else:
                    location_id = loaded_resource.args['location_id']
                changes_graph.add_node(
                    self.child_node, status='PENDING',
                    target=location_id,
                    errmsg=None, type='solar_resource',
                    errmsg='', type='solar_resource',
                    args=[self.child, self.child_action])

            changes_graph.add_edge(
@ -121,18 +118,13 @@ class StateChange(Event):
    etype = 'state_change'

    def insert(self, changed_resources, changes_graph):
        changed_resources.append(self.parent)
        # TODO: solve this circular import problem
        from solar.core import resource
        changed_resources.append(self.parent_node)
        try:
            loaded_resource = resource.load(self.parent)
        except KeyError:
            # orm throws this error when we're NOT using resource there
            location_id = Resource.get(self.parent).inputs['location_id']
        except DBLayerNotFound:
            location_id = None
        else:
            location_id = loaded_resource.args['location_id']
        changes_graph.add_node(
            self.parent_node, status='PENDING',
            target=location_id,
            errmsg=None, type='solar_resource',
            errmsg='', type='solar_resource',
            args=[self.parent, self.parent_action])

@ -1,38 +0,0 @@
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import importlib

db_backends = {
    'neo4j_db': ('solar.interfaces.db.neo4j', 'Neo4jDB'),
    'redis_db': ('solar.interfaces.db.redis_db', 'RedisDB'),
    'fakeredis_db': ('solar.interfaces.db.redis_db', 'FakeRedisDB'),
    'redis_graph_db': ('solar.interfaces.db.redis_graph_db', 'RedisGraphDB'),
    'fakeredis_graph_db': ('solar.interfaces.db.redis_graph_db', 'FakeRedisGraphDB'),
}

CURRENT_DB = 'redis_graph_db'
#CURRENT_DB = 'neo4j_db'

DB = None


def get_db(backend=CURRENT_DB):
    # Should be retrieved from config
    global DB
    if DB is None:
        import_path, klass = db_backends[backend]
        module = importlib.import_module(import_path)
        DB = getattr(module, klass)()
    return DB
@ -1,236 +0,0 @@
|
||||
import abc
from enum import Enum
from functools import partial


class Node(object):
    def __init__(self, db, uid, labels, properties):
        self.db = db
        self.uid = uid
        self.labels = labels
        self.properties = properties

    @property
    def collection(self):
        return getattr(
            BaseGraphDB.COLLECTIONS,
            list(self.labels)[0]
        )


class Relation(object):
    def __init__(self, db, start_node, end_node, properties):
        self.db = db
        self.start_node = start_node
        self.end_node = end_node
        self.properties = properties


class DBObjectMeta(abc.ABCMeta):
    # Tuples of: function name, is-multi (i.e. returns a list)
    node_db_read_methods = [
        ('all', True),
        ('create', False),
        ('get', False),
        ('get_or_create', False),
    ]
    relation_db_read_methods = [
        ('all_relations', True),
        ('create_relation', False),
        ('get_relations', True),
        ('get_relation', False),
        ('get_or_create_relation', False),
    ]

    def __new__(cls, name, parents, dct):
        def from_db_list_decorator(converting_func, method):
            def wrapper(self, *args, **kwargs):
                db_convert = kwargs.pop('db_convert', True)

                result = method(self, *args, **kwargs)

                if db_convert:
                    return map(partial(converting_func, self), result)

                return result

            return wrapper

        def from_db_decorator(converting_func, method):
            def wrapper(self, *args, **kwargs):
                db_convert = kwargs.pop('db_convert', True)

                result = method(self, *args, **kwargs)

                if result is None:
                    return

                if db_convert:
                    return converting_func(self, result)

                return result

            return wrapper

        node_db_to_object = cls.find_method(
            'node_db_to_object', name, parents, dct
        )
        relation_db_to_object = cls.find_method(
            'relation_db_to_object', name, parents, dct
        )

        # Node conversions
        for method_name, is_list in cls.node_db_read_methods:
            method = cls.find_method(method_name, name, parents, dct)
            if is_list:
                func = from_db_list_decorator
            else:
                func = from_db_decorator
            # Handle subclasses
            if not getattr(method, '_wrapped', None):
                dct[method_name] = func(node_db_to_object, method)
                setattr(dct[method_name], '_wrapped', True)

        # Relation conversions
        for method_name, is_list in cls.relation_db_read_methods:
            method = cls.find_method(method_name, name, parents, dct)
            if is_list:
                func = from_db_list_decorator
            else:
                func = from_db_decorator
            # Handle subclasses
            if not getattr(method, '_wrapped', None):
                dct[method_name] = func(relation_db_to_object, method)
                setattr(dct[method_name], '_wrapped', True)

        return super(DBObjectMeta, cls).__new__(cls, name, parents, dct)

    @classmethod
    def find_method(cls, method_name, class_name, parents, dict):
        if method_name in dict:
            return dict[method_name]

        for parent in parents:
            method = getattr(parent, method_name)
            if method:
                return method

        raise NotImplementedError(
            '"{}" method not implemented in class {}'.format(
                method_name, class_name
            )
        )


class BaseGraphDB(object):
    __metaclass__ = DBObjectMeta

    COLLECTIONS = Enum(
        'Collections',
        'input resource state_data state_log plan_node plan_graph events stage_log commit_log resource_events'
    )
    DEFAULT_COLLECTION=COLLECTIONS.resource
    RELATION_TYPES = Enum(
        'RelationTypes',
        'input_to_input resource_input plan_edge graph_to_node resource_event commited'
    )
    DEFAULT_RELATION=RELATION_TYPES.resource_input

    @staticmethod
    def node_db_to_object(node_db):
        """Convert node DB object to Node object."""

    @staticmethod
    def object_to_node_db(node_obj):
        """Convert Node object to node DB object."""

    @staticmethod
    def relation_db_to_object(relation_db):
        """Convert relation DB object to Relation object."""

    @staticmethod
    def object_to_relation_db(relation_obj):
        """Convert Relation object to relation DB object."""

    @abc.abstractmethod
    def all(self, collection=DEFAULT_COLLECTION):
        """Return all elements (nodes) of type `collection`."""

    @abc.abstractmethod
    def all_relations(self, type_=DEFAULT_RELATION):
        """Return all relations of type `type_`."""

    @abc.abstractmethod
    def clear(self):
        """Clear the whole DB."""

    @abc.abstractmethod
    def clear_collection(self, collection=DEFAULT_COLLECTION):
        """Clear all elements (nodes) of type `collection`."""

    @abc.abstractmethod
    def create(self, name, properties={}, collection=DEFAULT_COLLECTION):
        """Create element (node) with given name, args, of type `collection`."""

    @abc.abstractmethod
    def delete(self, name, collection=DEFAULT_COLLECTION):
        """Delete element with given name. of type `collection`."""

    @abc.abstractmethod
    def create_relation(self,
                        source,
                        dest,
                        properties={},
                        type_=DEFAULT_RELATION):
        """
        Create relation (connection) of type `type_` from source to dest with
        given args.
        """

    @abc.abstractmethod
    def get(self, name, collection=DEFAULT_COLLECTION):
        """Fetch element with given name and collection type."""

    @abc.abstractmethod
    def get_or_create(self,
                      name,
                      properties={},
                      collection=DEFAULT_COLLECTION):
        """
        Fetch or create element (if not exists) with given name, args of type
        `collection`.
        """

    @abc.abstractmethod
    def delete_relations(self,
                         source=None,
                         dest=None,
                         type_=DEFAULT_RELATION,
                         has_properties=None):
        """Delete all relations of type `type_` from source to dest."""

    @abc.abstractmethod
    def get_relations(self,
                      source=None,
                      dest=None,
                      type_=DEFAULT_RELATION,
                      has_properties=None):
        """Fetch all relations of type `type_` from source to dest.

        NOTE that this function must return only direct relations (edges)
        between vertices `source` and `dest` of type `type_`.

        If you want all PATHS between `source` and `dest`, write another
        method for this (`get_paths`)."""

    @abc.abstractmethod
    def get_relation(self, source, dest, type_=DEFAULT_RELATION):
        """Fetch relations with given source, dest and type_."""

    @abc.abstractmethod
    def get_or_create_relation(self,
                               source,
                               dest,
                               properties={},
                               type_=DEFAULT_RELATION):
        """Fetch or create relation with given args."""
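The `DBObjectMeta` in the removed `base.py` wraps every declared read method at class-creation time so raw backend records are converted to `Node`/`Relation` objects at the boundary, with a `_wrapped` flag guarding against double wrapping in subclasses. A reduced, self-contained sketch of the same technique (class and method names are illustrative):

    import abc

    class ConvertingMeta(abc.ABCMeta):
        read_methods = ('get',)  # methods whose return values get converted

        def __new__(cls, name, parents, dct):
            def wrap(method):
                def wrapper(self, *args, **kwargs):
                    raw = method(self, *args, **kwargs)
                    # convert the raw backend record unless it is missing
                    return None if raw is None else self.from_raw(raw)
                wrapper._wrapped = True  # guard against double wrapping
                return wrapper

            for method_name in cls.read_methods:
                method = dct.get(method_name)
                if method is not None and not getattr(method, '_wrapped', False):
                    dct[method_name] = wrap(method)
            return super(ConvertingMeta, cls).__new__(cls, name, parents, dct)

    class Store(object):
        __metaclass__ = ConvertingMeta  # Python 2 syntax, as in the code above

        def from_raw(self, raw):
            return dict(raw, converted=True)

        def get(self, key):
            return {'key': key}

    assert Store().get('a') == {'key': 'a', 'converted': True}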
@ -1,205 +0,0 @@
|
||||
import json
from copy import deepcopy
import py2neo

from solar.core import log

from .base import BaseGraphDB, Node, Relation


class Neo4jDB(BaseGraphDB):
    DB = {
        'host': 'localhost',
        'port': 7474,
    }
    NEO4J_CLIENT = py2neo.Graph

    def __init__(self):
        self._r = self.NEO4J_CLIENT('http://{host}:{port}/db/data/'.format(
            **self.DB
        ))

    def node_db_to_object(self, node_db):
        return Node(
            self,
            node_db.properties['name'],
            node_db.labels,
            # Neo4j Node.properties is some strange PropertySet, use dict instead
            dict(**node_db.properties)
        )

    def relation_db_to_object(self, relation_db):
        return Relation(
            self,
            self.node_db_to_object(relation_db.start_node),
            self.node_db_to_object(relation_db.end_node),
            relation_db.properties
        )

    def all(self, collection=BaseGraphDB.DEFAULT_COLLECTION):
        return [
            r.n for r in self._r.cypher.execute(
                'MATCH (n:%(collection)s) RETURN n' % {
                    'collection': collection.name,
                }
            )
        ]

    def all_relations(self, type_=BaseGraphDB.DEFAULT_RELATION):
        return [
            r.r for r in self._r.cypher.execute(
                *self._relations_query(
                    source=None, dest=None, type_=type_
                )
            )
        ]

    def clear(self):
        log.log.debug('Clearing whole DB')

        self._r.delete_all()

    def clear_collection(self, collection=BaseGraphDB.DEFAULT_COLLECTION):
        log.log.debug('Clearing collection %s', collection.name)

        # TODO: make single DELETE query
        self._r.delete([r.n for r in self.all(collection=collection)])

    def create(self, name, properties={}, collection=BaseGraphDB.DEFAULT_COLLECTION):
        log.log.debug(
            'Creating %s, name %s with properties %s',
            collection.name,
            name,
            properties
        )

        properties = deepcopy(properties)
        properties['name'] = name

        n = py2neo.Node(collection.name, **properties)
        return self._r.create(n)[0]

    def create_relation(self,
                        source,
                        dest,
                        properties={},
                        type_=BaseGraphDB.DEFAULT_RELATION):
        log.log.debug(
            'Creating %s from %s to %s with properties %s',
            type_.name,
            source.properties['name'],
            dest.properties['name'],
            properties
        )
        s = self.get(
            source.properties['name'],
            collection=source.collection,
            db_convert=False
        )
        d = self.get(
            dest.properties['name'],
            collection=dest.collection,
            db_convert=False
        )
        r = py2neo.Relationship(s, type_.name, d, **properties)
        self._r.create(r)

        return r

    def _get_query(self, name, collection=BaseGraphDB.DEFAULT_COLLECTION):
        return 'MATCH (n:%(collection)s {name:{name}}) RETURN n' % {
            'collection': collection.name,
        }, {
            'name': name,
        }

    def get(self, name, collection=BaseGraphDB.DEFAULT_COLLECTION):
        query, kwargs = self._get_query(name, collection=collection)
        res = self._r.cypher.execute(query, kwargs)

        if res:
            return res[0].n

    def get_or_create(self,
                      name,
                      properties={},
                      collection=BaseGraphDB.DEFAULT_COLLECTION):
        n = self.get(name, collection=collection, db_convert=False)

        if n:
            if properties != n.properties:
                n.properties.update(properties)
                n.push()
            return n

        return self.create(name, properties=properties, collection=collection)

    def _relations_query(self,
                         source=None,
                         dest=None,
                         type_=BaseGraphDB.DEFAULT_RELATION,
                         query_type='RETURN'):
        kwargs = {}
        source_query = '(n)'
        if source:
            source_query = '(n {name:{source_name}})'
            kwargs['source_name'] = source.properties['name']
        dest_query = '(m)'
        if dest:
            dest_query = '(m {name:{dest_name}})'
            kwargs['dest_name'] = dest.properties['name']
        rel_query = '[r:%(type_)s]' % {'type_': type_.name}

        query = ('MATCH %(source_query)s-%(rel_query)s->'
                 '%(dest_query)s %(query_type)s r' % {
                     'dest_query': dest_query,
                     'query_type': query_type,
                     'rel_query': rel_query,
                     'source_query': source_query,
                 })

        return query, kwargs

    def delete_relations(self,
                         source=None,
                         dest=None,
                         type_=BaseGraphDB.DEFAULT_RELATION):
        query, kwargs = self._relations_query(
            source=source, dest=dest, type_=type_, query_type='DELETE'
        )

        self._r.cypher.execute(query, kwargs)

    def get_relations(self,
                      source=None,
                      dest=None,
                      type_=BaseGraphDB.DEFAULT_RELATION):
        query, kwargs = self._relations_query(
            source=source, dest=dest, type_=type_
        )

        res = self._r.cypher.execute(query, kwargs)

        return [r.r for r in res]

    def get_relation(self, source, dest, type_=BaseGraphDB.DEFAULT_RELATION):
        rel = self.get_relations(source=source, dest=dest, type_=type_)

        if rel:
            return rel[0]

    def get_or_create_relation(self,
                               source,
                               dest,
                               properties={},
                               type_=BaseGraphDB.DEFAULT_RELATION):
        rel = self.get_relations(source=source, dest=dest, type_=type_)

        if rel:
            r = rel[0]
            if properties != r.properties:
                r.properties.update(properties)
                r.push()
            return r

        return self.create_relation(source, dest, properties=properties, type_=type_)
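`_relations_query` above assembles one parameterized Cypher MATCH statement and reuses it for both fetching (`RETURN r`) and deleting (`DELETE r`) edges; only the node labels and relation type are interpolated into the text, while names travel as bound parameters. A standalone sketch of that string assembly, runnable without a Neo4j connection:

    def relations_query(source=None, dest=None, type_='resource_input',
                        query_type='RETURN'):
        # Bound parameters ({source_name}, {dest_name}) keep values out of the
        # query text; only the relation type is interpolated directly.
        params = {}
        source_q, dest_q = '(n)', '(m)'
        if source:
            source_q = '(n {name:{source_name}})'
            params['source_name'] = source
        if dest:
            dest_q = '(m {name:{dest_name}})'
            params['dest_name'] = dest
        query = 'MATCH %s-[r:%s]->%s %s r' % (source_q, type_, dest_q, query_type)
        return query, params

    print relations_query(source='node1', query_type='DELETE')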
@ -1,156 +0,0 @@
|
||||
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from enum import Enum
try:
    import ujson as json
except ImportError:
    import json

import redis
import fakeredis


class RedisDB(object):
    COLLECTIONS = Enum(
        'Collections',
        'connection resource state_data state_log events'
    )
    DB = {
        'host': 'localhost',
        'port': 6379,
    }
    REDIS_CLIENT = redis.StrictRedis

    def __init__(self):
        self._r = self.REDIS_CLIENT(**self.DB)
        self.entities = {}

    def read(self, uid, collection=COLLECTIONS.resource):
        try:
            return json.loads(
                self._r.get(self._make_key(collection, uid))
            )
        except TypeError:
            return None

    def get_list(self, collection=COLLECTIONS.resource):
        key_glob = self._make_key(collection, '*')

        keys = self._r.keys(key_glob)

        with self._r.pipeline() as pipe:
            pipe.multi()

            values = [self._r.get(key) for key in keys]

            pipe.execute()

        for value in values:
            yield json.loads(value)

    def save(self, uid, data, collection=COLLECTIONS.resource):
        ret = self._r.set(
            self._make_key(collection, uid),
            json.dumps(data)
        )

        return ret

    def save_list(self, lst, collection=COLLECTIONS.resource):
        with self._r.pipeline() as pipe:
            pipe.multi()

            for uid, data in lst:
                key = self._make_key(collection, uid)
                pipe.set(key, json.dumps(data))

            pipe.execute()

    def clear(self):
        self._r.flushdb()

    def get_ordered_hash(self, collection):
        return OrderedHash(self._r, collection)

    def clear_collection(self, collection=COLLECTIONS.resource):
        key_glob = self._make_key(collection, '*')

        self._r.delete(self._r.keys(key_glob))

    def delete(self, uid, collection=COLLECTIONS.resource):
        self._r.delete(self._make_key(collection, uid))

    def _make_key(self, collection, _id):
        if isinstance(collection, self.COLLECTIONS):
            collection = collection.name

        # NOTE: hiera-redis backend depends on this!
        return '{0}:{1}'.format(collection, _id)


class OrderedHash(object):

    def __init__(self, client, collection):
        self.r = client
        self.collection = collection
        self.order_counter = '{}:incr'.format(collection)
        self.order = '{}:order'.format(collection)

    def add(self, items):
        pipe = self.r.pipeline()
        for key, value in items:
            count = self.r.incr(self.order_counter)
            pipe.zadd(self.order, count, key)
            pipe.hset(self.collection, key, json.dumps(value))
        pipe.execute()

    def rem(self, keys):
        pipe = self.r.pipeline()
        for key in keys:
            pipe.zrem(self.order, key)
            pipe.hdel(self.collection, key)
        pipe.execute()

    def get(self, key):
        value = self.r.hget(self.collection, key)
        if value:
            return json.loads(value)
        return None

    def update(self, key, value):
        self.r.hset(self.collection, key, json.dumps(value))

    def clean(self):
        self.rem(self.r.zrange(self.order, 0, -1))

    def rem_left(self, n=1):
        self.rem(self.r.zrevrange(self.order, 0, n-1))

    def reverse(self, n=1):
        result = []
        for key in self.r.zrevrange(self.order, 0, n-1):
            result.append(self.get(key))
        return result

    def list(self, n=0):
        result = []
        for key in self.r.zrange(self.order, 0, n-1):
            result.append(self.get(key))
        return result


class FakeRedisDB(RedisDB):

    REDIS_CLIENT = fakeredis.FakeStrictRedis
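`OrderedHash` above pairs a Redis sorted set (insertion order via an INCR counter) with a hash (the payloads), so a log can be read oldest-first (`zrange`) or newest-first (`zrevrange`). The same idea in plain Python, for illustration only:

    import itertools
    import json

    class OrderedHashSketch(object):
        def __init__(self):
            self._counter = itertools.count(1)  # stands in for the INCR key
            self._order = {}                    # member -> score (the zset)
            self._data = {}                     # member -> payload (the hash)

        def add(self, items):
            for key, value in items:
                self._order[key] = next(self._counter)
                self._data[key] = json.dumps(value)

        def list(self):
            # oldest first, like ZRANGE over the order key
            return [json.loads(self._data[k])
                    for k in sorted(self._order, key=self._order.get)]

    h = OrderedHashSketch()
    h.add([('a', {'x': 1}), ('b', {'x': 2})])
    assert h.list() == [{'x': 1}, {'x': 2}]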
@ -1,300 +0,0 @@
|
||||
try:
    import ujson as json
except ImportError:
    import json
import redis
import fakeredis

from .base import BaseGraphDB, Node, Relation
from .redis_db import OrderedHash


class RedisGraphDB(BaseGraphDB):
    DB = {
        'host': 'localhost',
        'port': 6379,
    }
    REDIS_CLIENT = redis.StrictRedis

    def __init__(self):
        self._r = self.REDIS_CLIENT(**self.DB)
        self.entities = {}

    def node_db_to_object(self, node_db):
        if isinstance(node_db, Node):
            return node_db

        return Node(
            self,
            node_db['name'],
            [node_db['collection']],
            node_db['properties']
        )

    def relation_db_to_object(self, relation_db):
        if isinstance(relation_db, Relation):
            return relation_db

        if relation_db['type_'] == BaseGraphDB.RELATION_TYPES.input_to_input.name:
            source_collection = BaseGraphDB.COLLECTIONS.input
            dest_collection = BaseGraphDB.COLLECTIONS.input
        elif relation_db['type_'] == BaseGraphDB.RELATION_TYPES.resource_input.name:
            source_collection = BaseGraphDB.COLLECTIONS.resource
            dest_collection = BaseGraphDB.COLLECTIONS.input
        elif relation_db['type_'] == BaseGraphDB.RELATION_TYPES.resource_event.name:
            source_collection = BaseGraphDB.COLLECTIONS.resource_events
            dest_collection = BaseGraphDB.COLLECTIONS.events

        source = self.get(relation_db['source'], collection=source_collection)
        dest = self.get(relation_db['dest'], collection=dest_collection)

        return Relation(
            self,
            source,
            dest,
            relation_db['properties']
        )

    def all(self, collection=BaseGraphDB.DEFAULT_COLLECTION):
        """Return all elements (nodes) of type `collection`."""

        key_glob = self._make_collection_key(collection, '*')

        for result in self._all(key_glob):
            yield result

    def all_relations(self, type_=BaseGraphDB.DEFAULT_RELATION):
        """Return all relations of type `type_`."""
        key_glob = self._make_relation_key(type_, '*')
        for result in self._all(key_glob):
            yield result

    def _all(self, key_glob):
        keys = self._r.keys(key_glob)

        with self._r.pipeline() as pipe:
            pipe.multi()

            values = [self._r.get(key) for key in keys]

            pipe.execute()

        for value in values:
            yield json.loads(value)

    def clear(self):
        """Clear the whole DB."""

        self._r.flushdb()

    def clear_collection(self, collection=BaseGraphDB.DEFAULT_COLLECTION):
        """Clear all elements (nodes) of type `collection`."""

        key_glob = self._make_collection_key(collection, '*')

        self._r.delete(self._r.keys(key_glob))

    def create(self, name, properties={}, collection=BaseGraphDB.DEFAULT_COLLECTION):
        """Create element (node) with given name, properties, of type `collection`."""

        if isinstance(collection, self.COLLECTIONS):
            collection = collection.name

        properties = {
            'name': name,
            'properties': properties,
            'collection': collection,
        }

        self._r.set(
            self._make_collection_key(collection, name),
            json.dumps(properties)
        )

        return properties

    def create_relation(self,
                        source,
                        dest,
                        properties={},
                        type_=BaseGraphDB.DEFAULT_RELATION):
        """
        Create relation (connection) of type `type_` from source to dest with
        given properties.
        """
        return self.create_relation_str(
            source.uid, dest.uid, properties, type_=type_)

    def create_relation_str(self, source, dest,
                            properties={}, type_=BaseGraphDB.DEFAULT_RELATION):
        if isinstance(type_, self.RELATION_TYPES):
            type_ = type_.name

        uid = self._make_relation_uid(source, dest)

        properties = {
            'source': source,
            'dest': dest,
            'properties': properties,
            'type_': type_,
        }

        self._r.set(
            self._make_relation_key(type_, uid),
            json.dumps(properties)
        )

        return properties

    def get(self, name, collection=BaseGraphDB.DEFAULT_COLLECTION,
            return_empty=False):
        """Fetch element with given name and collection type."""
        try:
            collection_key = self._make_collection_key(collection, name)
            item = self._r.get(collection_key)
            if not item and return_empty:
                return item
            return json.loads(item)
        except TypeError:
            raise KeyError(collection_key)

    def delete(self, name, collection=BaseGraphDB.DEFAULT_COLLECTION):
        keys = self._r.keys(self._make_collection_key(collection, name))
        if keys:
            self._r.delete(*keys)

    def get_or_create(self,
                      name,
                      properties={},
                      collection=BaseGraphDB.DEFAULT_COLLECTION):
        """
        Fetch or create element (if not exists) with given name, properties of
        type `collection`.
        """

        try:
            return self.get(name, collection=collection)
        except KeyError:
            return self.create(name, properties=properties, collection=collection)

    def _relations_glob(self,
                        source=None,
                        dest=None,
                        type_=BaseGraphDB.DEFAULT_RELATION):
        if source is None:
            source = '*'
        else:
            source = source.uid
        if dest is None:
            dest = '*'
        else:
            dest = dest.uid

        return self._make_relation_key(type_, self._make_relation_uid(source, dest))

    def delete_relations(self,
                         source=None,
                         dest=None,
                         type_=BaseGraphDB.DEFAULT_RELATION,
                         has_properties=None):
        """Delete all relations of type `type_` from source to dest."""

        glob = self._relations_glob(source=source, dest=dest, type_=type_)
        keys = self._r.keys(glob)

        if not keys:
            return

        if not has_properties:
            self._r.delete(*keys)

        rels = self.get_relations(
            source=source, dest=dest, type_=type_, has_properties=has_properties
        )
        for r in rels:
            self.delete_relations(
                source=r.start_node,
                dest=r.end_node,
                type_=type_
            )

    def get_relations(self,
                      source=None,
                      dest=None,
                      type_=BaseGraphDB.DEFAULT_RELATION,
                      has_properties=None):
        """Fetch all relations of type `type_` from source to dest."""

        glob = self._relations_glob(source=source, dest=dest, type_=type_)

        def check_has_properties(r):
            if has_properties:
                for k, v in has_properties.items():
                    if not r['properties'].get(k) == v:
                        return False

            return True

        for r in self._all(glob):
            # Glob is primitive, we must filter stuff correctly here
            if source and r['source'] != source.uid:
                continue
            if dest and r['dest'] != dest.uid:
                continue
            if not check_has_properties(r):
                continue
            yield r

    def get_relation(self, source, dest, type_=BaseGraphDB.DEFAULT_RELATION):
        """Fetch relations with given source, dest and type_."""

        uid = self._make_relation_key(source.uid, dest.uid)
        try:
            return json.loads(
                self._r.get(self._make_relation_key(type_, uid))
            )
        except TypeError:
            raise KeyError

    def get_or_create_relation(self,
                               source,
                               dest,
                               properties=None,
                               type_=BaseGraphDB.DEFAULT_RELATION):
        """Fetch or create relation with given properties."""
        properties = properties or {}

        try:
            return self.get_relation(source, dest, type_=type_)
        except KeyError:
            return self.create_relation(source, dest, properties=properties, type_=type_)

    def _make_collection_key(self, collection, _id):
        if isinstance(collection, self.COLLECTIONS):
            collection = collection.name

        # NOTE: hiera-redis backend depends on this!
        return '{0}:{1}'.format(collection, _id)

    def _make_relation_uid(self, source, dest):
        """
        There can be only one relation from source to dest, that's why
        this function works.
        """

        return '{0}-{1}'.format(source, dest)

    def _make_relation_key(self, type_, _id):
        if isinstance(type_, self.RELATION_TYPES):
            type_ = type_.name

        # NOTE: hiera-redis backend depends on this!
        return '{0}:{1}'.format(type_, _id)

    def get_ordered_hash(self, collection):
        return OrderedHash(self._r, collection)


class FakeRedisGraphDB(RedisGraphDB):

    REDIS_CLIENT = fakeredis.FakeStrictRedis
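The removed graph backend flattens the whole graph onto plain Redis string keys: nodes live at `<collection>:<name>` and edges at `<type>:<source>-<dest>`, which is why glob lookups plus the extra in-Python filtering in `get_relations` are needed. A short demonstration of the key scheme:

    def collection_key(collection, name):
        return '{0}:{1}'.format(collection, name)

    def relation_key(type_, source, dest):
        # one edge per (source, dest) pair, as _make_relation_uid assumes
        return '{0}:{1}-{2}'.format(type_, source, dest)

    print collection_key('resource', 'node1')            # resource:node1
    print relation_key('resource_input', 'node1', 'ip')  # resource_input:node1-ip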
@ -1,735 +0,0 @@
|
||||
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import inspect
import networkx
import uuid

from solar import errors
from solar.core import validation
from solar.interfaces.db import base
from solar.interfaces.db import get_db

import os

# USE_CACHE could be set only from CLI
USE_CACHE = int(os.getenv("USE_CACHE", 0))


db = get_db()


from functools import wraps

def _delete_from(store):
    def _wrp(key):
        try:
            del store[key]
        except KeyError:
            pass
    return _wrp


def cache_me(store):
    def _inner(f):
        # attaching to functions even when no cache enabled for consistency
        f._cache_store = store
        f._cache_del = _delete_from(store)

        @wraps(f)
        def _inner2(obj, *args, **kwargs):
            try:
                return store[obj.id]
            except KeyError:
                pass
            val = f(obj, *args, **kwargs)
            if obj.id.startswith('location_id'):
                if not val.value:
                    return val
            if obj.id.startswith('transports_id'):
                if not val.value:
                    return val
            if isinstance(val, list):
                return val
            else:
                if not val.value:
                    return val
            store[obj.id] = val
            return val

        if USE_CACHE:
            return _inner2
        else:
            return f
    return _inner


class DBField(object):
    is_primary = False
    schema = None
    schema_in_field = None
    default_value = None

    def __init__(self, name, value=None):
        self.name = name
        self.value = value
        if value is None:
            self.value = self.default_value

    def __eq__(self, inst):
        return self.name == inst.name and self.value == inst.value

    def __ne__(self, inst):
        return not self.__eq__(inst)

    def __hash__(self):
        return hash('{}:{}'.format(self.name, self.value))

    def validate(self):
        if self.schema is None:
            return

        es = validation.validate_input(self.value, schema=self.schema)
        if es:
            raise errors.ValidationError('"{}": {}'.format(self.name, es[0]))


def db_field(schema=None,
             schema_in_field=None,
             default_value=None,
             is_primary=False):
    """Definition for the DB field.

    schema - simple schema according to the one in solar.core.validation
    schema_in_field - if you don't want to fix schema, you can specify
        another field in DBObject that will represent the schema used
        for validation of this field
    is_primary - only one field in db object can be primary. This key is used
        for creating key in the DB
    """

    class DBFieldX(DBField):
        pass

    DBFieldX.is_primary = is_primary
    DBFieldX.schema = schema
    DBFieldX.schema_in_field = schema_in_field
    if default_value is not None:
        DBFieldX.default_value = default_value

    return DBFieldX


class DBRelatedField(object):
    source_db_class = None
    destination_db_class = None
    relation_type = None

    def __init__(self, name, source_db_object):
        self.name = name
        self.source_db_object = source_db_object

    @classmethod
    def graph(self):
        relations = db.get_relations(type_=self.relation_type)

        g = networkx.MultiDiGraph()

        for r in relations:
            source = self.source_db_class(**r.start_node.properties)
            dest = self.destination_db_class(**r.end_node.properties)
            properties = r.properties.copy()
            g.add_edge(
                source,
                dest,
                attr_dict=properties
            )

        return g

    def all(self):
        source_db_node = self.source_db_object._db_node

        if source_db_node is None:
            return []

        return db.get_relations(source=source_db_node,
                                type_=self.relation_type)

    def all_by_dest(self, destination_db_object):
        destination_db_node = destination_db_object._db_node

        if destination_db_node is None:
            return set()

        return db.get_relations(dest=destination_db_node,
                                type_=self.relation_type)

    def add(self, *destination_db_objects):
        for dest in destination_db_objects:
            if not isinstance(dest, self.destination_db_class):
                raise errors.SolarError(
                    'Object {} is of incompatible type {}.'.format(
                        dest, self.destination_db_class
                    )
                )

            db.get_or_create_relation(
                self.source_db_object._db_node,
                dest._db_node,
                properties={},
                type_=self.relation_type
            )

    def add_hash(self, destination_db_object, destination_key, tag=None):
        if not isinstance(destination_db_object, self.destination_db_class):
            raise errors.SolarError(
                'Object {} is of incompatible type {}.'.format(
                    destination_db_object, self.destination_db_class
                )
            )

        db.get_or_create_relation(
            self.source_db_object._db_node,
            destination_db_object._db_node,
            properties={'destination_key': destination_key, 'tag': tag},
            type_=self.relation_type
        )

    def remove(self, *destination_db_objects):
        for dest in destination_db_objects:
            db.delete_relations(
                source=self.source_db_object._db_node,
                dest=dest._db_node,
                type_=self.relation_type
            )

    def as_set(self):
        """
        Return DB objects that are destinations for self.source_db_object.
        """

        relations = self.all()

        ret = set()

        for rel in relations:
            ret.add(
                self.destination_db_class(**rel.end_node.properties)
            )

        return ret

    def as_list(self):
        relations = self.all()

        ret = []

        for rel in relations:
            ret.append(
                self.destination_db_class(**rel.end_node.properties)
            )

        return ret

    def sources(self, destination_db_object):
        """
        Reverse of self.as_set, i.e. for given destination_db_object,
        return source DB objects.
        """

        relations = self.all_by_dest(destination_db_object)

        ret = set()

        for rel in relations:
            ret.add(
                self.source_db_class(**rel.start_node.properties)
            )

        return ret

    def delete_all_incoming(self,
                            destination_db_object,
                            destination_key=None,
                            tag=None):
        """
        Delete all relations for which destination_db_object is an end node.

        If object is a hash, you can additionally specify the dst_key argument.
        Then only connections that are destinations of dst_key will be deleted.

        Same with tag.
        """
        properties = {}
        if destination_key is not None:
            properties['destination_key'] = destination_key
        if tag is not None:
            properties['tag'] = tag

        db.delete_relations(
            dest=destination_db_object._db_node,
            type_=self.relation_type,
            has_properties=properties or None
        )


def db_related_field(relation_type, destination_db_class):
    class DBRelatedFieldX(DBRelatedField):
        pass

    DBRelatedFieldX.relation_type = relation_type
    DBRelatedFieldX.destination_db_class = destination_db_class

    return DBRelatedFieldX


class DBObjectMeta(type):
    def __new__(cls, name, parents, dct):
        collection = dct.get('_collection')
        if not collection:
            raise NotImplementedError('Collection is required.')

        dct['_meta'] = {}
        dct['_meta']['fields'] = {}
        dct['_meta']['related_to'] = {}

        has_primary = False

        for field_name, field_klass in dct.items():
            if not inspect.isclass(field_klass):
                continue
            if issubclass(field_klass, DBField):
                dct['_meta']['fields'][field_name] = field_klass

                if field_klass.is_primary:
                    if has_primary:
                        raise errors.SolarError('Object cannot have 2 primary fields.')

                    has_primary = True

                    dct['_meta']['primary'] = field_name
            elif issubclass(field_klass, DBRelatedField):
                dct['_meta']['related_to'][field_name] = field_klass

        if not has_primary:
            raise errors.SolarError('Object needs to have a primary field.')

        klass = super(DBObjectMeta, cls).__new__(cls, name, parents, dct)

        # Support for self-references in relations
        for field_name, field_klass in klass._meta['related_to'].items():
            field_klass.source_db_class = klass
            if field_klass.destination_db_class == klass.__name__:
                field_klass.destination_db_class = klass

        return klass


class DBObject(object):
    # Enum from BaseGraphDB.COLLECTIONS
    _collection = None

    def __init__(self, **kwargs):
        wrong_fields = set(kwargs) - set(self._meta['fields'])
        if wrong_fields:
            raise errors.SolarError(
                'Unknown fields {}'.format(wrong_fields)
            )

        self._fields = {}

        for field_name, field_klass in self._meta['fields'].items():
            value = kwargs.get(field_name, field_klass.default_value)

            self._fields[field_name] = field_klass(field_name, value=value)

        self._related_to = {}

        for field_name, field_klass in self._meta['related_to'].items():
            inst = field_klass(field_name, self)
            self._related_to[field_name] = inst

        self._update_values()

    def __eq__(self, inst):
        # NOTE: don't compare related fields
        self._update_fields_values()
        return self._fields == inst._fields

    def __ne__(self, inst):
        return not self.__eq__(inst)

    def __hash__(self):
        return hash(self._db_key)

    def _update_fields_values(self):
        """Copy values from self to self._fields."""

        for field in self._fields.values():
            field.value = getattr(self, field.name)

    def _update_values(self):
        """
        Reverse of _update_fields_values, i.e. copy values from self._fields to
        self."""

        for field in self._fields.values():
            setattr(self, field.name, field.value)

        for field in self._related_to.values():
            setattr(self, field.name, field)

    @property
    def _db_key(self):
        """Key for the DB document (in KV-store).

        You can overwrite this with custom keys."""
        if not self._primary_field.value:
            setattr(self, self._primary_field.name, unicode(uuid.uuid4()))
            self._update_fields_values()
        return self._primary_field.value

    @property
    def _primary_field(self):
        return self._fields[self._meta['primary']]

    @property
    def _db_node(self):
        try:
            return db.get(self._db_key, collection=self._collection)
        except KeyError:
            return

    def validate(self):
        self._update_fields_values()
        for field in self._fields.values():
            if field.schema_in_field is not None:
                field.schema = self._fields[field.schema_in_field].value
            field.validate()

    def to_dict(self):
        self._update_fields_values()
        return {
            f.name: f.value for f in self._fields.values()
        }

    @classmethod
    def load(cls, key):
        r = db.get(key, collection=cls._collection)
        return cls(**r.properties)

    @classmethod
    def load_all(cls):
        rs = db.all(collection=cls._collection)

        return [cls(**r.properties) for r in rs]

    def save(self):
        db.create(
            self._db_key,
            properties=self.to_dict(),
            collection=self._collection
        )

    def delete(self):
        db.delete(
            self._db_key,
            collection=self._collection
        )


class DBResourceInput(DBObject):
    __metaclass__ = DBObjectMeta

    _collection = base.BaseGraphDB.COLLECTIONS.input

    id = db_field(schema='str!', is_primary=True)
    name = db_field(schema='str!')
    schema = db_field()
    value = db_field(schema_in_field='schema')
    is_list = db_field(schema='bool!', default_value=False)
    is_hash = db_field(schema='bool!', default_value=False)

    receivers = db_related_field(base.BaseGraphDB.RELATION_TYPES.input_to_input,
                                 'DBResourceInput')

    @property
    def resource(self):
        return DBResource(
            **db.get_relations(
                dest=self._db_node,
                type_=base.BaseGraphDB.RELATION_TYPES.resource_input
            )[0].start_node.properties
        )

    def save(self):
        self.backtrack_value_emitter._cache_del(self.id)
        return super(DBResourceInput, self).save()

    def delete(self):
        db.delete_relations(
            source=self._db_node,
            type_=base.BaseGraphDB.RELATION_TYPES.input_to_input
        )
        db.delete_relations(
            dest=self._db_node,
            type_=base.BaseGraphDB.RELATION_TYPES.input_to_input
        )
        self.backtrack_value_emitter._cache_del(self.id)
        super(DBResourceInput, self).delete()

    def edges(self):

        out = db.get_relations(
            source=self._db_node,
            type_=base.BaseGraphDB.RELATION_TYPES.input_to_input)
        incoming = db.get_relations(
            dest=self._db_node,
            type_=base.BaseGraphDB.RELATION_TYPES.input_to_input)
        for relation in out + incoming:
            meta = relation.properties
            source = DBResourceInput(**relation.start_node.properties)
            dest = DBResourceInput(**relation.end_node.properties)
            yield source, dest, meta

    def check_other_val(self, other_val=None):
        if not other_val:
            return self
        res = self.resource
        # TODO: needs to be refactored a lot to be more effective.
        # We don't have way of getting single input / value for given resource.
        inps = {i.name: i for i in res.inputs.as_set()}
        correct_input = inps[other_val]
        return correct_input.backtrack_value()

    @cache_me({})
    def backtrack_value_emitter(self, level=None, other_val=None):
        # TODO: this is actually just fetching head element in linked list
        #       so this whole algorithm can be moved to the db backend probably
        # TODO: cycle detection?
        # TODO: write this as a Cypher query? Move to DB?
        if level is not None and other_val is not None:
            raise Exception("Not supported yet")

        if level == 0:
            return self

        def backtrack_func(i):
            if level is None:
                return i.backtrack_value_emitter(other_val=other_val)

            return i.backtrack_value_emitter(level=level - 1, other_val=other_val)

        inputs = self.receivers.sources(self)
        relations = self.receivers.all_by_dest(self)
        source_class = self.receivers.source_db_class

        if not inputs:
            return self.check_other_val(other_val)

        # if lazy_val is None:
        #     return self.value
        # print self.resource.name
        # print [x.name for x in self.resource.inputs.as_set()]
        # _input = next(x for x in self.resource.inputs.as_set() if x.name == lazy_val)
        # return _input.backtrack_value()
        # # return self.value
        if self.is_list:
            if not self.is_hash:
                return [backtrack_func(i) for i in inputs]

            # NOTE: we return a list of values, but we need to group them
            #       hence this dict here
            # NOTE: grouping is done by resource.name by default, but this
            #       can be overwritten by the 'tag' property in relation
            ret = {}

            for r in relations:
                source = source_class(**r.start_node.properties)
                tag = r.properties['tag']
                ret.setdefault(tag, {})
                key = r.properties['destination_key']
                value = backtrack_func(source)

                ret[tag].update({key: value})

            return ret.values()
        elif self.is_hash:
            ret = self.value or {}
            for r in relations:
                source = source_class(
                    **r.start_node.properties
                )
                # NOTE: hard way to do this, what if there are more relations
                #       and some of them do have destination_key while others
                #       don't?
                if 'destination_key' not in r.properties:
                    return backtrack_func(source)
                key = r.properties['destination_key']
                ret[key] = backtrack_func(source)
            return ret

        return backtrack_func(inputs.pop())

    def parse_backtracked_value(self, v):
        if isinstance(v, DBResourceInput):
            return v.value

        if isinstance(v, list):
            return [self.parse_backtracked_value(vv) for vv in v]

        if isinstance(v, dict):
            return {
                k: self.parse_backtracked_value(vv) for k, vv in v.items()
            }

        return v

    def backtrack_value(self, other_val=None):
        return self.parse_backtracked_value(self.backtrack_value_emitter(other_val=other_val))


class DBEvent(DBObject):

    __metaclass__ = DBObjectMeta

    _collection = base.BaseGraphDB.COLLECTIONS.events

    id = db_field(is_primary=True)
    parent = db_field(schema='str!')
    parent_action = db_field(schema='str!')
    etype = db_field('str!')
    state = db_field('str')
    child = db_field('str')
    child_action = db_field('str')

    def delete(self):
        db.delete_relations(
            dest=self._db_node,
            type_=base.BaseGraphDB.RELATION_TYPES.resource_event
        )
        super(DBEvent, self).delete()


class DBResourceEvents(DBObject):

    __metaclass__ = DBObjectMeta

    _collection = base.BaseGraphDB.COLLECTIONS.resource_events

    id = db_field(schema='str!', is_primary=True)
    events = db_related_field(base.BaseGraphDB.RELATION_TYPES.resource_event,
                              DBEvent)

    @classmethod
    def get_or_create(cls, name):
        r = db.get_or_create(
            name,
            properties={'id': name},
            collection=cls._collection)
        return cls(**r.properties)


class DBCommitedState(DBObject):

    __metaclass__ = DBObjectMeta

    _collection = base.BaseGraphDB.COLLECTIONS.state_data

    id = db_field(schema='str!', is_primary=True)
    inputs = db_field(schema={}, default_value={})
    connections = db_field(schema=[], default_value=[])
    base_path = db_field(schema='str')
    tags = db_field(schema=[], default_value=[])
    state = db_field(schema='str', default_value='removed')

    @classmethod
    def get_or_create(cls, name):
        r = db.get_or_create(
            name,
            properties={'id': name},
            collection=cls._collection)
        return cls(**r.properties)


class DBResource(DBObject):
    __metaclass__ = DBObjectMeta

    _collection = base.BaseGraphDB.COLLECTIONS.resource

    id = db_field(schema='str', is_primary=True)
    name = db_field(schema='str!')
    actions_path = db_field(schema='str')
    actions = db_field(schema={}, default_value={})
    base_name = db_field(schema='str')
    base_path = db_field(schema='str')
    handler = db_field(schema='str')  # one of: {'ansible_playbook', 'ansible_template', 'puppet', etc}
    puppet_module = db_field(schema='str')
    version = db_field(schema='str')
    tags = db_field(schema=[], default_value=[])
    meta_inputs = db_field(schema={}, default_value={})
    state = db_field(schema='str')

    inputs = db_related_field(base.BaseGraphDB.RELATION_TYPES.resource_input,
                              DBResourceInput)

    def add_input(self, name, schema, value):
        # NOTE: Inputs need to have uuid added because there can be many
        #       inputs with the same name
        uid = '{}-{}'.format(name, uuid.uuid4())
        input = DBResourceInput(id=uid,
                                name=name,
                                schema=schema,
                                value=value,
                                is_list=isinstance(schema, list),
                                is_hash=isinstance(schema, dict) or (isinstance(schema, list) and len(schema) > 0 and isinstance(schema[0], dict)))
        input.save()

        self.inputs.add(input)

    def add_event(self, action, state, etype, child, child_action):
        event = DBEvent(
            parent=self.name,
            parent_action=action,
            state=state,
            etype=etype,
            child=child,
            child_action=child_action
        )
        event.save()
        self.events.add(event)

    def delete(self):
        for input in self.inputs.as_set():
            self.inputs.remove(input)
            input.delete()
        super(DBResource, self).delete()

    def graph(self):
        mdg = networkx.MultiDiGraph()
        for input in self.inputs.as_list():
            mdg.add_edges_from(input.edges())
        return mdg

    def add_tags(self, *tags):
        self.tags = list(set(self.tags) | set(tags))
        self.save()

    def remove_tags(self, *tags):
        self.tags = list(set(self.tags) - set(tags))
        self.save()

# TODO: remove this
if __name__ == '__main__':
    r = DBResource(name=1)
    r.validate()
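The removed ORM collects `db_field` class attributes through `DBObjectMeta`, so a model is declared as plain class attributes and the metaclass builds `_meta` behind the scenes. A compact sketch of that collection step (validation and persistence omitted; fields here are instances rather than generated subclasses, which is a simplification):

    class Field(object):
        def __init__(self, is_primary=False):
            self.is_primary = is_primary

    class ModelMeta(type):
        def __new__(cls, name, parents, dct):
            fields = {k: v for k, v in dct.items() if isinstance(v, Field)}
            primary = [k for k, v in fields.items() if v.is_primary]
            if len(primary) != 1:
                raise TypeError('Exactly one primary field is required.')
            dct['_meta'] = {'fields': fields, 'primary': primary[0]}
            return super(ModelMeta, cls).__new__(cls, name, parents, dct)

    class Resource(object):
        __metaclass__ = ModelMeta
        id = Field(is_primary=True)
        name = Field()

    assert Resource._meta['primary'] == 'id'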
@ -23,35 +23,53 @@ from solar import errors
|
||||

from collections import Counter


from solar.interfaces.db import get_db

db = get_db()
from solar.dblayer.solar_models import Task
from solar.dblayer.model import clear_cache


def save_graph(graph):
    # maybe it is possible to store part of information in AsyncResult backend
    uid = graph.graph['uid']
    db.create(uid, graph.graph, db.COLLECTIONS.plan_graph)

    for n in nx.topological_sort(graph):
        t = Task.new(
            {'name': n,
             'execution': uid,
             'status': graph.node[n].get('status', ''),
             'target': graph.node[n].get('target', '') or '',
             'task_type': graph.node[n].get('type', ''),
             'args': graph.node[n].get('args', []),
             'errmsg': graph.node[n].get('errmsg', '') or ''})
        graph.node[n]['task'] = t
        for pred in graph.predecessors(n):
            pred_task = graph.node[pred]['task']
            t.parents.add(pred_task)
            pred_task.save()
        t.save()


def update_graph(graph):
    for n in graph:
        collection = db.COLLECTIONS.plan_node.name + ':' + uid
        db.create(n, properties=graph.node[n], collection=collection)
        db.create_relation_str(uid, n, type_=db.RELATION_TYPES.graph_to_node)

    for u, v, properties in graph.edges(data=True):
        type_ = db.RELATION_TYPES.plan_edge.name + ':' + uid
        db.create_relation_str(u, v, properties, type_=type_)
        task = graph.node[n]['task']
        task.status = graph.node[n]['status']
        task.errmsg = graph.node[n]['errmsg'] or ''
        task.save()


def get_graph(uid):
    dg = nx.MultiDiGraph()
    collection = db.COLLECTIONS.plan_node.name + ':' + uid
    type_ = db.RELATION_TYPES.plan_edge.name + ':' + uid
    dg.graph = db.get(uid, collection=db.COLLECTIONS.plan_graph).properties
    dg.add_nodes_from([(n.uid, n.properties) for n in db.all(collection=collection)])
    dg.add_edges_from([(i['source'], i['dest'], i['properties'])
                       for i in db.all_relations(type_=type_, db_convert=False)])
    dg.graph['uid'] = uid
    dg.graph['name'] = uid.split(':')[0]
    tasks = map(Task.get, Task.execution.filter(uid))
    for t in tasks:
        dg.add_node(
            t.name, status=t.status,
            type=t.task_type, args=t.args,
            target=t.target or None,
            errmsg=t.errmsg or None,
            task=t)
        for u in t.parents.all_names():
            dg.add_edge(u, t.name)
    return dg
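The new `save_graph` above walks the plan in topological order and persists one `Task` row per node, linking each row to its predecessors, so the DAG can later be rebuilt purely from parent references. A minimal sketch of that idea with `networkx` (plain dict rows stand in for the Task model):

    import networkx as nx

    def save_plan(graph):
        rows = {}
        for n in nx.topological_sort(graph):
            # predecessors were already visited, so their rows exist
            rows[n] = {'name': n, 'parents': list(graph.predecessors(n))}
        return rows

    g = nx.DiGraph([('prepare', 'deploy'), ('deploy', 'verify')])
    assert save_plan(g)['verify']['parents'] == ['deploy']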
@ -67,7 +85,7 @@ def parse_plan(plan_path):
|
||||
    for task in plan['tasks']:
        defaults = {
            'status': 'PENDING',
            'errmsg': None,
            'errmsg': '',
        }
        defaults.update(task['parameters'])
        dg.add_node(
@ -111,24 +129,6 @@ def create_plan(plan_path, save=True):
|
||||
    return create_plan_from_graph(dg, save=save)


def update_plan(uid, plan_path):
    """update preserves old status of tasks if they werent removed
    """

    new = parse_plan(plan_path)
    old = get_graph(uid)
    return update_plan_from_graph(new, old).graph['uid']


def update_plan_from_graph(new, old):
    new.graph = old.graph
    for n in new:
        if n in old:
            new.node[n]['status'] = old.node[n]['status']

    save_graph(new)
    return new


def reset_by_uid(uid, state_list=None):
    dg = get_graph(uid)
@ -139,7 +139,7 @@ def reset(graph, state_list=None):
|
||||
    for n in graph:
        if state_list is None or graph.node[n]['status'] in state_list:
            graph.node[n]['status'] = states.PENDING.name
    save_graph(graph)
    update_graph(graph)


def reset_filtered(uid):
@ -170,6 +170,8 @@ def wait_finish(uid, timeout):
|
||||
    start_time = time.time()

    while start_time + timeout >= time.time():
        # need to clear cache before fetching updated status
        clear_cache()
        dg = get_graph(uid)
        summary = Counter()
        summary.update({s.name: 0 for s in states})
@ -177,6 +179,7 @@ def wait_finish(uid, timeout):
|
||||
        yield summary
        if summary[states.PENDING.name] + summary[states.INPROGRESS.name] == 0:
            return

    else:
        raise errors.ExecutionTimeout(
            'Run %s wasnt able to finish' % uid)
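`wait_finish` is a generator: each iteration re-reads the run, yields a status Counter, and either returns (nothing left pending or in progress) or falls through to `ExecutionTimeout`. A runnable analogue of consuming such a poll loop (the `poll_status` callable and state names are stand-ins for the real model):

    import time
    from collections import Counter

    def wait_finish_sketch(poll_status, timeout, interval=0.1):
        # poll_status() returns the current task states, e.g. ['PENDING', ...]
        deadline = time.time() + timeout
        while time.time() < deadline:
            summary = Counter(poll_status())
            yield summary
            if summary['PENDING'] + summary['INPROGRESS'] == 0:
                return
            time.sleep(interval)
        raise RuntimeError('run was not able to finish in time')

    states = iter([['PENDING'], ['INPROGRESS'], ['SUCCESS']])
    for summary in wait_finish_sketch(lambda: next(states), timeout=5):
        pass  # each yielded summary could be printed as a progress report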
@ -14,9 +14,13 @@
|
||||

from celery import Celery

from solar.config import C

_url = 'redis://{}:{}/1'.format(C.redis.host, C.redis.port)

app = Celery(
    include=['solar.system_log.tasks', 'solar.orchestration.tasks'],
    backend='redis://10.0.0.2:6379/1',
    broker='redis://10.0.0.2:6379/1')
    backend=_url,
    broker=_url)
app.conf.update(CELERY_ACCEPT_CONTENT = ['json'])
app.conf.update(CELERY_TASK_SERIALIZER = 'json')
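The hard-coded `10.0.0.2` broker address is replaced by values from `solar.config.C`, an attribute-style view over the configuration. A rough sketch of that kind of accessor (the real loader's behavior beyond attribute access is an assumption here):

    class AttrDict(dict):
        """Attribute access over nested dicts: C.redis.host."""
        def __getattr__(self, name):
            value = self[name]
            return AttrDict(value) if isinstance(value, dict) else value

    C = AttrDict({'redis': {'host': 'localhost', 'port': '6379'}})
    _url = 'redis://{}:{}/1'.format(C.redis.host, C.redis.port)
    assert _url == 'redis://localhost:6379/1'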
@ -17,7 +17,7 @@ import subprocess
|
||||
import time

from celery.app import task
import redis
from celery.signals import task_prerun, task_postrun

from solar.orchestration import graph
from solar.core import actions
@ -27,9 +27,7 @@ from solar.orchestration.runner import app
|
||||
from solar.orchestration.traversal import traverse
from solar.orchestration import limits
from solar.orchestration import executor


r = redis.StrictRedis(host='10.0.0.2', port=6379, db=1)
from solar.dblayer import ModelMeta


__all__ = ['solar_resource', 'cmd', 'sleep',
@ -56,6 +54,14 @@ class ReportTask(task.Task):
|
||||

report_task = partial(app.task, base=ReportTask, bind=True)

@task_prerun.connect
def start_solar_session(task_id, task, *args, **kwargs):
    ModelMeta.session_start()

@task_postrun.connect
def end_solar_session(task_id, task, *args, **kwargs):
    ModelMeta.session_end()


@report_task(name='solar_resource')
def solar_resource(ctxt, resource_name, action):
@ -126,7 +132,7 @@ def schedule(plan_uid, dg):
|
||||
        tasks)
    execution = executor.celery_executor(
        dg, limit_chain, control_tasks=('fault_tolerance',))
    graph.save_graph(dg)
    graph.update_graph(dg)
    execution()
@ -147,8 +153,7 @@ def soft_stop(plan_uid):
|
||||
    for n in dg:
        if dg.node[n]['status'] == 'PENDING':
            dg.node[n]['status'] = 'SKIPPED'
    graph.save_graph(dg)

    graph.update_graph(dg)

@app.task(name='schedule_next')
def schedule_next(task_id, status, errmsg=None):
@ -15,21 +15,19 @@
|
||||
import dictdiffer
import networkx as nx

from solar.system_log import data
from solar.core.log import log
from solar.core import signals
from solar.core import resource
from solar import utils
from solar.interfaces.db import get_db
from solar.system_log import data

from solar.orchestration import graph
from solar.events import api as evapi
from solar.interfaces import orm
from .consts import CHANGES
from solar.core.resource.resource import RESOURCE_STATE
from solar.errors import CannotFindID

db = get_db()

from solar.dblayer.solar_models import Resource, LogItem, CommitedResource, StrInt

def guess_action(from_, to):
    # NOTE(dshulyak) imo the way to solve this - is dsl for orchestration,
@ -47,14 +45,14 @@ def create_diff(staged, commited):
|
||||

def create_logitem(resource, action, diffed, connections_diffed,
                   base_path=None):
    return data.LogItem(
        utils.generate_uuid(),
        resource,
        action,
        diffed,
        connections_diffed,
        base_path=base_path)
                   base_path=''):
    return LogItem.new(
        {'resource': resource,
         'action': action,
         'diff': diffed,
         'connections_diff': connections_diffed,
         'base_path': base_path,
         'log': 'staged'})


def create_sorted_diff(staged, commited):
@ -63,43 +61,52 @@ def create_sorted_diff(staged, commited):
|
||||
return create_diff(staged, commited)
|
||||
|
||||
|
||||
def make_single_stage_item(resource_obj):
|
||||
commited = resource_obj.load_commited()
|
||||
base_path = resource_obj.base_path
|
||||
|
||||
if resource_obj.to_be_removed():
|
||||
resource_args = {}
|
||||
resource_connections = []
|
||||
else:
|
||||
resource_args = resource_obj.args
|
||||
resource_connections = resource_obj.connections
|
||||
|
||||
if commited.state == RESOURCE_STATE.removed.name:
|
||||
commited_args = {}
|
||||
commited_connections = []
|
||||
else:
|
||||
commited_args = commited.inputs
|
||||
commited_connections = commited.connections
|
||||
|
||||
inputs_diff = create_diff(resource_args, commited_args)
|
||||
connections_diff = create_sorted_diff(
|
||||
resource_connections, commited_connections)
|
||||
|
||||
# if new connection created it will be reflected in inputs
|
||||
# but using inputs to reverse connections is not possible
|
||||
if inputs_diff:
|
||||
li = create_logitem(
|
||||
resource_obj.name,
|
||||
guess_action(commited_args, resource_args),
|
||||
inputs_diff,
|
||||
connections_diff,
|
||||
base_path=base_path)
|
||||
li.save()
|
||||
return li
|
||||
return None
|
||||
|
||||
|
||||
def stage_changes():
    log = data.SL()
    log.clean()
    for li in data.SL():
        li.delete()

    for resouce_obj in resource.load_all():
        commited = resouce_obj.load_commited()
        base_path = resouce_obj.base_path
        if resouce_obj.to_be_removed():
            resource_args = {}
            resource_connections = []
        else:
            resource_args = resouce_obj.args
            resource_connections = resouce_obj.connections

        if commited.state == RESOURCE_STATE.removed.name:
            commited_args = {}
            commited_connections = []
        else:
            commited_args = commited.inputs
            commited_connections = commited.connections

        inputs_diff = create_diff(resource_args, commited_args)
        connections_diff = create_sorted_diff(
            resource_connections, commited_connections)

        # if new connection created it will be reflected in inputs
        # but using inputs to reverse connections is not possible
        if inputs_diff:
            log_item = create_logitem(
                resouce_obj.name,
                guess_action(commited_args, resource_args),
                inputs_diff,
                connections_diff,
                base_path=base_path)
            log.append(log_item)
    return log
    last = LogItem.history_last()
    since = StrInt.greater(last.updated) if last else None
    staged_log = utils.solar_map(make_single_stage_item,
                                 resource.load_updated(since), concurrency=10)
    staged_log = filter(None, staged_log)
    return staged_log
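The rewritten stage_changes no longer rescans every resource: it drops the old staged items and maps make_single_stage_item over just the resources updated since the newest history entry, concurrently. utils.solar_map itself is not part of this diff; a thread-pool stand-in with the same shape (an assumption, not the project's implementation) could be:

# stand-in for utils.solar_map, assumed to be a concurrency-limited map
from multiprocessing.dummy import Pool  # thread-backed Pool


def solar_map(fn, iterable, concurrency=10):
    pool = Pool(concurrency)
    try:
        return pool.map(fn, iterable)
    finally:
        pool.close()
        pool.join()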
def send_to_orchestration():
@ -108,10 +115,10 @@ def send_to_orchestration():
    changed_nodes = []

    for logitem in data.SL():
        events[logitem.res] = evapi.all_events(logitem.res)
        changed_nodes.append(logitem.res)
        events[logitem.resource] = evapi.all_events(logitem.resource)
        changed_nodes.append(logitem.resource)

        state_change = evapi.StateChange(logitem.res, logitem.action)
        state_change = evapi.StateChange(logitem.resource, logitem.action)
        state_change.insert(changed_nodes, dg)

    evapi.build_edges(dg, events)
@ -123,29 +130,26 @@ def send_to_orchestration():

def parameters(res, action, data):
    return {'args': [res, action],
            'type': 'solar_resource',
            # unique identifier for a node should be passed
            'target': data.get('ip')}
            'type': 'solar_resource'}
def check_uids_present(log, uids):
    not_valid = []
    for uid in uids:
        if log.get(uid) is None:
            not_valid.append(uid)
    if not_valid:
        raise CannotFindID('UIDS: {} not in history.'.format(not_valid))


def _get_args_to_update(args, connections):
    """For each resource we can update only args that are not provided
    by connections
    """
    inherited = [i[3].split(':')[0] for i in connections]
    return {
        key:args[key] for key in args
        if key not in inherited
    }
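_get_args_to_update keeps only the args a resource owns itself: any input whose receiver side appears in a connection tuple (emitter, emitter_input, receiver, receiver_input) is dropped, since connected inputs are driven by their emitters anyway. A self-contained example with made-up values:

def _get_args_to_update(args, connections):
    inherited = [i[3].split(':')[0] for i in connections]
    return {key: args[key] for key in args if key not in inherited}

# 'ip' arrives over a connection (note the optional ':tag' suffix on the
# receiver input), so only 'path' may be written directly
connections = [('node1', 'ip', 'keys1', 'ip:str')]
args = {'ip': '10.0.0.3', 'path': '/vagrant/tmp/keys'}
assert _get_args_to_update(args, connections) == {'path': '/vagrant/tmp/keys'}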
def revert_uids(uids):
    """
    :param uids: iterable not generator
    """
    history = data.CL()
    check_uids_present(history, uids)
    items = LogItem.multi_get(uids)

    for uid in uids:
        item = history.get(uid)
    for item in items:

        if item.action == CHANGES.update.name:
            _revert_update(item)
@ -161,10 +165,12 @@ def revert_uids(uids):
def _revert_remove(logitem):
    """Resource should be created with all previous connections
    """
    commited = orm.DBCommitedState.load(logitem.res)
    commited = CommitedResource.get(logitem.resource)
    args = dictdiffer.revert(logitem.diff, commited.inputs)
    connections = dictdiffer.revert(logitem.signals_diff, sorted(commited.connections))
    resource.Resource(logitem.res, logitem.base_path, args=args, tags=commited.tags)
    connections = dictdiffer.revert(logitem.connections_diff, sorted(commited.connections))

    resource.Resource(logitem.resource, logitem.base_path,
                      args=_get_args_to_update(args, connections), tags=commited.tags)
    for emitter, emitter_input, receiver, receiver_input in connections:
        emmiter_obj = resource.load(emitter)
        receiver_obj = resource.load(receiver)
@ -172,8 +178,6 @@ def _revert_remove(logitem):
def _update_inputs_connections(res_obj, args, old_connections, new_connections):
    res_obj.update(args)

    removed = []
    for item in old_connections:
@ -188,30 +192,36 @@ def _update_inputs_connections(res_obj, args, old_connections, new_connections):
    for emitter, _, receiver, _ in removed:
        emmiter_obj = resource.load(emitter)
        receiver_obj = resource.load(receiver)
        signals.disconnect(emmiter_obj, receiver_obj)

        emmiter_obj.disconnect(receiver_obj)

    for emitter, emitter_input, receiver, receiver_input in added:
        emmiter_obj = resource.load(emitter)
        receiver_obj = resource.load(receiver)
        signals.connect(emmiter_obj, receiver_obj, {emitter_input: receiver_input})
        emmiter_obj.connect(receiver_obj, {emitter_input: receiver_input})

    if removed or added:
        # TODO without save we will get error that some values can not be updated
        # even if connection was removed
        receiver_obj.db_obj.save()

    res_obj.update(args)
def _revert_update(logitem):
    """Revert of update should update inputs and connections
    """
    res_obj = resource.load(logitem.res)
    res_obj = resource.load(logitem.resource)
    commited = res_obj.load_commited()

    args_to_update = dictdiffer.revert(logitem.diff, commited.inputs)
    connections = dictdiffer.revert(logitem.signals_diff, sorted(commited.connections))
    connections = dictdiffer.revert(logitem.connections_diff, sorted(commited.connections))
    args = dictdiffer.revert(logitem.diff, commited.inputs)

    _update_inputs_connections(
        res_obj, args_to_update, commited.connections, connections)
        res_obj, _get_args_to_update(args, connections), commited.connections, connections)
def _revert_run(logitem):
    res_obj = resource.load(logitem.res)
    res_obj = resource.load(logitem.resource)
    res_obj.remove()


@ -220,27 +230,26 @@ def revert(uid):
def _discard_remove(item):
    resource_obj = resource.load(item.res)
    resource_obj = resource.load(item.resource)
    resource_obj.set_created()


def _discard_update(item):
    resource_obj = resource.load(item.res)
    resource_obj = resource.load(item.resource)
    old_connections = resource_obj.connections
    new_connections = dictdiffer.revert(item.signals_diff, sorted(old_connections))
    new_connections = dictdiffer.revert(item.connections_diff, sorted(old_connections))
    args = dictdiffer.revert(item.diff, resource_obj.args)

    _update_inputs_connections(
        resource_obj, args, old_connections, new_connections)
        resource_obj, _get_args_to_update(args, new_connections), old_connections, new_connections)


def _discard_run(item):
    resource.load(item.res).remove(force=True)
    resource.load(item.resource).remove(force=True)
def discard_uids(uids):
    staged_log = data.SL()
    check_uids_present(staged_log, uids)
    for uid in uids:
        item = staged_log.get(uid)
    items = LogItem.multi_get(uids)
    for item in items:
        if item.action == CHANGES.update.name:
            _discard_update(item)
        elif item.action == CHANGES.remove.name:
@ -250,7 +259,7 @@ def discard_uids(uids):
        else:
            log.debug('Action %s for resource %s is a side'
                      ' effect of another action', item.action, item.res)
        staged_log.pop(uid)
        item.delete()
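Both the revert and discard paths lean on dictdiffer.revert to run a recorded diff backwards. The library's contract in one runnable example (values made up):

import dictdiffer

old = {'ip': '10.0.0.2'}
new = {'ip': '10.0.0.3'}

diff = list(dictdiffer.diff(old, new))
assert dictdiffer.patch(diff, old) == new   # apply forwards
assert dictdiffer.revert(diff, new) == old  # undo, as the code above does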
def discard_uid(uid):
@ -267,3 +276,7 @@ def commit_all():
    from .operations import move_to_commited
    for item in data.SL():
        move_to_commited(item.log_action)


def clear_history():
    LogItem.delete_all()
    CommitedResource.delete_all()
@ -18,3 +18,6 @@ CHANGES = Enum(
    'Changes',
    'run remove update'
)


STATES = Enum('States', 'error inprogress pending success')
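STATES moves into consts next to CHANGES; both are plain name-only enums, and it is their .name strings that get compared against log items (item.action above, for instance). For example:

from enum import Enum  # the enum34 backport on Python 2.7

CHANGES = Enum('Changes', 'run remove update')
STATES = Enum('States', 'error inprogress pending success')

assert CHANGES.update.name == 'update'
assert STATES.pending.name == 'pending'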
@ -12,74 +12,23 @@
# License for the specific language governing permissions and limitations
# under the License.

import collections
from functools import partial

from solar import utils
from solar.interfaces.db import get_db

from enum import Enum
from solar.dblayer.solar_models import LogItem


db = get_db()


def SL():
    rst = LogItem.composite.filter({'log': 'staged'})
    return LogItem.multi_get(rst)


def CL():
    rst = LogItem.composite.filter({'log': 'history'})
    return LogItem.multi_get(rst)


STATES = Enum('States', 'error inprogress pending success')
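SL and CL shrink from file-backed Log objects to two index queries: staged and history items now live in the same model and differ only by their 'log' field, which the composite index filters on. Hypothetical usage:

staged = SL()    # every LogItem whose log field is 'staged'
history = CL()   # every LogItem whose log field is 'history'

# both return model instances, so fields are directly readable, e.g.
# [(li.resource, li.action) for li in staged]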
def state_file(name):
    if 'log' in name:
        return Log(name)


SL = partial(state_file, 'stage_log')
CL = partial(state_file, 'commit_log')
class LogItem(object):

    def __init__(self, uid, res, action, diff,
                 signals_diff, state=None, base_path=None):
        self.uid = uid
        self.res = res
        self.log_action = '{}.{}'.format(res, action)
        self.action = action
        self.diff = diff
        self.signals_diff = signals_diff
        self.state = state or STATES.pending
        self.base_path = base_path

    def to_yaml(self):
        return utils.yaml_dump(self.to_dict())

    def to_dict(self):
        return {'uid': self.uid,
                'res': self.res,
                'diff': self.diff,
                'state': self.state.name,
                'signals_diff': self.signals_diff,
                'base_path': self.base_path,
                'action': self.action}

    @classmethod
    def from_dict(cls, **kwargs):
        state = getattr(STATES, kwargs.get('state', ''), STATES.pending)
        kwargs['state'] = state
        return cls(**kwargs)

    def __str__(self):
        return self.compact

    def __repr__(self):
        return self.compact

    @property
    def compact(self):
        return 'log task={} uid={}'.format(self.log_action, self.uid)

    @property
    def details(self):
        return details(self.diff)

def compact(logitem):
    return 'log task={} uid={}'.format(logitem.log_action, logitem.uid)
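compact degrades from a LogItem property to a free function, since LogItem is now a dblayer model defined elsewhere. A hypothetical usage, assuming at least one staged item exists:

items = SL()
print(compact(items[0]))  # -> 'log task=<resource>.<action> uid=<uid>'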
def details(diff):
@ -114,44 +63,3 @@ def unwrap_change_val(val):
    else:
        return val
class Log(object):

    def __init__(self, path):
        self.ordered_log = db.get_ordered_hash(path)

    def append(self, logitem):
        self.ordered_log.add([(logitem.uid, logitem.to_dict())])

    def pop(self, uid):
        item = self.get(uid)
        if not item:
            return None
        self.ordered_log.rem([uid])
        return item

    def update(self, logitem):
        self.ordered_log.update(logitem.uid, logitem.to_dict())

    def clean(self):
        self.ordered_log.clean()

    def get(self, key):
        item = self.ordered_log.get(key)
        if item:
            return LogItem.from_dict(**item)
        return None

    def collection(self, n=0):
        for item in self.ordered_log.reverse(n=n):
            yield LogItem.from_dict(**item)

    def reverse(self, n=0):
        for item in self.ordered_log.list(n=n):
            yield LogItem.from_dict(**item)

    def __iter__(self):
        return iter(self.collection())

    def __len__(self):
        return len(list(self.collection()))
Some files were not shown because too many files have changed in this diff.