diff --git a/.gitignore b/.gitignore
index 596446eb..f635cdf9 100644
--- a/.gitignore
+++ b/.gitignore
@@ -14,3 +14,4 @@ tmp/
 state/
 clients.json
 rs/
+x-venv/
diff --git a/example.py b/example.py
index f3869ee0..d8585359 100644
--- a/example.py
+++ b/example.py
@@ -170,7 +170,6 @@ def deploy():
 
     signals.Connections.flush()
 
-
     has_errors = False
     for r in locals().values():
         if not isinstance(r, resource.Resource):
diff --git a/run_tests.sh b/run_tests.sh
index 531344d0..fa31e702 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -20,7 +20,6 @@ pip install -r solar/requirements.txt --download-cache=/tmp/$JOB_NAME
 
 pushd solar/solar
 
-PYTHONPATH=$WORKSPACE/solar CONFIG_FILE=$CONFIG_FILE python test/test_signals.py
-PYTHONPATH=$WORKSPACE/solar CONFIG_FILE=$CONFIG_FILE python test/test_validation.py
+PYTHONPATH=$WORKSPACE/solar CONFIG_FILE=$CONFIG_FILE py.test test/
 
 popd
diff --git a/solar/requirements.txt b/solar/requirements.txt
index 670b4eaf..a3d48744 100644
--- a/solar/requirements.txt
+++ b/solar/requirements.txt
@@ -10,3 +10,5 @@ mock
 dictdiffer==0.4.0
 enum34==1.0.4
 redis==2.10.3
+pytest
+fakeredis
diff --git a/solar/solar/core/resource.py b/solar/solar/core/resource.py
index b3e7babf..4f36186e 100644
--- a/solar/solar/core/resource.py
+++ b/solar/solar/core/resource.py
@@ -24,9 +24,13 @@ class Resource(object):
     def __init__(self, name, metadata, args, tags=None):
         self.name = name
         self.metadata = metadata
-        self.actions = metadata['actions'].keys() if metadata['actions'] else None
+        self.actions = metadata.get('actions', {}).keys() or None
         self.args = {}
+        self.set_args(args)
+        self.changed = []
+        self.tags = tags or []
 
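+    # set_args skips anything not declared in metadata['input'], so stray
+    # arguments never become resource args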
+    def set_args(self, args):
         for arg_name, arg_value in args.items():
             if not self.metadata['input'].get(arg_name):
                 continue
@@ -39,11 +43,9 @@ class Resource(object):
                 value = metadata_arg['value']
 
             self.args[arg_name] = observer.create(type_, self, arg_name, value)
-        self.changed = []
-        self.tags = tags or []
 
     def __repr__(self):
-        return ("Resource(name='{name}', metadata={metadata}, args={args}, "
+        return ("Resource(name='{id}', metadata={metadata}, args={input}, "
                 "tags={tags})").format(**self.to_dict())
 
     def color_repr(self):
@@ -51,8 +53,8 @@ class Resource(object):
 
         arg_color = 'yellow'
 
-        return ("{resource_s}({name_s}='{name}', {metadata_s}={metadata}, "
-                "{args_s}={args}, {tags_s}={tags})").format(
+        return ("{resource_s}({name_s}='{id}', {metadata_s}={metadata}, "
+                "{args_s}={input}, {tags_s}={tags})").format(
             resource_s=click.style('Resource', fg='white', bold=True),
             name_s=click.style('name', fg=arg_color, bold=True),
             metadata_s=click.style('metadata', fg=arg_color, bold=True),
@@ -63,9 +65,9 @@ class Resource(object):
 
     def to_dict(self):
         return {
-            'name': self.name,
+            'id': self.name,
             'metadata': self.metadata,
-            'args': self.args_show(),
+            'input': self.args_show(),
             'tags': self.tags,
         }
diff --git a/solar/solar/core/signals.py b/solar/solar/core/signals.py
index 0b3a911d..04eca14f 100644
--- a/solar/solar/core/signals.py
+++ b/solar/solar/core/signals.py
@@ -110,10 +110,6 @@ class Connections(object):
 
     CLIENTS = {}
 
-    path = utils.read_config()[CLIENTS_CONFIG_KEY]
-    if os.path.exists(path):
-        os.remove(path)
-
     @staticmethod
     def flush():
         print 'FLUSHING Connections'
diff --git a/solar/solar/interfaces/db/__init__.py b/solar/solar/interfaces/db/__init__.py
index 92f519a6..306c3013 100644
--- a/solar/solar/interfaces/db/__init__.py
+++ b/solar/solar/interfaces/db/__init__.py
@@ -1,11 +1,10 @@
-from solar.interfaces.db.cached_file_system_db import CachedFileSystemDB
-from solar.interfaces.db.file_system_db import FileSystemDB
 from solar.interfaces.db.redis_db import RedisDB
+from solar.interfaces.db.redis_db import FakeRedisDB
 
 mapping = {
-    'cached_file_system': CachedFileSystemDB,
-    'file_system': FileSystemDB,
     'redis_db': RedisDB,
+    'fakeredis_db': FakeRedisDB
 }
 
 DB = None
diff --git a/solar/solar/interfaces/db/cached_file_system_db.py b/solar/solar/interfaces/db/cached_file_system_db.py
deleted file mode 100644
index d5b8b06c..00000000
--- a/solar/solar/interfaces/db/cached_file_system_db.py
+++ /dev/null
@@ -1,110 +0,0 @@
-from solar.third_party.dir_dbm import DirDBM
-
-import atexit
-import os
-import types
-import yaml
-
-from solar import utils
-from solar import errors
-
-
-class CachedFileSystemDB(DirDBM):
-    STORAGE_PATH = utils.read_config()['file-system-db']['storage-path']
-    RESOURCE_COLLECTION_NAME = 'resource'
-
-    _CACHE = {}
-
-    def __init__(self):
-        utils.create_dir(self.STORAGE_PATH)
-        super(CachedFileSystemDB, self).__init__(self.STORAGE_PATH)
-        self.entities = {}
-
-        atexit.register(self.flush)
-
-    def __setitem__(self, k, v):
-        """
-        C{dirdbm[k] = v}
-        Create or modify a textfile in this directory
-        @type k: strings @param k: key to setitem
-        @type v: strings @param v: value to associate with C{k}
-        """
-        assert type(k) == types.StringType, "DirDBM key must be a string"
-        # NOTE: Can be not a string if _writeFile in the child is redefined
-        # assert type(v) == types.StringType, "DirDBM value must be a string"
-        k = self._encode(k)
-
-        # we create a new file with extension .new, write the data to it, and
-        # if the write succeeds delete the old file and rename the new one.
-        old = os.path.join(self.dname, k)
-        if os.path.exists(old):
-            new = old + ".rpl"  # replacement entry
-        else:
-            new = old + ".new"  # new entry
-        try:
-            self._writeFile(old, v)
-        except:
-            raise
-
-    def get_resource(self, uid):
-        return self[self._make_key(self.RESOURCE_COLLECTION_NAME, uid)]
-
-    def get_obj_resource(self, uid):
-        from solar.core.resource import wrap_resource
-        raw_resource = self[self._make_key(self.RESOURCE_COLLECTION_NAME, uid)]
-
-        return wrap_resource(raw_resource)
-
-    def add_resource(self, uid, resource):
-        self[self._make_key(self.RESOURCE_COLLECTION_NAME, uid)] = resource
-
-    def store(self, collection, obj):
-        if 'id' in obj:
-            self[self._make_key(collection, obj['id'])] = obj
-        else:
-            raise errors.CannotFindID('Cannot find id for object {0}'.format(obj))
-
-    def store_list(self, collection, objs):
-        for obj in objs:
-            self.store(collection, obj)
-
-    def get_list(self, collection):
-        collection_keys = filter(
-            lambda k: k.startswith('{0}-'.format(collection)),
-            self.keys())
-
-        return map(lambda k: self[k], collection_keys)
-
-    def get_record(self, collection, _id):
-        key = self._make_key(collection, _id)
-        if key not in self:
-            return None
-
-        return self[key]
-
-    def _make_key(self, collection, _id):
-        return '{0}-{1}'.format(collection, _id)
-
-    def _readFile(self, path):
-        if path not in self._CACHE:
-            data = yaml.load(super(CachedFileSystemDB, self)._readFile(path))
-            self._CACHE[path] = data
-            return data
-
-        return self._CACHE[path]
-
-    def _writeFile(self, path, data):
-        self._CACHE[path] = data
-
-    def _encode(self, key):
-        """Override method of the parent not to use base64 as a key for encoding"""
-        return key
-
-    def _decode(self, key):
-        """Override method of the parent not to use base64 as a key for encoding"""
-        return key
-
-    def flush(self):
-        print 'FLUSHING DB'
-        for path, data in self._CACHE.items():
-            super(CachedFileSystemDB, self)._writeFile(path, yaml.dump(data))
diff --git a/solar/solar/interfaces/db/file_system_db.py b/solar/solar/interfaces/db/file_system_db.py
deleted file mode 100644
index 4c3c733e..00000000
--- a/solar/solar/interfaces/db/file_system_db.py
+++ /dev/null
@@ -1,70 +0,0 @@
-from solar.third_party.dir_dbm import DirDBM
-
-import yaml
-
-from solar import utils
-from solar import errors
-
-
-class FileSystemDB(DirDBM):
-    STORAGE_PATH = utils.read_config()['file-system-db']['storage-path']
-    RESOURCE_COLLECTION_NAME = 'resource'
-
-    def __init__(self):
-        utils.create_dir(self.STORAGE_PATH)
-        super(FileSystemDB, self).__init__(self.STORAGE_PATH)
-        self.entities = {}
-
-    def get_resource(self, uid):
-        return self[self._make_key(self.RESOURCE_COLLECTION_NAME, uid)]
-
-    def get_obj_resource(self, uid):
-        if not uid in self.entities:
-            from solar.core.resource import wrap_resource
-            raw_resource = self[self._make_key(self.RESOURCE_COLLECTION_NAME, uid)]
-            self.entities[uid] = wrap_resource(raw_resource)
-        return self.entities[uid]
-
-    def add_resource(self, uid, resource):
-        self[self._make_key(self.RESOURCE_COLLECTION_NAME, uid)] = resource
-
-    def store(self, collection, obj):
-        if 'id' in obj:
-            self[self._make_key(collection, obj['id'])] = obj
-        else:
-            raise errors.CannotFindID('Cannot find id for object {0}'.format(obj))
-
-    def store_list(self, collection, objs):
-        for obj in objs:
-            self.store(collection, obj)
-
-    def get_list(self, collection):
-        collection_keys = filter(
-            lambda k: k.startswith('{0}-'.format(collection)),
-            self.keys())
-
-        return map(lambda k: self[k], collection_keys)
-
-    def get_record(self, collection, _id):
-        key = self._make_key(collection, _id)
-        if key not in self:
-            return None
-
-        return self[key]
-
-    def _make_key(self, collection, _id):
-        return '{0}-{1}'.format(collection, _id)
-
-    def _readFile(self, path):
-        return yaml.load(super(FileSystemDB, self)._readFile(path))
-
-    def _writeFile(self, path, data):
-        return super(FileSystemDB, self)._writeFile(path, utils.yaml_dump(data))
-
-    def _encode(self, key):
-        """Override method of the parent not to use base64 as a key for encoding"""
-        return key
-
-    def _decode(self, key):
-        """Override method of the parent not to use base64 as a key for encoding"""
-        return key
diff --git a/solar/solar/interfaces/db/redis_db.py b/solar/solar/interfaces/db/redis_db.py
index 3c43ccb7..abd47710 100644
--- a/solar/solar/interfaces/db/redis_db.py
+++ b/solar/solar/interfaces/db/redis_db.py
@@ -1,6 +1,7 @@
 from enum import Enum
 import json
 import redis
+import fakeredis
 
 from solar import utils
 from solar import errors
@@ -15,9 +16,11 @@ class RedisDB(object):
         'host': 'localhost',
         'port': 6379,
     }
+    REDIS_CLIENT = redis.StrictRedis
+
     def __init__(self):
-        self._r = redis.StrictRedis(**self.DB)
+        self._r = self.REDIS_CLIENT(**self.DB)
         self.entities = {}
 
     def read(self, uid, collection=COLLECTIONS.resource):
@@ -34,6 +37,9 @@ class RedisDB(object):
             json.dumps(data)
         )
 
+    def delete(self, uid, collection):
+        return self._r.delete(self._make_key(collection, uid))
+
     def get_list(self, collection=COLLECTIONS.resource):
         key_glob = self._make_key(collection, '*')
 
@@ -44,4 +50,9 @@ class RedisDB(object):
         self._r.flushdb()
 
     def _make_key(self, collection, _id):
-        return '{0}:{1}'.format(collection, _id)
+        return '{0}:{1}'.format(collection.name, _id)
+
+
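+# Only the client class differs from RedisDB, so the test suite exercises
+# the same code paths against an in-memory fakeredis store.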
+class FakeRedisDB(RedisDB):
+
+    REDIS_CLIENT = fakeredis.FakeStrictRedis
diff --git a/solar/solar/operations.py b/solar/solar/operations.py
index a3e753fc..d9f00cec 100644
--- a/solar/solar/operations.py
+++ b/solar/solar/operations.py
@@ -42,18 +42,25 @@ def connections(res, graph):
 
 
 def to_dict(resource, graph):
-    return {'uid': resource.name,
-            'tags': resource.tags,
-            'args': resource.args_dict(),
-            'connections': connections(resource, graph)}
+    res = resource.to_dict()
+    res['connections'] = connections(resource, graph)
+    return res
 
 
-def stage_changes():
-    resources = resource.load_all()
-    conn_graph = signals.detailed_connection_graph()
+def create_diff(staged, commited):
+    if 'connections' in commited:
+        commited['connections'].sort()
+        staged['connections'].sort()
+    if 'tags' in commited:
+        commited['tags'].sort()
+        staged['tags'].sort()
+
+    return list(diff(commited, staged))
+
+
+def _stage_changes(staged_resources, conn_graph,
+                   commited_resources, staged_log):
 
-    commited = state.CD()
-    log = state.SL()
     action = None
 
     try:
@@ -64,17 +71,11 @@ def stage_changes():
         raise
 
     for res_uid in srt:
-        commited_data = commited.get(res_uid, {})
-        staged_data = to_dict(resources[res_uid], conn_graph)
+        commited_data = commited_resources.get(res_uid, {})
+        staged_data = staged_resources.get(res_uid, {})
 
-        if 'connections' in commited_data:
-            commited_data['connections'].sort()
-            staged_data['connections'].sort()
-        if 'tags' in commited_data:
-            commited_data['tags'].sort()
-            staged_data['tags'].sort()
+        df = create_diff(staged_data, commited_data)
 
-        df = list(diff(commited_data, staged_data))
         if df:
@@ -82,11 +83,22 @@ def stage_changes():
                 res_uid,
                 df,
                 guess_action(commited_data, staged_data))
-            log.add(log_item)
-    return log
+            staged_log.append(log_item)
+    return staged_log
+
+
+def stage_changes():
+    resources = resource.load_all()
+    conn_graph = signals.detailed_connection_graph()
+    staged = {r.name: to_dict(r, conn_graph) for r in resource.load_all().values()}
+    commited = state.CD()
+    log = state.SL()
+    log.delete()
+    return _stage_changes(staged, conn_graph, commited, log)
 
 
 def execute(res, action):
+    return state.STATES.success
     try:
         actions.resource_action(res, action)
         return state.STATES.success
@@ -94,22 +106,15 @@ def execute(res, action):
         return state.STATES.error
 
 
-def commit(li, resources):
-    commited = state.CD()
-    history = state.CL()
-    staged = state.SL()
+def commit(li, resources, commited, history):
     staged_res = resources[li.res]
-
     staged_data = patch(li.diff, commited.get(li.res, {}))
 
     # TODO(dshulyak) think about this hack for update
     if li.action == 'update':
-        commited_res = resource.Resource(
-            staged_res.name,
-            staged_res.metadata,
-            commited[li.res]['args'],
-            commited[li.res]['tags'])
+        commited_res = resource.wrap_resource(
+            commited[li.res]['metadata'])
         result_state = execute(commited_res, 'remove')
 
         if result_state is state.STATES.success:
@@ -123,16 +128,19 @@ def commit(li, resources, commited, history):
         commited[li.res] = staged_data
 
     li.state = result_state
-    history.add(li)
+    history.append(li)
 
     if result_state is state.STATES.error:
        raise Exception('Failed')
 
 
 def commit_one():
+    commited = state.CD()
+    history = state.CL()
     staged = state.SL()
+
     resources = resource.load_all()
 
-    commit(staged.popleft(), resources)
+    commit(staged.popleft(), resources, commited, history)
 
 
 def commit_changes():
@@ -143,7 +151,7 @@ def commit_changes():
     resources = resource.load_all()
 
     while staged:
-        commit(staged.popleft(), resources)
+        commit(staged.popleft(), resources, commited, history)
 
 
 def rollback(log_item):
@@ -160,18 +168,18 @@ def rollback(log_item):
 
     for e, r, mapping in staged.get('connections', ()):
        signals.connect(resources[e], resources[r], dict([mapping]))
 
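+    # create_diff sorts 'connections' and 'tags' before diffing, so pure
+    # ordering differences are not reported as changes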
-    df = list(diff(commited, staged))
+    df = create_diff(staged, commited)
 
     log_item = state.LogItem(
         utils.generate_uuid(),
         log_item.res,
         df,
         guess_action(commited, staged))
-    log.add(log_item)
+    log.append(log_item)
 
     res = resource.load(log_item.res)
-    res.update(staged.get('args', {}))
+    res.set_args(staged['input'])
     res.save()
 
-    return log
+    return log_item
 
 
 def rollback_uid(uid):
diff --git a/solar/solar/state.py b/solar/solar/state.py
index 0f3af6c2..d6c6d7c9 100644
--- a/solar/solar/state.py
+++ b/solar/solar/state.py
@@ -83,6 +83,10 @@ class Log(object):
                 l['diff'], l['action'],
                 getattr(STATES, l['state'])) for l in items])
 
+    def delete(self):
+        self.items = deque()
+        db.delete(self.path, db.COLLECTIONS.state_log)
+
     def sync(self):
         db.save(
             self.path,
@@ -90,7 +94,7 @@ class Log(object):
             collection=db.COLLECTIONS.state_log
         )
 
-    def add(self, logitem):
+    def append(self, logitem):
         self.items.append(logitem)
         self.sync()
 
@@ -108,6 +112,9 @@ class Log(object):
         return ['L(uuid={0}, res={1}, action={2})'.format(
             l.uid, l.res, l.action) for l in self.items]
 
+    def __len__(self):
+        return len(self.items)
+
     def __repr__(self):
         return 'Log({0})'.format(self.path)
diff --git a/solar/solar/test/conftest.py b/solar/solar/test/conftest.py
new file mode 100644
index 00000000..10845b22
--- /dev/null
+++ b/solar/solar/test/conftest.py
@@ -0,0 +1,24 @@
+
+import os
+
+from pytest import fixture
+
+from solar.interfaces import db
+from solar import utils
+
+
+def pytest_configure():
+    db.DB = db.mapping['fakeredis_db']()
+
+
+@fixture(autouse=True)
+def cleanup(request):
+
+    def fin():
+        from solar.core import signals
+
+        db.get_db().clear()
+        signals.Connections.clear()
+
+    request.addfinalizer(fin)
+
diff --git a/solar/solar/test/test_diff_generation.py b/solar/solar/test/test_diff_generation.py
new file mode 100644
index 00000000..8b06d9ee
--- /dev/null
+++ b/solar/solar/test/test_diff_generation.py
@@ -0,0 +1,109 @@
+
+from pytest import fixture
+import mock
+from dictdiffer import revert, patch, diff
+import networkx as nx
+
+from solar import operations
+from solar.core.resource import wrap_resource
+
+
+@fixture
+def staged():
+    return {'id': 'res.1',
+            'tags': ['res', 'node.1'],
+            'input': {'ip': {'value': '10.0.0.2'},
+                      'list_val': {'value': [1, 2]}},
+            'metadata': {},
+            'connections': [
+                ['node.1', 'res.1', ['ip', 'ip']],
+                ['node.1', 'res.1', ['key', 'key']]]
+            }
+
+@fixture
+def commited():
+    return {'id': 'res.1',
+            'tags': ['res', 'node.1'],
+            'input': {'ip': '10.0.0.2',
+                      'list_val': [1]},
+            'metadata': {},
+            'connections': [
+                ['node.1', 'res.1', ['ip', 'ip']]]
+            }
+
+@fixture
+def full_diff(staged):
+    return operations.create_diff(staged, {})
+
+
+@fixture
+def diff_for_update(staged, commited):
+    return operations.create_diff(staged, commited)
+
+
+def test_create_diff_with_empty_commited(full_diff):
+    # add will be executed
+    expected = [('add', '', [('connections', [['node.1', 'res.1', ['ip', 'ip']], ['node.1', 'res.1', ['key', 'key']]]), ('input', {'ip': {'value': '10.0.0.2'}, 'list_val': {'value': [1, 2]}}), ('metadata', {}), ('id', 'res.1'), ('tags', ['res', 'node.1'])])]
+    assert full_diff == expected
+
+
+def test_create_diff_modified(diff_for_update):
+    assert diff_for_update == [
+        ('add', 'connections',
+         [(1, ['node.1', 'res.1', ['key', 'key']])]),
+        ('change', 'input.ip', ('10.0.0.2', {'value': '10.0.0.2'})),
+        ('change', 'input.list_val', ([1], {'value': [1, 2]}))]
+
+
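+# patch/revert from dictdiffer are inverses: patching commited with the diff
+# yields staged, and reverting the same diff from staged restores commited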
+def test_verify_patch_creates_expected(staged, diff_for_update, commited):
+    expected = patch(diff_for_update, commited)
+    assert expected == staged
+
+
+def test_revert_update(staged, diff_for_update, commited):
+    expected = revert(diff_for_update, staged)
+    assert expected == commited
+
+
+@fixture
+def resources():
+    r = {'n.1':
+         {'uid': 'n.1',
+          'args': {'ip': '10.20.0.2'},
+          'connections': [],
+          'tags': []},
+         'r.1':
+         {'uid': 'r.1',
+          'args': {'ip': '10.20.0.2'},
+          'connections': [['n.1', 'r.1', ['ip', 'ip']]],
+          'tags': []},
+         'h.1':
+         {'uid': 'h.1',
+          'args': {'ip': '10.20.0.2',
+                   'ips': ['10.20.0.2']},
+          'connections': [['n.1', 'h.1', ['ip', 'ip']]],
+          'tags': []}}
+    return r
+
+@fixture
+def conn_graph():
+    edges = [
+        ('n.1', 'r.1', {'label': 'ip:ip'}),
+        ('n.1', 'h.1', {'label': 'ip:ip'}),
+        ('r.1', 'h.1', {'label': 'ip:ips'})
+    ]
+    mdg = nx.MultiDiGraph()
+    mdg.add_edges_from(edges)
+    return mdg
+
+
+def test_stage_changes(resources, conn_graph):
+    commited = {}
+    log = operations._stage_changes(resources, conn_graph, commited, [])
+
+    assert len(log) == 3
+    assert [l.res for l in log] == ['n.1', 'r.1', 'h.1']
+
+
+def test_resource_fixture(staged):
+    res = wrap_resource(staged)
diff --git a/solar/solar/test/test_stage_commit_procedure.py b/solar/solar/test/test_stage_commit_procedure.py
new file mode 100644
index 00000000..7da48872
--- /dev/null
+++ b/solar/solar/test/test_stage_commit_procedure.py
@@ -0,0 +1,77 @@
+
+import pytest
+from mock import patch
+
+from solar.core import resource
+from solar import operations
+from solar import state
+
+
+@pytest.fixture
+def default_resources():
+    from solar.core import signals
+    from solar.core import resource
+
+    node1 = resource.wrap_resource(
+        {'id': 'node1',
+         'input': {'ip': {'value': '10.0.0.3'}}})
+    node1.save()
+    rabbitmq_service1 = resource.wrap_resource(
+        {'id': 'rabbitmq', 'input': {
+            'ip': {'value': ''},
+            'image': {'value': 'rabbitmq:3-management'}}})
+    rabbitmq_service1.save()
+    signals.connect(node1, rabbitmq_service1)
+    return resource.load_all()
+
+
+@patch('solar.core.actions.resource_action')
+@pytest.mark.usefixtures("default_resources")
+def test_changes_on_update_image(maction):
+    log = operations.stage_changes()
+
+    assert len(log) == 2
+
+    operations.commit_changes()
+
+    rabbitmq = resource.load('rabbitmq')
+    rabbitmq.update({'image': 'different'})
+    log = operations.stage_changes()
+
+    assert len(log) == 1
+
+    item = log.items[0]
+
+    assert item.diff == [
+        ('change', u'input.image.value',
+         (u'rabbitmq:3-management', u'different')),
+        ('change', u'metadata.input.image.value',
+         (u'rabbitmq:3-management', u'different'))]
+
+    assert item.action == 'update'
+
+    operations.commit_changes()
+
+    commited = state.CD()
+
+    assert commited['rabbitmq']['input']['image'] == {
+        u'emitter': None, u'value': u'different'}
+
+    reverse = operations.rollback(state.CL().items[-1])
+
+    assert reverse.diff == [
+        ('change', u'input.image.value',
+         (u'different', u'rabbitmq:3-management')),
+        ('change', u'metadata.input.image.value',
+         (u'different', u'rabbitmq:3-management'))]
+
+    operations.commit_changes()
+
+    commited = state.CD()
+
+    assert commited['rabbitmq']['input']['image'] == {
+        u'emitter': None, u'value': u'rabbitmq:3-management'}
diff --git a/solar/solar/test/test_update_propagated_data.py b/solar/solar/test/test_update_propagated_data.py
new file mode 100644
index 00000000..0134d177
--- /dev/null
+++ b/solar/solar/test/test_update_propagated_data.py
@@ -0,0 +1,124 @@
+import pytest
+from mock import patch
+
+from solar.core import signals
+from solar.core import resource
+from solar import operations
+from solar import state
+
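+# node1 feeds 'ip' to both services; mariadb's 'port' additionally drives
+# keystone_db's 'login_port', so a port change must propagate to keystone_db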
+@pytest.fixture
+def resources():
+
+    node1 = resource.wrap_resource(
+        {'id': 'node1',
+         'input': {'ip': {'value': '10.0.0.3'}}})
+    node1.save()
+    mariadb_service1 = resource.wrap_resource(
+        {'id': 'mariadb', 'input': {
+            'port': {'value': 3306},
+            'ip': {'value': ''}}})
+    mariadb_service1.save()
+    keystone_db = resource.wrap_resource(
+        {'id': 'keystone_db',
+         'input': {
+             'login_port': {'value': ''},
+             'ip': {'value': ''}}})
+    keystone_db.save()
+    signals.connect(node1, mariadb_service1)
+    signals.connect(node1, keystone_db)
+    signals.connect(mariadb_service1, keystone_db, {'port': 'login_port'})
+    return resource.load_all()
+
+
+@patch('solar.core.actions.resource_action')
+def test_update_port_on_mariadb(maction, resources):
+    operations.stage_changes()
+    operations.commit_changes()
+
+    mariadb = resources['mariadb']
+
+    mariadb.update({'port': 4400})
+
+    log = operations.stage_changes()
+
+    assert len(log) == 2
+
+    mariadb_log = log.items[0]
+
+    assert mariadb_log.diff == [
+        ('change', u'input.port.value', (3306, 4400)),
+        ('change', u'metadata.input.port.value', (3306, 4400))]
+
+    keystone_db_log = log.items[1]
+
+    assert keystone_db_log.diff == [
+        ('change', u'input.login_port.value', (3306, 4400)),
+        ('change', u'metadata.input.login_port.value', (3306, 4400))]
+
+
+@pytest.fixture
+def list_input():
+    res1 = resource.wrap_resource(
+        {'id': 'res1', 'input': {'ip': {'value': '10.10.0.2'}}})
+    res1.save()
+    res2 = resource.wrap_resource(
+        {'id': 'res2', 'input': {'ip': {'value': '10.10.0.3'}}})
+    res2.save()
+    consumer = resource.wrap_resource(
+        {'id': 'consumer', 'input':
+            {'ips': {'value': [],
+                     'schema': ['str']}}})
+    consumer.save()
+
+    signals.connect(res1, consumer, {'ip': 'ips'})
+    signals.connect(res2, consumer, {'ip': 'ips'})
+    return resource.load_all()
+
+
+@patch('solar.core.actions.resource_action')
+def test_update_list_resource(maction, list_input):
+    operations.stage_changes()
+    operations.commit_changes()
+
+    res3 = resource.wrap_resource(
+        {'id': 'res3', 'input': {'ip': {'value': '10.10.0.4'}}})
+    res3.save()
+    signals.connect(res3, list_input['consumer'], {'ip': 'ips'})
+
+    log = operations.stage_changes()
+
+    assert len(log) == 2
+
+    assert log.items[0].res == res3.name
+    assert log.items[1].diff == [
+        ('add', u'connections', [(2, ['res3', u'consumer', ['ip', 'ips']])]),
+        ('add', u'input.ips', [
+            (2, {u'emitter_attached_to': u'res3', u'emitter': u'ip', u'value': u'10.10.0.4'})]),
+        ('add', u'metadata.input.ips.value',
+         [(2, {u'emitter_attached_to': u'res3', u'emitter': u'ip', u'value': u'10.10.0.4'})])]
+
+    operations.commit_changes()
+    assert list_input['consumer'].args_dict() == {
+        u'ips': [
+            {u'emitter_attached_to': u'res1', u'emitter': u'ip', u'value': u'10.10.0.2'},
+            {u'emitter_attached_to': u'res2', u'emitter': u'ip', u'value': u'10.10.0.3'},
+            {'emitter_attached_to': 'res3', 'emitter': 'ip', 'value': '10.10.0.4'}]}
+
+    log_item = operations.rollback_last()
+    assert log_item.diff == [
+        ('remove', u'connections', [(2, ['res3', u'consumer', ['ip', 'ips']])]),
+        ('remove', u'input.ips', [
+            (2, {u'emitter_attached_to': u'res3', u'emitter': u'ip', u'value': u'10.10.0.4'})]),
+        ('remove', u'metadata.input.ips.value',
+         [(2, {u'emitter_attached_to': u'res3', u'emitter': u'ip', u'value': u'10.10.0.4'})])]
+
+    consumer = resource.load('consumer')
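+    # the rollback detached res3 again, so only the original two emitters
+    # remain in 'ips'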
+    assert consumer.args_dict() == {
+        u'ips': [{u'emitter': u'ip',
+                  u'emitter_attached_to': u'res1',
+                  u'value': u'10.10.0.2'},
+                 {u'emitter': u'ip',
+                  u'emitter_attached_to': u'res2',
+                  u'value': u'10.10.0.3'}]}
diff --git a/solar/solar/third_party/__init__.py b/solar/solar/third_party/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/solar/solar/third_party/dir_dbm.py b/solar/solar/third_party/dir_dbm.py
deleted file mode 100644
index cf7a4624..00000000
--- a/solar/solar/third_party/dir_dbm.py
+++ /dev/null
@@ -1,297 +0,0 @@
-# -*- test-case-name: twisted.test.test_dirdbm -*-
-#
-# Copyright (c) Twisted Matrix Laboratories.
-# See LICENSE for details.
-
-
-
-"""
-DBM-style interface to a directory.
-Each key is stored as a single file. This is not expected to be very fast or
-efficient, but it's good for easy debugging.
-DirDBMs are *not* thread-safe, they should only be accessed by one thread at
-a time.
-No files should be placed in the working directory of a DirDBM save those
-created by the DirDBM itself!
-Maintainer: Itamar Shtull-Trauring
-"""
-
-
-import os
-import types
-import base64
-import glob
-
-try:
-    import cPickle as pickle
-except ImportError:
-    import pickle
-
-try:
-    _open
-except NameError:
-    _open = open
-
-
-class DirDBM(object):
-    """A directory with a DBM interface.
-
-    This class presents a hash-like interface to a directory of small,
-    flat files. It can only use strings as keys or values.
-    """
-
-    def __init__(self, name):
-        """
-        @type name: strings
-        @param name: Base path to use for the directory storage.
-        """
-        self.dname = os.path.abspath(name)
-        if not os.path.isdir(self.dname):
-            os.mkdir(self.dname)
-        else:
-            # Run recovery, in case we crashed. we delete all files ending
-            # with ".new". Then we find all files who end with ".rpl". If a
-            # corresponding file exists without ".rpl", we assume the write
-            # failed and delete the ".rpl" file. If only a ".rpl" exist we
-            # assume the program crashed right after deleting the old entry
-            # but before renaming the replacement entry.
-            #
-            # NOTE: '.' is NOT in the base64 alphabet!
-            for f in glob.glob(os.path.join(self.dname, "*.new")):
-                os.remove(f)
-            replacements = glob.glob(os.path.join(self.dname, "*.rpl"))
-            for f in replacements:
-                old = f[:-4]
-                if os.path.exists(old):
-                    os.remove(f)
-                else:
-                    os.rename(f, old)
-
-    def _encode(self, k):
-        """Encode a key so it can be used as a filename.
-        """
-        # NOTE: '_' is NOT in the base64 alphabet!
-        return base64.encodestring(k).replace('\n', '_').replace("/", "-")
-
-    def _decode(self, k):
-        """Decode a filename to get the key.
-        """
-        return base64.decodestring(k.replace('_', '\n').replace("-", "/"))
-
-    def _readFile(self, path):
-        """Read in the contents of a file.
-
-        Override in subclasses to e.g. provide transparently encrypted dirdbm.
-        """
-        f = _open(path, "rb")
-        s = f.read()
-        f.close()
-        return s
-
-    def _writeFile(self, path, data):
-        """Write data to a file.
-
-        Override in subclasses to e.g. provide transparently encrypted dirdbm.
- """ - f = _open(path, "wb") - f.write(data) - f.flush() - f.close() - - def __len__(self): - """ - @return: The number of key/value pairs in this Shelf - """ - return len(os.listdir(self.dname)) - - def __setitem__(self, k, v): - """ - C{dirdbm[k] = v} - Create or modify a textfile in this directory - @type k: strings @param k: key to setitem - @type v: strings @param v: value to associate with C{k} - """ - assert type(k) == types.StringType, "DirDBM key must be a string" - # NOTE: Can be not a string if _writeFile in the child is redefined - # assert type(v) == types.StringType, "DirDBM value must be a string" - k = self._encode(k) - - # we create a new file with extension .new, write the data to it, and - # if the write succeeds delete the old file and rename the new one. - old = os.path.join(self.dname, k) - if os.path.exists(old): - new = old + ".rpl" # replacement entry - else: - new = old + ".new" # new entry - try: - self._writeFile(new, v) - except: - os.remove(new) - raise - else: - if os.path.exists(old): os.remove(old) - os.rename(new, old) - - def __getitem__(self, k): - """ - C{dirdbm[k]} - Get the contents of a file in this directory as a string. - - @type k: string @param k: key to lookup - - @return: The value associated with C{k} - @raise KeyError: Raised when there is no such keyerror - """ - assert type(k) == types.StringType, "DirDBM key must be a string" - path = os.path.join(self.dname, self._encode(k)) - try: - return self._readFile(path) - except Exception as exc: - raise KeyError, k - - def __delitem__(self, k): - """ - C{del dirdbm[foo]} - Delete a file in this directory. - - @type k: string - @param k: key to delete - - @raise KeyError: Raised when there is no such keyerror - """ - assert type(k) == types.StringType, "DirDBM key must be a string" - k = self._encode(k) - try: os.remove(os.path.join(self.dname, k)) - except (OSError, IOError): raise KeyError(self._decode(k)) - - def keys(self): - """ - @return: a C{list} of filenames (keys). - """ - return map(self._decode, os.listdir(self.dname)) - - def values(self): - """ - @return: a C{list} of file-contents (values). - """ - vals = [] - keys = self.keys() - for key in keys: - vals.append(self[key]) - return vals - - def items(self): - """ - @return: a C{list} of 2-tuples containing key/value pairs. - """ - items = [] - keys = self.keys() - for key in keys: - items.append((key, self[key])) - return items - - def has_key(self, key): - """ - @type key: string - @param key: The key to test - - @return: A true value if this dirdbm has the specified key, a faluse - value otherwise. - """ - assert type(key) == types.StringType, "DirDBM key must be a string" - key = self._encode(key) - return os.path.isfile(os.path.join(self.dname, key)) - - def setdefault(self, key, value): - """ - @type key: string - @param key: The key to lookup - - @param value: The value to associate with key if key is not already - associated with a value. 
- """ - if not self.has_key(key): - self[key] = value - return value - return self[key] - - def get(self, key, default = None): - """ - @type key: string - @param key: The key to lookup - - @param default: The value to return if the given key does not exist - @return: The value associated with C{key} or C{default} if note - C{self.has_key(key)} - """ - if self.has_key(key): - return self[key] - else: - return default - - def __contains__(self, key): - """ - C{key in dirdbm} - @type key: string - @param key: The key to test - - @return: A true value if C{self.has_key(key)}, a false value otherwise. - """ - assert type(key) == types.StringType, "DirDBM key must be a string" - key = self._encode(key) - return os.path.isfile(os.path.join(self.dname, key)) - - def update(self, dict): - """ - Add all the key/value pairs in C{dict} to this dirdbm. Any conflicting - keys will be overwritten with the values from C{dict}. - @type dict: mapping - @param dict: A mapping of key/value pairs to add to this dirdbm. - """ - for key, val in dict.items(): - self[key]=valid - - def copyTo(self, path): - """ - Copy the contents of this dirdbm to the dirdbm at C{path}. - - @type path: C{str} - @param path: The path of the dirdbm to copy to. If a dirdbm - exists at the destination path, it is cleared first. - - @rtype: C{DirDBM} - @return: The dirdbm this dirdbm was copied to. - """ - path = os.path.abspath(path) - assert path != self.dname - - d = self.__class__(path) - d.clear() - for k in self.keys(): - d[k] = self[k] - return data - - def clear(self): - """ - Delete all key/value pairs in this dirdbm. - """ - for k in self.keys(): - del self[k] - - def close(self): - """ - Close this dbm: no-op, for dbm-style interface compliance. - """ - - def getModificationTime(self, key): - """ - Returns modification time of an entry. - - @return: Last modification date (seconds since epoch) of entry C{key} - @raise KeyError: Raised when there is no such keyerror - """ - assert type(key) == types.StringType, "DirDBM key must be a string" - path = os.path.join(self.dname, self._encode(key)) - if os.path.isfile(path): - return os.path.getmtime(path) - else: - raise KeyError, key