Merge branch 'master' into redis-proxied-resource

Przemyslaw Kaminski 2015-06-12 09:39:12 +02:00
commit c394ea560e
16 changed files with 412 additions and 550 deletions

.gitignore

@@ -16,3 +16,4 @@ clients.json
rs/
solar.log
x-venv/


@@ -20,8 +20,6 @@ pip install -r solar/requirements.txt --download-cache=/tmp/$JOB_NAME
pushd solar/solar
PYTHONPATH=$WORKSPACE/solar CONFIG_FILE=$CONFIG_FILE python test/test_resource.py
PYTHONPATH=$WORKSPACE/solar CONFIG_FILE=$CONFIG_FILE python test/test_signals.py
PYTHONPATH=$WORKSPACE/solar CONFIG_FILE=$CONFIG_FILE python test/test_validation.py
PYTHONPATH=$WORKSPACE/solar CONFIG_FILE=$CONFIG_FILE py.test test/
popd


@@ -10,3 +10,5 @@ mock
dictdiffer==0.4.0
enum34==1.0.4
redis==2.10.3
pytest
fakeredis


@@ -23,7 +23,6 @@ class Resource(object):
self.name = name
self.metadata = metadata
# TODO: read tags from DB on demand
self.tags = tags or []
self.set_args_from_dict(args)
@@ -76,7 +75,7 @@ class Resource(object):
self.set_args_from_dict({k: v.value for k, v in args.items()})
def __repr__(self):
return ("Resource(name='{name}', metadata={metadata}, args={args}, "
return ("Resource(name='{id}', metadata={metadata}, args={input}, "
"tags={tags})").format(**self.to_dict())
def color_repr(self):
@@ -84,8 +83,8 @@ class Resource(object):
arg_color = 'yellow'
return ("{resource_s}({name_s}='{name}', {metadata_s}={metadata}, "
"{args_s}={args}, {tags_s}={tags})").format(
return ("{resource_s}({name_s}='{id}', {metadata_s}={metadata}, "
"{args_s}={input}, {tags_s}={tags})").format(
resource_s=click.style('Resource', fg='white', bold=True),
name_s=click.style('name', fg=arg_color, bold=True),
metadata_s=click.style('metadata', fg=arg_color, bold=True),
@@ -96,9 +95,9 @@ class Resource(object):
def to_dict(self):
return {
'name': self.name,
'id': self.name,
'metadata': self.metadata,
'args': self.args_show(),
'input': self.args_show(),
'tags': self.tags,
}
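
For orientation, this is the serialized shape after the rename (a hedged illustration; the keys 'id' and 'input' come from the hunk above, the sample values are made up):

# Hypothetical resource serialized with the new to_dict():
r = {
    'id': 'node1',                            # was 'name'
    'metadata': {},
    'input': {'ip': {'value': '10.0.0.3'}},   # was 'args'
    'tags': [],
}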


@@ -1,11 +1,10 @@
from solar.interfaces.db.cached_file_system_db import CachedFileSystemDB
from solar.interfaces.db.file_system_db import FileSystemDB
from solar.interfaces.db.redis_db import RedisDB
from solar.interfaces.db.redis_db import FakeRedisDB
mapping = {
'cached_file_system': CachedFileSystemDB,
'file_system': FileSystemDB,
'redis_db': RedisDB,
'fakeredis_db': FakeRedisDB
}
DB = None
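
A minimal sketch of how a backend is chosen through this mapping (the same pattern the new conftest.py below uses; 'fakeredis_db' is the key added here):

from solar.interfaces import db

# swap in the in-memory backend, e.g. for tests; production code would
# pick 'redis_db' or one of the file-system backends the same way
db.DB = db.mapping['fakeredis_db']()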


@@ -1,110 +0,0 @@
from solar.third_party.dir_dbm import DirDBM
import atexit
import os
import types
import yaml
from solar import utils
from solar import errors
class CachedFileSystemDB(DirDBM):
STORAGE_PATH = utils.read_config()['file-system-db']['storage-path']
RESOURCE_COLLECTION_NAME = 'resource'
_CACHE = {}
def __init__(self):
utils.create_dir(self.STORAGE_PATH)
super(CachedFileSystemDB, self).__init__(self.STORAGE_PATH)
self.entities = {}
atexit.register(self.flush)
def __setitem__(self, k, v):
"""
C{dirdbm[k] = v}
Create or modify a textfile in this directory
@type k: str @param k: key to set
@type v: str @param v: value to associate with C{k}
"""
assert type(k) == types.StringType, "DirDBM key must be a string"
# NOTE: the value may not be a string if _writeFile is redefined in a subclass
# assert type(v) == types.StringType, "DirDBM value must be a string"
k = self._encode(k)
# we create a new file with extension .new, write the data to it, and
# if the write succeeds delete the old file and rename the new one.
old = os.path.join(self.dname, k)
if os.path.exists(old):
new = old + ".rpl" # replacement entry
else:
new = old + ".new" # new entry
try:
self._writeFile(old, v)
except:
raise
def get_resource(self, uid):
return self[self._make_key(self.RESOURCE_COLLECTION_NAME, uid)]
def get_obj_resource(self, uid):
from solar.core.resource import wrap_resource
raw_resource = self[self._make_key(self.RESOURCE_COLLECTION_NAME, uid)]
return wrap_resource(raw_resource)
def add_resource(self, uid, resource):
self[self._make_key(self.RESOURCE_COLLECTION_NAME, uid)] = resource
def store(self, collection, obj):
if 'id' in obj:
self[self._make_key(collection, obj['id'])] = obj
else:
raise errors.CannotFindID('Cannot find id for object {0}'.format(obj))
def store_list(self, collection, objs):
for obj in objs:
self.store(collection, obj)
def get_list(self, collection):
collection_keys = filter(
lambda k: k.startswith('{0}-'.format(collection)),
self.keys())
return map(lambda k: self[k], collection_keys)
def get_record(self, collection, _id):
key = self._make_key(collection, _id)
if key not in self:
return None
return self[key]
def _make_key(self, collection, _id):
return '{0}-{1}'.format(collection, _id)
def _readFile(self, path):
if path not in self._CACHE:
data = yaml.load(super(CachedFileSystemDB, self)._readFile(path))
self._CACHE[path] = data
return data
return self._CACHE[path]
def _writeFile(self, path, data):
self._CACHE[path] = data
def _encode(self, key):
"""Override method of the parent not to use base64 as a key for encoding"""
return key
def _decode(self, key):
"""Override method of the parent not to use base64 as a key for encoding"""
return key
def flush(self):
print 'FLUSHING DB'
for path, data in self._CACHE.items():
super(CachedFileSystemDB, self)._writeFile(path, yaml.dump(data))


@@ -1,70 +0,0 @@
from solar.third_party.dir_dbm import DirDBM
import yaml
from solar import utils
from solar import errors
class FileSystemDB(DirDBM):
STORAGE_PATH = utils.read_config()['file-system-db']['storage-path']
RESOURCE_COLLECTION_NAME = 'resource'
def __init__(self):
utils.create_dir(self.STORAGE_PATH)
super(FileSystemDB, self).__init__(self.STORAGE_PATH)
self.entities = {}
def get_resource(self, uid):
return self[self._make_key(self.RESOURCE_COLLECTION_NAME, uid)]
def get_obj_resource(self, uid):
if uid not in self.entities:
from solar.core.resource import wrap_resource
raw_resource = self[self._make_key(self.RESOURCE_COLLECTION_NAME, uid)]
self.entities[uid] = wrap_resource(raw_resource)
return self.entities[uid]
def add_resource(self, uid, resource):
self[self._make_key(self.RESOURCE_COLLECTION_NAME, uid)] = resource
def store(self, collection, obj):
if 'id' in obj:
self[self._make_key(collection, obj['id'])] = obj
else:
raise errors.CannotFindID('Cannot find id for object {0}'.format(obj))
def store_list(self, collection, objs):
for obj in objs:
self.store(collection, obj)
def get_list(self, collection):
collection_keys = filter(
lambda k: k.startswith('{0}-'.format(collection)),
self.keys())
return map(lambda k: self[k], collection_keys)
def get_record(self, collection, _id):
key = self._make_key(collection, _id)
if key not in self:
return None
return self[key]
def _make_key(self, collection, _id):
return '{0}-{1}'.format(collection, _id)
def _readFile(self, path):
return yaml.load(super(FileSystemDB, self)._readFile(path))
def _writeFile(self, path, data):
return super(FileSystemDB, self)._writeFile(path, utils.yaml_dump(data))
def _encode(self, key):
"""Override method of the parent not to use base64 as a key for encoding"""
return key
def _decode(self, key):
"""Override method of the parent not to use base64 as a key for encoding"""
return key


@@ -1,6 +1,7 @@
from enum import Enum
import json
import redis
import fakeredis
from solar import utils
from solar import errors
@@ -15,9 +16,11 @@ class RedisDB(object):
'host': 'localhost',
'port': 6379,
}
REDIS_CLIENT = redis.StrictRedis
def __init__(self):
self._r = redis.StrictRedis(**self.DB)
self._r = self.REDIS_CLIENT(**self.DB)
self.entities = {}
def read(self, uid, collection=COLLECTIONS.resource):
@@ -28,26 +31,6 @@ class RedisDB(object):
except TypeError:
return None
def save(self, uid, data, collection=COLLECTIONS.resource):
ret = self._r.set(
self._make_key(collection, uid),
json.dumps(data)
)
self._r.save()
return ret
def save_list(self, lst, collection=COLLECTIONS.resource):
with self._r.pipeline() as pipe:
pipe.multi()
for uid, data in lst:
key = self._make_key(collection, uid)
pipe.set(key, json.dumps(data))
pipe.execute()
def get_list(self, collection=COLLECTIONS.resource):
key_glob = self._make_key(collection, '*')
@@ -63,6 +46,24 @@ class RedisDB(object):
for value in values:
yield json.loads(value)
def save(self, uid, data, collection=COLLECTIONS.resource):
ret = self._r.set(
self._make_key(collection, uid),
json.dumps(data)
)
return ret
def save_list(self, lst, collection=COLLECTIONS.resource):
with self._r.pipeline() as pipe:
pipe.multi()
for uid, data in lst:
key = self._make_key(collection, uid)
pipe.set(key, json.dumps(data))
pipe.execute()
def clear(self):
self._r.flushdb()
@@ -79,3 +80,8 @@ class RedisDB(object):
collection = collection.name
return '{0}:{1}'.format(collection, _id)
class FakeRedisDB(RedisDB):
REDIS_CLIENT = fakeredis.FakeStrictRedis
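
A hedged sketch of the round trip this subclass enables in tests, using only methods visible in the hunks above (the uid and payload are illustrative):

db = FakeRedisDB()                          # backed by fakeredis.FakeStrictRedis
db.save('node1', {'id': 'node1'})           # stored as JSON under 'resource:node1'
assert db.read('node1') == {'id': 'node1'}  # json.loads on the way out
db.clear()                                  # flushdb, used between tests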


@@ -43,18 +43,25 @@ def connections(res, graph):
def to_dict(resource, graph):
return {'uid': resource.name,
'tags': resource.tags,
'args': resource.args_dict(),
'connections': connections(resource, graph)}
res = resource.to_dict()
res['connections'] = connections(resource, graph)
return res
def stage_changes():
resources = resource.load_all()
conn_graph = signals.detailed_connection_graph()
def create_diff(staged, commited):
if 'connections' in commited:
commited['connections'].sort()
staged['connections'].sort()
if 'tags' in commited:
commited['tags'].sort()
staged['tags'].sort()
return list(diff(commited, staged))
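
create_diff() builds on dictdiffer, so the diff it returns can be re-applied with patch() and undone with revert(), which the new tests below rely on. A minimal standalone sketch (values illustrative):

from dictdiffer import diff, patch, revert

commited = {'tags': ['node.1']}
staged = {'tags': ['node.1', 'res']}
df = list(diff(commited, staged))
assert patch(df, commited) == staged   # replay the change
assert revert(df, staged) == commited  # undo it
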
def _stage_changes(staged_resources, conn_graph,
commited_resources, staged_log):
commited = state.CD()
log = state.SL()
action = None
try:
@@ -65,17 +72,11 @@ def stage_changes():
raise
for res_uid in srt:
commited_data = commited.get(res_uid, {})
staged_data = to_dict(resources[res_uid], conn_graph)
commited_data = commited_resources.get(res_uid, {})
staged_data = staged_resources.get(res_uid, {})
if 'connections' in commited_data:
commited_data['connections'].sort()
staged_data['connections'].sort()
if 'tags' in commited_data:
commited_data['tags'].sort()
staged_data['tags'].sort()
df = create_diff(staged_data, commited_data)
df = list(diff(commited_data, staged_data))
if df:
log_item = state.LogItem(
@@ -83,11 +84,22 @@ def stage_changes():
res_uid,
df,
guess_action(commited_data, staged_data))
log.add(log_item)
return log
staged_log.append(log_item)
return staged_log
def stage_changes():
resources = resource.load_all()
conn_graph = signals.detailed_connection_graph()
staged = {r.name: to_dict(r, conn_graph) for r in resource.load_all().values()}
commited = state.CD()
log = state.SL()
log.delete()
return _stage_changes(staged, conn_graph, commited, log)
def execute(res, action):
return state.STATES.success
try:
actions.resource_action(res, action)
return state.STATES.success
@@ -95,22 +107,15 @@ def execute(res, action):
return state.STATES.error
def commit(li, resources):
commited = state.CD()
history = state.CL()
staged = state.SL()
def commit(li, resources, commited, history):
staged_res = resources[li.res]
staged_data = patch(li.diff, commited.get(li.res, {}))
# TODO(dshulyak) think about this hack for update
if li.action == 'update':
commited_res = resource.Resource(
staged_res.name,
staged_res.metadata,
commited[li.res]['args'],
commited[li.res]['tags'])
commited_res = resource.wrap_resource(
commited[li.res]['metadata'])
result_state = execute(commited_res, 'remove')
if result_state is state.STATES.success:
@@ -124,16 +129,19 @@ def commit(li, resources):
commited[li.res] = staged_data
li.state = result_state
history.add(li)
history.append(li)
if result_state is state.STATES.error:
raise Exception('Failed')
def commit_one():
commited = state.CD()
history = state.CL()
staged = state.SL()
resources = resource.load_all()
commit(staged.popleft(), resources)
commit(staged.popleft(), resources, commited, history)
def commit_changes():
@@ -144,7 +152,7 @@ def commit_changes():
resources = resource.load_all()
while staged:
commit(staged.popleft(), resources)
commit(staged.popleft(), resources, commited, history)
def rollback(log_item):
@@ -161,18 +169,17 @@ def rollback(log_item):
for e, r, mapping in staged.get('connections', ()):
signals.connect(resources[e], resources[r], dict([mapping]))
df = list(diff(commited, staged))
df = create_diff(staged, commited)
log_item = state.LogItem(
utils.generate_uuid(),
log_item.res, df, guess_action(commited, staged))
log.add(log_item)
log.append(log_item)
res = resource.load(log_item.res)
res.update(staged.get('args', {}))
#res.save()
res.set_args_from_dict(staged['input'])
return log
return log_item
def rollback_uid(uid):


@@ -83,6 +83,10 @@ class Log(object):
l['diff'], l['action'],
getattr(STATES, l['state'])) for l in items])
def delete(self):
self.items = deque()
db.delete(self.path, db.COLLECTIONS.state_log)
def sync(self):
db.save(
self.path,
@@ -90,7 +94,7 @@ class Log(object):
collection=db.COLLECTIONS.state_log
)
def add(self, logitem):
def append(self, logitem):
self.items.append(logitem)
self.sync()
@@ -108,6 +112,9 @@ class Log(object):
return ['L(uuid={0}, res={1}, action={2})'.format(
l.uid, l.res, l.action) for l in self.items]
def __len__(self):
return len(self.items)
def __repr__(self):
return 'Log({0})'.format(self.path)
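
The staged log now behaves like a persistent deque. A hedged usage sketch, assuming a state.LogItem built as in operations.py above:

staged = state.SL()       # the staged-log singleton operations.py works with
staged.append(log_item)   # renamed from add(); persists via sync()
assert len(staged) == 1   # __len__ is new in this patch
staged.delete()           # clears both the in-memory deque and the stored copy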


@@ -0,0 +1,24 @@
import os
from pytest import fixture
from solar.interfaces import db
from solar import utils
def pytest_configure():
db.DB = db.mapping['fakeredis_db']()
@fixture(autouse=True)
def cleanup(request):
def fin():
from solar.core import signals
db.get_db().clear()
signals.Connections.clear()
request.addfinalizer(fin)
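
With this conftest in place every test runs against a fakeredis-backed DB, and the autouse fixture wipes the DB and signal connections after each test. A hypothetical test relying on that isolation:

from solar.interfaces import db

def test_db_starts_empty():
    # nothing leaks in from other tests thanks to the cleanup fixture
    assert list(db.get_db().get_list()) == []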


@@ -0,0 +1,109 @@
from pytest import fixture
import mock
from dictdiffer import revert, patch, diff
import networkx as nx
from solar import operations
from solar.core.resource import wrap_resource
@fixture
def staged():
return {'id': 'res.1',
'tags': ['res', 'node.1'],
'input': {'ip': {'value': '10.0.0.2'},
'list_val': {'value': [1, 2]}},
'metadata': {},
'connections': [
['node.1', 'res.1', ['ip', 'ip']],
['node.1', 'res.1', ['key', 'key']]]
}
@fixture
def commited():
return {'id': 'res.1',
'tags': ['res', 'node.1'],
'input': {'ip': '10.0.0.2',
'list_val': [1]},
'metadata': {},
'connections': [
['node.1', 'res.1', ['ip', 'ip']]]
}
@fixture
def full_diff(staged):
return operations.create_diff(staged, {})
@fixture
def diff_for_update(staged, commited):
return operations.create_diff(staged, commited)
def test_create_diff_with_empty_commited(full_diff):
# add will be executed
expected = [('add', '', [('connections', [['node.1', 'res.1', ['ip', 'ip']], ['node.1', 'res.1', ['key', 'key']]]), ('input', {'ip': {'value': '10.0.0.2'}, 'list_val': {'value': [1, 2]}}), ('metadata', {}), ('id', 'res.1'), ('tags', ['res', 'node.1'])])]
assert full_diff == expected
def test_create_diff_modified(diff_for_update):
assert diff_for_update == [
('add', 'connections',
[(1, ['node.1', 'res.1', ['key', 'key']])]),
('change', 'input.ip', ('10.0.0.2', {'value': '10.0.0.2'})),
('change', 'input.list_val', ([1], {'value': [1, 2]}))]
def test_verify_patch_creates_expected(staged, diff_for_update, commited):
expected = patch(diff_for_update, commited)
assert expected == staged
def test_revert_update(staged, diff_for_update, commited):
expected = revert(diff_for_update, staged)
assert expected == commited
@fixture
def resources():
r = {'n.1':
{'uid': 'n.1',
'args': {'ip': '10.20.0.2'},
'connections': [],
'tags': []},
'r.1':
{'uid': 'r.1',
'args': {'ip': '10.20.0.2'},
'connections': [['n.1', 'r.1', ['ip', 'ip']]],
'tags': []},
'h.1':
{'uid': 'h.1',
'args': {'ip': '10.20.0.2',
'ips': ['10.20.0.2']},
'connections': [['n.1', 'h.1', ['ip', 'ip']]],
'tags': []}}
return r
@fixture
def conn_graph():
edges = [
('n.1', 'r.1', {'label': 'ip:ip'}),
('n.1', 'h.1', {'label': 'ip:ip'}),
('r.1', 'h.1', {'label': 'ip:ips'})
]
mdg = nx.MultiDiGraph()
mdg.add_edges_from(edges)
return mdg
def test_stage_changes(resources, conn_graph):
commited = {}
log = operations._stage_changes(resources, conn_graph, commited, [])
assert len(log) == 3
assert [l.res for l in log] == ['n.1', 'r.1', 'h.1']
def test_resource_fixture(staged):
res = wrap_resource(staged)


@@ -0,0 +1,75 @@
import pytest
from mock import patch
from solar.core import resource
from solar import operations
from solar import state
@pytest.fixture
def default_resources():
from solar.core import signals
from solar.core import resource
node1 = resource.wrap_resource(
{'id': 'node1',
'input': {'ip': {'value':'10.0.0.3'}}})
rabbitmq_service1 = resource.wrap_resource(
{'id':'rabbitmq', 'input': {
'ip' : {'value': ''},
'image': {'value': 'rabbitmq:3-management'}}})
signals.connect(node1, rabbitmq_service1)
return resource.load_all()
@patch('solar.core.actions.resource_action')
@pytest.mark.usefixtures("default_resources")
def test_changes_on_update_image(maction):
log = operations.stage_changes()
assert len(log) == 2
operations.commit_changes()
rabbitmq = resource.load('rabbitmq')
rabbitmq.update({'image': 'different'})
log = operations.stage_changes()
assert len(log) == 1
item = log.items[0]
assert item.diff == [
('change', u'input.image.value',
(u'rabbitmq:3-management', u'different')),
('change', u'metadata.input.image.value',
(u'rabbitmq:3-management', u'different'))]
assert item.action == 'update'
operations.commit_changes()
commited = state.CD()
assert commited['rabbitmq']['input']['image'] == {
u'emitter': None, u'value': u'different'}
reverse = operations.rollback(state.CL().items[-1])
assert reverse.diff == [
('change', u'input.image.value',
(u'different', u'rabbitmq:3-management')),
('change', u'metadata.input.image.value',
(u'different', u'rabbitmq:3-management'))]
operations.commit_changes()
commited = state.CD()
assert commited['rabbitmq']['input']['image'] == {
u'emitter': None, u'value': u'rabbitmq:3-management'}


@@ -0,0 +1,112 @@
import pytest
from solar.core import signals
from solar.core import resource
from solar import operations
@pytest.fixture
def resources():
node1 = resource.wrap_resource(
{'id': 'node1',
'input': {'ip': {'value': '10.0.0.3'}}})
mariadb_service1 = resource.wrap_resource(
{'id': 'mariadb', 'input': {
'port' : {'value': 3306},
'ip': {'value': ''}}})
keystone_db = resource.wrap_resource(
{'id':'keystone_db',
'input': {
'login_port' : {'value': ''},
'ip': {'value': ''}}})
signals.connect(node1, mariadb_service1)
signals.connect(node1, keystone_db)
signals.connect(mariadb_service1, keystone_db, {'port': 'login_port'})
return resource.load_all()
def test_update_port_on_mariadb(resources):
operations.stage_changes()
operations.commit_changes()
mariadb = resources['mariadb']
mariadb.update({'port': 4400})
log = operations.stage_changes()
assert len(log) == 2
mariadb_log = log.items[0]
assert mariadb_log.diff == [
('change', u'input.port.value', (3306, 4400)),
('change', u'metadata.input.port.value', (3306, 4400))]
keystone_db_log = log.items[1]
assert keystone_db_log.diff == [
('change', u'input.login_port.value', (3306, 4400)),
('change', u'metadata.input.login_port.value', (3306, 4400))]
@pytest.fixture
def list_input():
res1 = resource.wrap_resource(
{'id': 'res1', 'input': {'ip': {'value': '10.10.0.2'}}})
res2 = resource.wrap_resource(
{'id': 'res2', 'input': {'ip': {'value': '10.10.0.3'}}})
consumer = resource.wrap_resource(
{'id': 'consumer', 'input':
{'ips': {'value': [],
'schema': ['str']}}})
signals.connect(res1, consumer, {'ip': 'ips'})
signals.connect(res2, consumer, {'ip': 'ips'})
return resource.load_all()
def test_update_list_resource(list_input):
operations.stage_changes()
operations.commit_changes()
res3 = resource.wrap_resource(
{'id': 'res3', 'input': {'ip': {'value': '10.10.0.4'}}})
log = operations.stage_changes()
assert len(log) == 2
assert log.items[0].res == res3.name
assert log.items[1].diff == [
('add', u'connections', [(2, ['res3', u'consumer', ['ip', 'ips']])]),
('add', u'input.ips', [
(2, {u'emitter_attached_to': u'res3', u'emitter': u'ip', u'value': u'10.10.0.4'})]),
('add', u'metadata.input.ips.value',
[(2, {u'emitter_attached_to': u'res3', u'emitter': u'ip', u'value': u'10.10.0.4'})])]
operations.commit_changes()
assert list_input['consumer'].args_dict() == {
u'ips': [
{u'emitter_attached_to': u'res1', u'emitter': u'ip', u'value': u'10.10.0.2'},
{u'emitter_attached_to': u'res2', u'emitter': u'ip', u'value': u'10.10.0.3'},
{u'emitter_attached_to': u'res3', u'emitter': u'ip', u'value': u'10.10.0.4'}]}
log_item = operations.rollback_last()
assert log_item.diff == [
('remove', u'connections', [(2, ['res3', u'consumer', ['ip', 'ips']])]),
('remove', u'input.ips', [
(2, {u'emitter_attached_to': u'res3', u'emitter': u'ip', u'value': u'10.10.0.4'})]),
('remove', u'metadata.input.ips.value',
[(2, {u'emitter_attached_to': u'res3', u'emitter': u'ip', u'value': u'10.10.0.4'})])]
consumer = resource.load('consumer')
assert consumer.args_dict() == {
u'ips': [{u'emitter': u'ip',
u'emitter_attached_to': u'res1',
u'value': u'10.10.0.2'},
{u'emitter': u'ip',
u'emitter_attached_to': u'res2',
u'value': u'10.10.0.3'}]}


@@ -1,297 +0,0 @@
# -*- test-case-name: twisted.test.test_dirdbm -*-
#
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
DBM-style interface to a directory.
Each key is stored as a single file. This is not expected to be very fast or
efficient, but it's good for easy debugging.
DirDBMs are *not* thread-safe, they should only be accessed by one thread at a time.
No files should be placed in the working directory of a DirDBM save those
created by the DirDBM itself!
Maintainer: Itamar Shtull-Trauring
"""
import os
import types
import base64
import glob
try:
import cPickle as pickle
except ImportError:
import pickle
try:
_open
except NameError:
_open = open
class DirDBM(object):
"""A directory with a DBM interface.
This class presents a hash-like interface to a directory of small,
flat files. It can only use strings as keys or values.
"""
def __init__(self, name):
"""
@type name: str
@param name: Base path to use for the directory storage.
"""
self.dname = os.path.abspath(name)
if not os.path.isdir(self.dname):
os.mkdir(self.dname)
else:
# Run recovery, in case we crashed. we delete all files ending
# with ".new". Then we find all files who end with ".rpl". If about
# corresponding file exists without ".rpl", we assume the write
# failed and delete the ".rpl" file. If only a ".rpl" exist we
# assume the program crashed right after deleting the old entry
# but before renaming the replacement entry.
#
# NOTE: '.' is NOT in the base64 alphabet!
for f in glob.glob(os.path.join(self.dname, "*.new")):
os.remove(f)
replacements = glob.glob(os.path.join(self.dname, "*.rpl"))
for f in replacements:
old = f[:-4]
if os.path.exists(old):
os.remove(f)
else:
os.rename(f, old)
def _encode(self, k):
"""Encode a key so it can be used as a filename.
"""
# NOTE: '_' is NOT in the base64 alphabet!
return base64.encodestring(k).replace('\n', '_').replace("/", "-")
def _decode(self, k):
"""Decode a filename to get the key.
"""
return base64.decodestring(k.replace('_', '\n').replace("-", "/"))
def _readFile(self, path):
"""Read in the contents of a file.
Override in subclasses to e.g. provide transparently encrypted dirdbm.
"""
f = _open(path, "rb")
s = f.read()
f.close()
return s
def _writeFile(self, path, data):
"""Write data to a file.
Override in subclasses to e.g. provide transparently encrypted dirdbm.
"""
f = _open(path, "wb")
f.write(data)
f.flush()
f.close()
def __len__(self):
"""
@return: The number of key/value pairs in this Shelf
"""
return len(os.listdir(self.dname))
def __setitem__(self, k, v):
"""
C{dirdbm[k] = v}
Create or modify a textfile in this directory
@type k: str @param k: key to set
@type v: str @param v: value to associate with C{k}
"""
assert type(k) == types.StringType, "DirDBM key must be a string"
# NOTE: the value may not be a string if _writeFile is redefined in a subclass
# assert type(v) == types.StringType, "DirDBM value must be a string"
k = self._encode(k)
# we create a new file with extension .new, write the data to it, and
# if the write succeeds delete the old file and rename the new one.
old = os.path.join(self.dname, k)
if os.path.exists(old):
new = old + ".rpl" # replacement entry
else:
new = old + ".new" # new entry
try:
self._writeFile(new, v)
except:
os.remove(new)
raise
else:
if os.path.exists(old): os.remove(old)
os.rename(new, old)
def __getitem__(self, k):
"""
C{dirdbm[k]}
Get the contents of a file in this directory as a string.
@type k: string @param k: key to lookup
@return: The value associated with C{k}
@raise KeyError: Raised when there is no such key
"""
assert type(k) == types.StringType, "DirDBM key must be a string"
path = os.path.join(self.dname, self._encode(k))
try:
return self._readFile(path)
except Exception as exc:
raise KeyError, k
def __delitem__(self, k):
"""
C{del dirdbm[foo]}
Delete a file in this directory.
@type k: string
@param k: key to delete
@raise KeyError: Raised when there is no such key
"""
assert type(k) == types.StringType, "DirDBM key must be a string"
k = self._encode(k)
try: os.remove(os.path.join(self.dname, k))
except (OSError, IOError): raise KeyError(self._decode(k))
def keys(self):
"""
@return: a C{list} of filenames (keys).
"""
return map(self._decode, os.listdir(self.dname))
def values(self):
"""
@return: a C{list} of file-contents (values).
"""
vals = []
keys = self.keys()
for key in keys:
vals.append(self[key])
return vals
def items(self):
"""
@return: a C{list} of 2-tuples containing key/value pairs.
"""
items = []
keys = self.keys()
for key in keys:
items.append((key, self[key]))
return items
def has_key(self, key):
"""
@type key: string
@param key: The key to test
@return: A true value if this dirdbm has the specified key, a false
value otherwise.
"""
assert type(key) == types.StringType, "DirDBM key must be a string"
key = self._encode(key)
return os.path.isfile(os.path.join(self.dname, key))
def setdefault(self, key, value):
"""
@type key: string
@param key: The key to lookup
@param value: The value to associate with key if key is not already
associated with a value.
"""
if not self.has_key(key):
self[key] = value
return value
return self[key]
def get(self, key, default = None):
"""
@type key: string
@param key: The key to lookup
@param default: The value to return if the given key does not exist
@return: The value associated with C{key} or C{default} if not
C{self.has_key(key)}
"""
if self.has_key(key):
return self[key]
else:
return default
def __contains__(self, key):
"""
C{key in dirdbm}
@type key: string
@param key: The key to test
@return: A true value if C{self.has_key(key)}, a false value otherwise.
"""
assert type(key) == types.StringType, "DirDBM key must be a string"
key = self._encode(key)
return os.path.isfile(os.path.join(self.dname, key))
def update(self, dict):
"""
Add all the key/value pairs in C{dict} to this dirdbm. Any conflicting
keys will be overwritten with the values from C{dict}.
@type dict: mapping
@param dict: A mapping of key/value pairs to add to this dirdbm.
"""
for key, val in dict.items():
self[key]=val
def copyTo(self, path):
"""
Copy the contents of this dirdbm to the dirdbm at C{path}.
@type path: C{str}
@param path: The path of the dirdbm to copy to. If a dirdbm
exists at the destination path, it is cleared first.
@rtype: C{DirDBM}
@return: The dirdbm this dirdbm was copied to.
"""
path = os.path.abspath(path)
assert path != self.dname
d = self.__class__(path)
d.clear()
for k in self.keys():
d[k] = self[k]
return d
def clear(self):
"""
Delete all key/value pairs in this dirdbm.
"""
for k in self.keys():
del self[k]
def close(self):
"""
Close this dbm: no-op, for dbm-style interface compliance.
"""
def getModificationTime(self, key):
"""
Returns modification time of an entry.
@return: Last modification date (seconds since epoch) of entry C{key}
@raise KeyError: Raised when there is no such key
"""
assert type(key) == types.StringType, "DirDBM key must be a string"
path = os.path.join(self.dname, self._encode(key))
if os.path.isfile(path):
return os.path.getmtime(path)
else:
raise KeyError, key