Merge pull request #78 from xarses/glance-perf

Glance performance fixes

The changes move Solar's working files from the shared /vagrant mount to local /tmp, defer persisting resource connections and file-system DB records to an in-memory cache that is flushed once at process exit, and drop the fixed time.sleep() waits from the example deployment.
Łukasz Oleś 2015-06-03 14:08:54 +02:00
commit e1fb708d00
9 changed files with 145 additions and 28 deletions

View File

@@ -1,18 +1,18 @@
clients-data-file: /vagrant/tmp/connections.yaml
clients-data-file: /tmp/connections.yaml
tmp: /vagrant/tmp
tmp: /tmp
examples-dir: /vagrant/examples
extensions-dir: /vagrant/solar/solar/extensions
file-system-db:
storage-path: /vagrant/tmp/storage
storage-path: /tmp/storage
template-dir: /vagrant/templates
resources-files-mask: /vagrant/resources/*/*.yaml
node_resource_template: /vagrant/resources/ro_node/
state: /vagrant/state/
state: /tmp/state/
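
This hunk moves all of Solar's writable paths (connection cache, file-system DB storage, commit state) off the shared /vagrant mount onto the VM-local /tmp, presumably to avoid shared-folder I/O on every read and write. A minimal sketch of how these keys end up being consumed, assuming a plain YAML loader; the config filename here is illustrative, the diff only shows the keys:

import yaml

def read_config(path='/vagrant/config.yaml'):
    # path is an assumption; only the keys below appear in the diff
    with open(path) as f:
        return yaml.safe_load(f)

cfg = read_config()
storage_path = cfg['file-system-db']['storage-path']   # now /tmp/storage
state_dir = cfg['state']                                # now /tmp/state/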

View File

@@ -12,7 +12,6 @@ from solar.core import validation
from solar.interfaces.db import get_db
@click.group()
def main():
pass
@@ -169,6 +168,8 @@ def deploy():
signals.connect(keystone_config1, glance_api_endpoint, {'admin_token': 'admin_token'})
signals.connect(keystone_service1, glance_api_endpoint, {'ip': 'keystone_host', 'admin_port': 'keystone_port'})
signals.Connections.flush()
has_errors = False
for r in locals().values():
@@ -194,12 +195,10 @@ def deploy():
actions.resource_action(keystone_db_user, 'run')
actions.resource_action(keystone_config1, 'run')
actions.resource_action(keystone_service1, 'run')
time.sleep(10) #TODO fix keystone services to check if tables are created
actions.resource_action(keystone_config2, 'run')
actions.resource_action(keystone_service2, 'run')
actions.resource_action(haproxy_config, 'run')
actions.resource_action(haproxy_service, 'run')
time.sleep(10) #TODO fix haproxy to wait until it's ready
actions.resource_action(admin_tenant, 'run')
actions.resource_action(admin_user, 'run')
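
Two changes in the example deployment: signals.Connections.flush() is called once after all resources are wired together, and the fixed time.sleep(10) waits are dropped. With the per-connection saves commented out in signals.py (below), connect() only updates the in-memory CLIENTS mapping, so the explicit flush is what persists the whole graph in a single write. A standalone sketch of that batching pattern; the names are illustrative, not Solar's API:

import yaml

connections = {}

def connect(emitter, src, receiver, dst):
    # accumulate in memory; no disk write per connection
    connections.setdefault(emitter, []).append([src, receiver, dst])

def flush(path='/tmp/connections.yaml'):
    # one write after the whole graph is built
    with open(path, 'w') as f:
        yaml.safe_dump(connections, f)

connect('keystone_config1', 'admin_token', 'glance_api_endpoint', 'admin_token')
connect('keystone_service1', 'ip', 'glance_api_endpoint', 'keystone_host')
flush()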

View File

@@ -2,11 +2,14 @@
set -eux
rm -rf /tmp/tmp*
rm /vagrant/tmp/storage/* || true
rm /vagrant/tmp/connections.yaml || true
echo > /vagrant/state/commit_log || true
echo > /vagrant/state/commited_data || true
echo > /vagrant/state/stage_log || true
rm /tmp/storage/* || true
rm /tmp/connections.yaml || true
mkdir -p /tmp/state
echo > /tmp/state/commit_log || true
echo > /tmp/state/commited_data || true
echo > /tmp/state/stage_log || true
find /vagrant/solar/solar -name '*.pyc' -delete || true
sudo docker stop $(sudo docker ps -q) || true

View File

@@ -1,9 +1,9 @@
- id: node_1
ip: 10.0.0.2
ssh_user: vagrant
ssh_key: /vagrant/tmp/keys/ssh_private
- id: node_2
ip: 10.0.0.3
ssh_user: vagrant
ssh_key: /vagrant/tmp/keys/ssh_private
ssh_key: /vagrant/.vagrant/machines/solar-dev1/virtualbox/private_key
- id: node_2
ip: 10.0.0.4
ssh_user: vagrant
ssh_key: /vagrant/.vagrant/machines/solar-dev2/virtualbox/private_key
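
The node definitions now point at the per-machine private keys that Vagrant generates under .vagrant/machines/<name>/virtualbox/ instead of a key copied into /vagrant/tmp/keys/, and node_2 moves to 10.0.0.4. A short sketch of consuming this file; the filename and loader are assumptions, only the keys id, ip, ssh_user and ssh_key appear in the diff:

import yaml

with open('/vagrant/nodes.yaml') as f:     # hypothetical path
    nodes = yaml.safe_load(f)

for node in nodes:
    print('ssh -i {ssh_key} {ssh_user}@{ip}'.format(**node))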

View File

@@ -25,7 +25,6 @@ import pprint
import textwrap
import yaml
from solar import extensions
from solar import utils
from solar.core.resource import assign_resources_to_nodes
from solar.core.resource import connect_resources

View File

@@ -1,4 +1,5 @@
# -*- coding: utf-8 -*-
import atexit
from collections import defaultdict
import itertools
import networkx as nx
@@ -30,7 +31,7 @@ class Connections(object):
if [receiver.name, dst] not in CLIENTS[emitter.name][src]:
CLIENTS[emitter.name][src].append([receiver.name, dst])
utils.save_to_config_file(CLIENTS_CONFIG_KEY, CLIENTS)
#utils.save_to_config_file(CLIENTS_CONFIG_KEY, CLIENTS)
@staticmethod
def remove(emitter, src, receiver, dst):
@@ -39,7 +40,7 @@ class Connections(object):
if destination != [receiver.name, dst]
]
utils.save_to_config_file(CLIENTS_CONFIG_KEY, CLIENTS)
#utils.save_to_config_file(CLIENTS_CONFIG_KEY, CLIENTS)
@staticmethod
def reconnect_all():
@@ -65,6 +66,14 @@ class Connections(object):
if os.path.exists(path):
os.remove(path)
@staticmethod
def flush():
print 'FLUSHING Connections'
utils.save_to_config_file(CLIENTS_CONFIG_KEY, CLIENTS)
atexit.register(Connections.flush)
def guess_mapping(emitter, receiver):
"""Guess connection mapping between emitter and receiver.
@@ -136,7 +145,7 @@ def disconnect_by_src(emitter_name, src, receiver):
if destination[0] != receiver.name
]
utils.save_to_config_file(CLIENTS_CONFIG_KEY, CLIENTS)
#utils.save_to_config_file(CLIENTS_CONFIG_KEY, CLIENTS)
def notify(source, key, value):
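
This is the core of the signals.py change: the utils.save_to_config_file(CLIENTS_CONFIG_KEY, CLIENTS) call that used to run on every add, remove and disconnect is commented out, and a single Connections.flush() is registered with atexit instead, so the CLIENTS mapping is written to disk once when the process exits or when flush() is called explicitly. A minimal standalone sketch of that deferral pattern; illustrative, not Solar's API:

import atexit
import yaml

CLIENTS = {}

def add(emitter, src, receiver, dst):
    # mutation stays in memory; no per-call save any more
    CLIENTS.setdefault(emitter, {}).setdefault(src, []).append([receiver, dst])

def flush():
    # one write covering everything accumulated so far
    with open('/tmp/clients.yaml', 'w') as f:
        yaml.safe_dump(CLIENTS, f)

atexit.register(flush)

Note that atexit handlers do not run if the process is killed hard, which is presumably why the deploy example above also calls signals.Connections.flush() explicitly after wiring everything up.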

View File

@@ -1,6 +1,8 @@
from solar.interfaces.db.file_system_db import FileSystemDB
from solar.interfaces.db.cached_file_system_db import CachedFileSystemDB
mapping = {
'cached_file_system': CachedFileSystemDB,
'file_system': FileSystemDB
}
@@ -10,5 +12,5 @@ def get_db():
# Should be retrieved from config
global DB
if DB is None:
DB = mapping['file_system']()
DB = mapping['cached_file_system']()
return DB
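
get_db() now hands out the new cached backend by default; the mapping keeps the plain file_system entry, but nothing selects it yet (the "Should be retrieved from config" comment still applies). Callers are unaffected, as in this sketch that uses only methods visible in this diff:

from solar.interfaces.db import get_db

db = get_db()                                   # CachedFileSystemDB instance
db.add_resource('node_1', {'id': 'node_1', 'ip': '10.0.0.2'})
print(db.get_resource('node_1'))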

View File

@@ -0,0 +1,110 @@
from solar.third_party.dir_dbm import DirDBM
import atexit
import os
import types
import yaml
from solar import utils
from solar import errors
class CachedFileSystemDB(DirDBM):
STORAGE_PATH = utils.read_config()['file-system-db']['storage-path']
RESOURCE_COLLECTION_NAME = 'resource'
_CACHE = {}
def __init__(self):
utils.create_dir(self.STORAGE_PATH)
super(CachedFileSystemDB, self).__init__(self.STORAGE_PATH)
self.entities = {}
atexit.register(self.flush)
def __setitem__(self, k, v):
"""
C{dirdbm[k] = v}
Create or modify a textfile in this directory
@type k: strings @param k: key to setitem
@type v: strings @param v: value to associate with C{k}
"""
assert type(k) == types.StringType, "DirDBM key must be a string"
# NOTE: Can be not a string if _writeFile in the child is redefined
# assert type(v) == types.StringType, "DirDBM value must be a string"
k = self._encode(k)
# we create a new file with extension .new, write the data to it, and
# if the write succeeds delete the old file and rename the new one.
old = os.path.join(self.dname, k)
if os.path.exists(old):
new = old + ".rpl" # replacement entry
else:
new = old + ".new" # new entry
try:
self._writeFile(old, v)
except:
raise
def get_resource(self, uid):
return self[self._make_key(self.RESOURCE_COLLECTION_NAME, uid)]
def get_obj_resource(self, uid):
from solar.core.resource import wrap_resource
raw_resource = self[self._make_key(self.RESOURCE_COLLECTION_NAME, uid)]
return wrap_resource(raw_resource)
def add_resource(self, uid, resource):
self[self._make_key(self.RESOURCE_COLLECTION_NAME, uid)] = resource
def store(self, collection, obj):
if 'id' in obj:
self[self._make_key(collection, obj['id'])] = obj
else:
raise errors.CannotFindID('Cannot find id for object {0}'.format(obj))
def store_list(self, collection, objs):
for obj in objs:
self.store(collection, obj)
def get_list(self, collection):
collection_keys = filter(
lambda k: k.startswith('{0}-'.format(collection)),
self.keys())
return map(lambda k: self[k], collection_keys)
def get_record(self, collection, _id):
key = self._make_key(collection, _id)
if key not in self:
return None
return self[key]
def _make_key(self, collection, _id):
return '{0}-{1}'.format(collection, _id)
def _readFile(self, path):
if path not in self._CACHE:
data = yaml.load(super(CachedFileSystemDB, self)._readFile(path))
self._CACHE[path] = data
return data
return self._CACHE[path]
def _writeFile(self, path, data):
self._CACHE[path] = data
def _encode(self, key):
"""Override method of the parent not to use base64 as a key for encoding"""
return key
def _decode(self, key):
"""Override method of the parent not to use base64 as a key for encoding"""
return key
def flush(self):
print 'FLUSHING DB'
for path, data in self._CACHE.items():
super(CachedFileSystemDB, self)._writeFile(path, yaml.dump(data))
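
CachedFileSystemDB turns the file-system DB into a write-back cache: _readFile fills the class-level _CACHE from YAML only on a miss, _writeFile updates the cache without touching disk, and the atexit-registered flush() serializes every cached entry back to its file in one pass (it also skips the parent's base64 key encoding, so keys map directly to filenames). The same read-through / write-back idea in a minimal standalone form; illustrative, not Solar's code:

import atexit
import os
import yaml

class WriteBackStore(object):
    def __init__(self, root):
        self.root = root
        self.cache = {}
        if not os.path.exists(root):
            os.makedirs(root)
        atexit.register(self.flush)

    def __getitem__(self, key):
        if key not in self.cache:                   # read-through on a miss
            with open(os.path.join(self.root, key)) as f:
                self.cache[key] = yaml.safe_load(f)
        return self.cache[key]

    def __setitem__(self, key, value):
        self.cache[key] = value                     # write-back: memory only

    def flush(self):
        # one disk write per touched key, at exit or on demand
        for key, value in self.cache.items():
            with open(os.path.join(self.root, key), 'w') as f:
                yaml.safe_dump(value, f)

As with the signals change, anything still in the cache is lost if the process dies before flush() runs, and because _CACHE is a class attribute it is shared by every CachedFileSystemDB instance in the process.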

View File

@@ -1,10 +1,5 @@
from solar.third_party.dir_dbm import DirDBM
import os
from fnmatch import fnmatch
from copy import deepcopy
import yaml
from solar import utils