From fb359e177ac017a7e72a01cd8a02ffee120c98cd Mon Sep 17 00:00:00 2001 From: Morgan Fainberg Date: Sat, 17 Aug 2013 03:53:50 -0700 Subject: [PATCH 04/47] Keystone Caching Layer for Manager Calls Implements core elements of Keystone Caching layer. dogpile.cache is used as the caching library to provide flexibility to the cache backend (out of the box, Redis, Memcached, File, and in-Memory). The keystone.common.cache.on_arguments decorator is used cache the return value of method and functions that are meant to be cached. The default behavior is to not cache anything (using a no-op cacher). developing.rst has been updated to give an outline on how to approach adding the caching layer onto a manager object. Subsequent patches will build upon this code to enable caching across the various keystone subsystems. DocImpact partial-blueprint: caching-layer-for-driver-calls Change-Id: I664ddccd8d1d393cf50d24054f946512093fb790 --- keystone/common/cache/__init__.py | 17 ++ keystone/common/cache/backends/__init__.py | 15 ++ keystone/common/cache/backends/noop.py | 51 ++++++ keystone/common/cache/core.py | 183 +++++++++++++++++++++ 4 files changed, 266 insertions(+) create mode 100644 keystone/common/cache/__init__.py create mode 100644 keystone/common/cache/backends/__init__.py create mode 100644 keystone/common/cache/backends/noop.py create mode 100644 keystone/common/cache/core.py diff --git a/keystone/common/cache/__init__.py b/keystone/common/cache/__init__.py new file mode 100644 index 00000000..c556c80b --- /dev/null +++ b/keystone/common/cache/__init__.py @@ -0,0 +1,17 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Metacloud +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from keystone.common.cache.core import * # flake8: noqa diff --git a/keystone/common/cache/backends/__init__.py b/keystone/common/cache/backends/__init__.py new file mode 100644 index 00000000..42ad3383 --- /dev/null +++ b/keystone/common/cache/backends/__init__.py @@ -0,0 +1,15 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Metacloud +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
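In practice, the decorator described in the commit message above is applied to manager methods roughly as follows (a sketch only; the ``token`` section name mirrors the ``should_cache_fn`` docstring added later in this patch, and the manager method shown is illustrative, not part of the patch):

    from keystone.common import cache

    # False whenever [cache] enabled is off; otherwise the section's own
    # `caching` flag (defaulting to True) decides.
    SHOULD_CACHE = cache.should_cache_fn('token')

    class Manager(object):
        @cache.on_arguments(should_cache_fn=SHOULD_CACHE)
        def get_token(self, token_id):
            # Result is memoized on token_id by the region's key generator.
            return self.driver.get_token(token_id)

With the no-op backend selected by default, the decorated call behaves as if no cache were present until a real backend is configured.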
diff --git a/keystone/common/cache/backends/noop.py b/keystone/common/cache/backends/noop.py new file mode 100644 index 00000000..646610a3 --- /dev/null +++ b/keystone/common/cache/backends/noop.py @@ -0,0 +1,51 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Metacloud +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from dogpile.cache import api + + +NO_VALUE = api.NO_VALUE + + +class NoopCacheBackend(api.CacheBackend): + """A no op backend as a default caching backend. + + The no op backend is provided as the default caching backend for keystone + to ensure that ``dogpile.cache.memory`` is not used in any real-world + circumstances unintentionally. ``dogpile.cache.memory`` does not have a + mechanism to cleanup it's internal dict and therefore could cause run-away + memory utilization. + """ + def __init__(self, *args): + return + + def get(self, key): + return NO_VALUE + + def get_multi(self, keys): + return [NO_VALUE for x in keys] + + def set(self, key, value): + return + + def set_multi(self, mapping): + return + + def delete(self, key): + return + + def delete_multi(self, keys): + return diff --git a/keystone/common/cache/core.py b/keystone/common/cache/core.py new file mode 100644 index 00000000..a8b607fa --- /dev/null +++ b/keystone/common/cache/core.py @@ -0,0 +1,183 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2013 Metacloud +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""Keystone Caching Layer Implementation.""" + +import dogpile.cache + +from dogpile.cache import proxy +from dogpile.cache import util + +from keystone.common import config +from keystone import exception +from keystone.openstack.common import importutils +from keystone.openstack.common import log + + +CONF = config.CONF +LOG = log.getLogger(__name__) +REGION = dogpile.cache.make_region() + +on_arguments = REGION.cache_on_arguments + +dogpile.cache.register_backend( + 'keystone.common.cache.noop', + 'keystone.common.cache.backends.noop', + 'NoopCacheBackend') + + +class DebugProxy(proxy.ProxyBackend): + """Extra Logging ProxyBackend.""" + def get(self, key): + value = self.proxied.get(key) + msg = _('CACHE_GET: Key: "%(key)s)" "%(value)s"') + LOG.debug(msg % {'key': key, 'value': value}) + return value + + def get_multi(self, keys): + values = self.proxied.get_multi(keys) + msg = _('CACHE_GET_MULTI: "%(key)s" "%(value)s"') + LOG.debug(msg % {'keys': keys, 'values': values}) + return values + + def set(self, key, value): + msg = _('CACHE_SET: Key: %(key)s Value: %(value)s') + LOG.debug(msg % {'key': key, 'value': value}) + return self.proxied.set(key, value) + + def set_multi(self, keys): + LOG.debug(_('CACHE_SET_MULTI: %s') % keys) + self.proxied.set_multi(keys) + + def delete(self, key): + self.proxied.delete(key) + LOG.debug(_('CACHE_DELETE: %s') % key) + + def delete_multi(self, keys): + LOG.debug(_('CACHE_DELETE_MULTI: %s') % keys) + self.proxied.delete_multi(keys) + + +def build_cache_config(conf): + """Build the cache region dictionary configuration. + + :param conf: configuration object for keystone + :returns: dict + """ + prefix = conf.cache.config_prefix + conf_dict = {} + conf_dict['%s.backend' % prefix] = conf.cache.backend + conf_dict['%s.expiration_time' % prefix] = conf.cache.expiration_time + for argument in conf.cache.backend_argument: + try: + (argname, argvalue) = argument.split(':', 1) + except ValueError: + msg = _('Unable to build cache config-key. Expected format ' + '":". Skipping unknown format: %s') + LOG.error(msg, argument) + continue + + arg_key = '.'.join([prefix, 'arguments', argname]) + conf_dict[arg_key] = argvalue + + LOG.debug(_('Keystone Cache Config: %s'), conf_dict) + + return conf_dict + + +def configure_cache_region(conf, region=None): + """Configure a cache region. + + :param conf: global keystone configuration object + :param region: optional CacheRegion object, if not provided a new region + will be instantiated + :raises: exception.ValidationError + :returns: dogpile.cache.CacheRegion + """ + if region is not None and not isinstance(region, + dogpile.cache.CacheRegion): + raise exception.ValidationError( + _('region not type dogpile.cache.CacheRegion')) + + if region is None: + region = dogpile.cache.make_region() + + if 'backend' not in region.__dict__: + # NOTE(morganfainberg): this is how you tell if a region is configured. + # There is a request logged with dogpile.cache upstream to make this + # easier / less ugly. + + config_dict = build_cache_config(conf) + region.configure_from_config(config_dict, + '%s.' % conf.cache.config_prefix) + + if conf.cache.debug_cache_backend: + region.wrap(DebugProxy) + + # NOTE(morganfainberg): if the backend requests the use of a + # key_mangler, we should respect that key_mangler function. If a + # key_mangler is not defined by the backend, use the sha1_mangle_key + # mangler provided by dogpile.cache. This ensures we always use a fixed + # size cache-key. 
This is toggle-able for debug purposes; if disabled + # this could cause issues with certain backends (such as memcached) and + # its limited key-size. + if region.key_mangler is None: + if CONF.cache.use_key_mangler: + region.key_mangler = util.sha1_mangle_key + + for class_path in conf.cache.proxies: + # NOTE(morganfainberg): if we have any proxy wrappers, we should + # ensure they are added to the cache region's backend. Since + # configure_from_config doesn't handle the wrap argument, we need + # to manually add the Proxies. For information on how the + # ProxyBackends work, see the dogpile.cache documents on + # "changing-backend-behavior" + cls = importutils.import_class(class_path) + LOG.debug(_('Adding cache-proxy \'%s\' to backend.') % class_path) + region.wrap(cls) + + return region + + +def should_cache_fn(section): + """Build a function that returns a config section's caching status. + + For any given driver in keystone that has caching capabilities, a boolean + config option for that driver's section (e.g. ``token``) should exist and + default to ``True``. This function will use that value to tell the caching + decorator if caching for that driver is enabled. To properly use this + with the decorator, pass this function the configuration section and assign + the result to a variable. Pass the new variable to the caching decorator + as the named argument ``should_cache_fn``. e.g.: + + from keystone.common import cache + + SHOULD_CACHE = cache.should_cache_fn('token') + + @cache.on_arguments(should_cache_fn=SHOULD_CACHE) + def function(arg1, arg2): + ... + + :param section: name of the configuration section to examine + :type section: string + :returns: function reference + """ + def should_cache(value): + if not CONF.cache.enabled: + return False + conf_group = getattr(CONF, section) + return getattr(conf_group, 'caching', True) + return should_cache From 8c1aebe851fc011f23985b635dfeb27d134bed7c Mon Sep 17 00:00:00 2001 From: Morgan Fainberg Date: Fri, 23 Aug 2013 19:06:42 -0700 Subject: [PATCH 05/47] Implement Caching for Token Revocation List Based upon the Keystone caching (using dogpile.cache) implementation token revocation list caching is implemented in this patchset. The following methods are cached: * token_api.list_revoked_tokens Calls to token_api.delete_token and token_api.delete_tokens will properly invalidate the cache for the revocation list. Reworked some of the caching tests to allow for more in-depth tests of the cache layer. 
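In rough terms, the caching and invalidation this message describes look like the following inside the token manager (abridged and illustrative; the manager code is not part of the hunk shown below):

    from keystone.common import cache

    SHOULD_CACHE = cache.should_cache_fn('token')

    class Manager(object):
        @cache.on_arguments(should_cache_fn=SHOULD_CACHE)
        def list_revoked_tokens(self):
            return self.driver.list_revoked_tokens()

        def delete_token(self, token_id):
            self.driver.delete_token(token_id)
            # The revocation list just changed, so drop the cached copy;
            # the next read rebuilds it from the driver.
            self.list_revoked_tokens.invalidate(self)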
DocImpact partial-blueprint: caching-layer-for-driver-calls Change-Id: I2bc821fa68035884dfb885b17c051f3023e7a9f6 --- keystone/common/cache/core.py | 30 +++++++++++++----------------- 1 file changed, 13 insertions(+), 17 deletions(-) diff --git a/keystone/common/cache/core.py b/keystone/common/cache/core.py index a8b607fa..8b50b789 100644 --- a/keystone/common/cache/core.py +++ b/keystone/common/cache/core.py @@ -21,7 +21,7 @@ import dogpile.cache from dogpile.cache import proxy from dogpile.cache import util -from keystone.common import config +from keystone import config from keystone import exception from keystone.openstack.common import importutils from keystone.openstack.common import log @@ -31,6 +31,7 @@ CONF = config.CONF LOG = log.getLogger(__name__) REGION = dogpile.cache.make_region() +make_region = dogpile.cache.make_region on_arguments = REGION.cache_on_arguments dogpile.cache.register_backend( @@ -71,17 +72,17 @@ class DebugProxy(proxy.ProxyBackend): self.proxied.delete_multi(keys) -def build_cache_config(conf): +def build_cache_config(): """Build the cache region dictionary configuration. :param conf: configuration object for keystone :returns: dict """ - prefix = conf.cache.config_prefix + prefix = CONF.cache.config_prefix conf_dict = {} - conf_dict['%s.backend' % prefix] = conf.cache.backend - conf_dict['%s.expiration_time' % prefix] = conf.cache.expiration_time - for argument in conf.cache.backend_argument: + conf_dict['%s.backend' % prefix] = CONF.cache.backend + conf_dict['%s.expiration_time' % prefix] = CONF.cache.expiration_time + for argument in CONF.cache.backend_argument: try: (argname, argvalue) = argument.split(':', 1) except ValueError: @@ -98,33 +99,28 @@ def build_cache_config(conf): return conf_dict -def configure_cache_region(conf, region=None): +def configure_cache_region(region): """Configure a cache region. - :param conf: global keystone configuration object :param region: optional CacheRegion object, if not provided a new region will be instantiated :raises: exception.ValidationError :returns: dogpile.cache.CacheRegion """ - if region is not None and not isinstance(region, - dogpile.cache.CacheRegion): + if not isinstance(region, dogpile.cache.CacheRegion): raise exception.ValidationError( _('region not type dogpile.cache.CacheRegion')) - if region is None: - region = dogpile.cache.make_region() - if 'backend' not in region.__dict__: # NOTE(morganfainberg): this is how you tell if a region is configured. # There is a request logged with dogpile.cache upstream to make this # easier / less ugly. - config_dict = build_cache_config(conf) + config_dict = build_cache_config() region.configure_from_config(config_dict, - '%s.' % conf.cache.config_prefix) + '%s.' % CONF.cache.config_prefix) - if conf.cache.debug_cache_backend: + if CONF.cache.debug_cache_backend: region.wrap(DebugProxy) # NOTE(morganfainberg): if the backend requests the use of a @@ -138,7 +134,7 @@ def configure_cache_region(conf, region=None): if CONF.cache.use_key_mangler: region.key_mangler = util.sha1_mangle_key - for class_path in conf.cache.proxies: + for class_path in CONF.cache.proxies: # NOTE(morganfainberg): if we have any proxy wrappers, we should # ensure they are added to the cache region's backend. 
Since # configure_from_config doesn't handle the wrap argument, we need From acf8ac6bdb39aa296096e8846d71e3e9ebcd0e70 Mon Sep 17 00:00:00 2001 From: Morgan Fainberg Date: Wed, 28 Aug 2013 22:04:18 -0700 Subject: [PATCH 06/47] Add Memory Isolating Cache Proxy Created a specialized dogpile.cache proxy to isolate in-memory data in the cache region from modificaiton during tests using liberal application of copy.deepcopy for get/set operations. partial-blueprint: caching-layer-for-driver-calls Change-Id: Id6d27219b75ea7e6709d220542dd31c4a6c7aa04 --- keystone/common/cache/core.py | 1 - 1 file changed, 1 deletion(-) diff --git a/keystone/common/cache/core.py b/keystone/common/cache/core.py index 8b50b789..ee607aad 100644 --- a/keystone/common/cache/core.py +++ b/keystone/common/cache/core.py @@ -17,7 +17,6 @@ """Keystone Caching Layer Implementation.""" import dogpile.cache - from dogpile.cache import proxy from dogpile.cache import util From 415f75b9bd39aea9f0cd746b7aae48447fb32ee0 Mon Sep 17 00:00:00 2001 From: Morgan Fainberg Date: Sat, 24 Aug 2013 19:16:30 -0700 Subject: [PATCH 07/47] Implement caching for Tokens and Token Validation Based upon the keystone caching (using dogpile.cache) implementation token caching is implemented in this patchset. This patchset inclues basic Token caching as the first implementation of this caching layer. The following methods are cached: * token_api.get_token, * token_api.list_revoked_tokens * token_provider_api.validate_v3_token * token_provider_api.check_v3_token * token_provider_api.check_v2_token * token_provider_api.validate_v2_token * token_provider_api.validate_token Calls to token_api.delete_token and token_api.delete_tokens will properly invalidate the cache for the tokens being acted upon, as well as invalidating the cache for the revoked_tokens_list and the validate/check token calls. Token caching is configurable independantly of the revocation_list caching. Lifted expiration checks from the token drivers to the token manager. This ensures that cached tokens will still raise TokenNotFound when expired. For cache consistency, all token_ids are transformed into the short token hash at the provider and token_driver level. Some methods would have access to the full ID (PKI Tokens), and some methods would not. Cache invalidation is inconsistent without token_id normalization. 
DocImpact partial-blueprint: caching-layer-for-driver-calls Change-Id: Ifada0db0043fede504a091670ccc521196c2f19c --- keystone/common/cache/core.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/keystone/common/cache/core.py b/keystone/common/cache/core.py index ee607aad..6d3b886f 100644 --- a/keystone/common/cache/core.py +++ b/keystone/common/cache/core.py @@ -43,18 +43,18 @@ class DebugProxy(proxy.ProxyBackend): """Extra Logging ProxyBackend.""" def get(self, key): value = self.proxied.get(key) - msg = _('CACHE_GET: Key: "%(key)s)" "%(value)s"') + msg = _('CACHE_GET: Key: "%(key)s" Value: "%(value)s"') LOG.debug(msg % {'key': key, 'value': value}) return value def get_multi(self, keys): values = self.proxied.get_multi(keys) - msg = _('CACHE_GET_MULTI: "%(key)s" "%(value)s"') + msg = _('CACHE_GET_MULTI: "%(keys)s" Values: "%(values)s"') LOG.debug(msg % {'keys': keys, 'values': values}) return values def set(self, key, value): - msg = _('CACHE_SET: Key: %(key)s Value: %(value)s') + msg = _('CACHE_SET: Key: "%(key)s" Value: "%(value)s"') LOG.debug(msg % {'key': key, 'value': value}) return self.proxied.set(key, value) From c637ee47d9ca807826da980331acabb1181ae392 Mon Sep 17 00:00:00 2001 From: Morgan Fainberg Date: Sat, 24 Aug 2013 21:07:32 -0700 Subject: [PATCH 08/47] Implement basic caching around assignment CRUD Implements caching around basic assignment CRUD actions. * assignment_api.get_domain * assignmnet_api.get_domain_by_name * assignment_api.get_project * assignment_api.get_project_by_name * assignment_api.get_role The Create, Update, and Delete actions for domains, projects and roles will perform proper invalidations of the cached methods listed above. Specific Cache Layer Tests added around assignment CRUD. Modifications of LDAP tests done to handle caching concepts. List methods are not covered by this patchset (list_projects, list_domains, list_roles). Grants are not subject to caching in this patchset. DocImpact partial-blueprint: caching-layer-for-driver-calls Change-Id: Ic4e1a2d93078e55649ce9410ebece9b4d09b095a --- keystone/common/cache/core.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/keystone/common/cache/core.py b/keystone/common/cache/core.py index 6d3b886f..fe4a54a6 100644 --- a/keystone/common/cache/core.py +++ b/keystone/common/cache/core.py @@ -59,15 +59,15 @@ class DebugProxy(proxy.ProxyBackend): return self.proxied.set(key, value) def set_multi(self, keys): - LOG.debug(_('CACHE_SET_MULTI: %s') % keys) + LOG.debug(_('CACHE_SET_MULTI: "%s"') % keys) self.proxied.set_multi(keys) def delete(self, key): self.proxied.delete(key) - LOG.debug(_('CACHE_DELETE: %s') % key) + LOG.debug(_('CACHE_DELETE: "%s"') % key) def delete_multi(self, keys): - LOG.debug(_('CACHE_DELETE_MULTI: %s') % keys) + LOG.debug(_('CACHE_DELETE_MULTI: "%s"') % keys) self.proxied.delete_multi(keys) From 89dbbb6228caf874986524cb176e2261f3a13411 Mon Sep 17 00:00:00 2001 From: Morgan Fainberg Date: Sun, 13 Oct 2013 18:34:24 -0700 Subject: [PATCH 09/47] Handle unicode at the caching layer more elegantly This patchset resolves an issue where in some cases unicode would cause the cache key generator to raise a UnicodeEncodeError due to the name/value being outside of the standard ascii character set. Included is a fix to the cache backend debug code to utilize repr for passing the keys/values to the logger. 
Tests in test_backend provided by chenxiao Closes-bug: 1237892 Change-Id: I4df1c9eb6b4a1367defdbb6bcbcd1f1f992348e5 --- keystone/common/cache/core.py | 40 ++++++++++++++++++++++++++++------- 1 file changed, 32 insertions(+), 8 deletions(-) diff --git a/keystone/common/cache/core.py b/keystone/common/cache/core.py index fe4a54a6..0a8da707 100644 --- a/keystone/common/cache/core.py +++ b/keystone/common/cache/core.py @@ -28,10 +28,8 @@ from keystone.openstack.common import log CONF = config.CONF LOG = log.getLogger(__name__) -REGION = dogpile.cache.make_region() make_region = dogpile.cache.make_region -on_arguments = REGION.cache_on_arguments dogpile.cache.register_backend( 'keystone.common.cache.noop', @@ -41,33 +39,39 @@ dogpile.cache.register_backend( class DebugProxy(proxy.ProxyBackend): """Extra Logging ProxyBackend.""" + # NOTE(morganfainberg): Pass all key/values through repr to ensure we have + # a clean description of the information. Without use of repr, it might + # be possible to run into encode/decode error(s). For logging/debugging + # purposes encode/decode is irrelevant and we should be looking at the + # data exactly as it stands. + def get(self, key): value = self.proxied.get(key) msg = _('CACHE_GET: Key: "%(key)s" Value: "%(value)s"') - LOG.debug(msg % {'key': key, 'value': value}) + LOG.debug(msg % {'key': repr(key), 'value': repr(value)}) return value def get_multi(self, keys): values = self.proxied.get_multi(keys) msg = _('CACHE_GET_MULTI: "%(keys)s" Values: "%(values)s"') - LOG.debug(msg % {'keys': keys, 'values': values}) + LOG.debug(msg % {'keys': repr(keys), 'values': repr(values)}) return values def set(self, key, value): msg = _('CACHE_SET: Key: "%(key)s" Value: "%(value)s"') - LOG.debug(msg % {'key': key, 'value': value}) + LOG.debug(msg % {'key': repr(key), 'value': repr(value)}) return self.proxied.set(key, value) def set_multi(self, keys): - LOG.debug(_('CACHE_SET_MULTI: "%s"') % keys) + LOG.debug(_('CACHE_SET_MULTI: "%s"') % repr(keys)) self.proxied.set_multi(keys) def delete(self, key): self.proxied.delete(key) - LOG.debug(_('CACHE_DELETE: "%s"') % key) + LOG.debug(_('CACHE_DELETE: "%s"') % repr(key)) def delete_multi(self, keys): - LOG.debug(_('CACHE_DELETE_MULTI: "%s"') % keys) + LOG.debug(_('CACHE_DELETE_MULTI: "%s"') % repr(keys)) self.proxied.delete_multi(keys) @@ -176,3 +180,23 @@ def should_cache_fn(section): conf_group = getattr(CONF, section) return getattr(conf_group, 'caching', True) return should_cache + + +def key_generate_to_str(s): + # NOTE(morganfainberg): Since we need to stringify all arguments, attempt + # to stringify and handle the Unicode error explicitly as needed. + try: + return str(s) + except UnicodeEncodeError: + return s.encode('utf-8') + + +def function_key_generator(namespace, fn, to_str=key_generate_to_str): + # NOTE(morganfainberg): This wraps dogpile.cache's default + # function_key_generator to change the default to_str mechanism. 
+ return util.function_key_generator(namespace, fn, to_str=to_str) + + +REGION = dogpile.cache.make_region( + function_key_generator=function_key_generator) +on_arguments = REGION.cache_on_arguments From 465da5006501c5661683c7782505ce49785e3782 Mon Sep 17 00:00:00 2001 From: Lars Butler Date: Tue, 29 Oct 2013 17:23:45 +0100 Subject: [PATCH 10/47] Style improvements to logging format strings Added the following improvements: * Use the logging API to handle formatting of message args, which potentially prevents us from having to do a bunch of redundant string formatting for logging statements which occur below the runtime's logging level. For example, in many places `LOG.debug('foo %s' % bar)` has been replaced with `LOG.debug('foo %s', bar)`. * Similarly, this change also removes a few redundant repr() and str() calls. For example: `LOG.error('foo %r', repr(bar))` -> `LOG.error('foo %r', bar)`, and the same for %s/str(). * One logging statement included some unnecessary escaping in the log message string (e.g., 'foo is a \'bar\'', which was replaced with "foo is a 'bar'"). * Added some missing message translation calls. Change-Id: I9080aebdf0ce0e719949d009e0ab85bfc6be8646 Closes-Bug: #1234283 --- keystone/common/cache/core.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/keystone/common/cache/core.py b/keystone/common/cache/core.py index 0a8da707..c95e4200 100644 --- a/keystone/common/cache/core.py +++ b/keystone/common/cache/core.py @@ -47,31 +47,31 @@ class DebugProxy(proxy.ProxyBackend): def get(self, key): value = self.proxied.get(key) - msg = _('CACHE_GET: Key: "%(key)s" Value: "%(value)s"') - LOG.debug(msg % {'key': repr(key), 'value': repr(value)}) + LOG.debug(_('CACHE_GET: Key: "%(key)r" Value: "%(value)r"'), + {'key': key, 'value': value}) return value def get_multi(self, keys): values = self.proxied.get_multi(keys) - msg = _('CACHE_GET_MULTI: "%(keys)s" Values: "%(values)s"') - LOG.debug(msg % {'keys': repr(keys), 'values': repr(values)}) + LOG.debug(_('CACHE_GET_MULTI: "%(keys)r" Values: "%(values)r"'), + {'keys': keys, 'values': values}) return values def set(self, key, value): - msg = _('CACHE_SET: Key: "%(key)s" Value: "%(value)s"') - LOG.debug(msg % {'key': repr(key), 'value': repr(value)}) + LOG.debug(_('CACHE_SET: Key: "%(key)r" Value: "%(value)r"'), + {'key': key, 'value': value}) return self.proxied.set(key, value) def set_multi(self, keys): - LOG.debug(_('CACHE_SET_MULTI: "%s"') % repr(keys)) + LOG.debug(_('CACHE_SET_MULTI: "%r"'), keys) self.proxied.set_multi(keys) def delete(self, key): self.proxied.delete(key) - LOG.debug(_('CACHE_DELETE: "%s"') % repr(key)) + LOG.debug(_('CACHE_DELETE: "%r"'), key) def delete_multi(self, keys): - LOG.debug(_('CACHE_DELETE_MULTI: "%s"') % repr(keys)) + LOG.debug(_('CACHE_DELETE_MULTI: "%r"'), keys) self.proxied.delete_multi(keys) @@ -145,7 +145,7 @@ def configure_cache_region(region): # ProxyBackends work, see the dogpile.cache documents on # "changing-backend-behavior" cls = importutils.import_class(class_path) - LOG.debug(_('Adding cache-proxy \'%s\' to backend.') % class_path) + LOG.debug(_("Adding cache-proxy '%s' to backend."), class_path) region.wrap(cls) return region From 4ddcc26fdded7da90bc0a684ad4f7feb3e04278c Mon Sep 17 00:00:00 2001 From: Brant Knudson Date: Fri, 20 Dec 2013 18:15:17 -0600 Subject: [PATCH 11/47] Documentation cleanup Several warnings and errors were reported when building documentation. 
This makes it difficult for developers to validate that new changes to don't have problems. Most of the warnings and errors are fixed with this change. Change-Id: Ifb9edffce66dde27fbe72bb1ce1b997b041786ae --- keystone/common/cache/core.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/keystone/common/cache/core.py b/keystone/common/cache/core.py index c95e4200..a8f3ba0e 100644 --- a/keystone/common/cache/core.py +++ b/keystone/common/cache/core.py @@ -160,7 +160,7 @@ def should_cache_fn(section): decorator if caching for that driver is enabled. To properly use this with the decorator, pass this function the configuration section and assign the result to a variable. Pass the new variable to the caching decorator - as the named argument ``should_cache_fn``. e.g.: + as the named argument ``should_cache_fn``. e.g.:: from keystone.common import cache From 6a5d4843fece8ca7d98c9cb687a9bf43ac66ce6a Mon Sep 17 00:00:00 2001 From: Eric Guo Date: Sat, 8 Feb 2014 23:15:34 +0800 Subject: [PATCH 12/47] Remove vim header We don't need vim modelines in each source file, it can be set in user's vimrc. Change-Id: Ie51ad62946afdf39eadcd59edaf8134ec10265c6 Closes-Bug: #1229324 --- keystone/common/cache/__init__.py | 2 -- keystone/common/cache/backends/__init__.py | 2 -- keystone/common/cache/backends/noop.py | 2 -- keystone/common/cache/core.py | 2 -- 4 files changed, 8 deletions(-) diff --git a/keystone/common/cache/__init__.py b/keystone/common/cache/__init__.py index c556c80b..ec7de293 100644 --- a/keystone/common/cache/__init__.py +++ b/keystone/common/cache/__init__.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2013 Metacloud # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/keystone/common/cache/backends/__init__.py b/keystone/common/cache/backends/__init__.py index 42ad3383..fea79e66 100644 --- a/keystone/common/cache/backends/__init__.py +++ b/keystone/common/cache/backends/__init__.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2013 Metacloud # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/keystone/common/cache/backends/noop.py b/keystone/common/cache/backends/noop.py index 646610a3..38329c94 100644 --- a/keystone/common/cache/backends/noop.py +++ b/keystone/common/cache/backends/noop.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2013 Metacloud # # Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/keystone/common/cache/core.py b/keystone/common/cache/core.py index a8f3ba0e..0ced079d 100644 --- a/keystone/common/cache/core.py +++ b/keystone/common/cache/core.py @@ -1,5 +1,3 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - # Copyright 2013 Metacloud # # Licensed under the Apache License, Version 2.0 (the "License"); you may From 5554a80c19849ea98de84d97fa674644cf431f79 Mon Sep 17 00:00:00 2001 From: Eric Guo Date: Sat, 8 Feb 2014 23:34:21 +0800 Subject: [PATCH 13/47] Remove copyright from empty files According to policy change in HACKING: http://docs.openstack.org/developer/hacking/#openstack-licensing empty files should no longer contain copyright notices. 
Change-Id: I65e85baf72cf1a413c4b4c8c478587246d4c319a Closes-Bug: #1262424 --- keystone/common/cache/backends/__init__.py | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/keystone/common/cache/backends/__init__.py b/keystone/common/cache/backends/__init__.py index fea79e66..e69de29b 100644 --- a/keystone/common/cache/backends/__init__.py +++ b/keystone/common/cache/backends/__init__.py @@ -1,13 +0,0 @@ -# Copyright 2013 Metacloud -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. From fb5e8c9b5a40f63038d888d51d081ce0528ddb80 Mon Sep 17 00:00:00 2001 From: Arun Kant Date: Fri, 7 Feb 2014 14:24:39 -0800 Subject: [PATCH 14/47] Support for mongo as dogpile cache backend. With this new optional caching backend, MongoDB can be used for caching data. Change-Id: I25ba1cac9456d5e125a5eac99d42330507d4e329 Blueprint: mongodb-dogpile-caching-backend --- keystone/common/cache/backends/mongo.py | 557 ++++++++++++++++++++++++ keystone/common/cache/core.py | 5 + 2 files changed, 562 insertions(+) create mode 100644 keystone/common/cache/backends/mongo.py diff --git a/keystone/common/cache/backends/mongo.py b/keystone/common/cache/backends/mongo.py new file mode 100644 index 00000000..65ab3d1d --- /dev/null +++ b/keystone/common/cache/backends/mongo.py @@ -0,0 +1,557 @@ +# Copyright 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import abc +import datetime + +from dogpile.cache import api +from dogpile.cache import util as dp_util +import six + +from keystone import exception +from keystone.openstack.common import importutils +from keystone.openstack.common import log +from keystone.openstack.common import timeutils + + +NO_VALUE = api.NO_VALUE +LOG = log.getLogger(__name__) + + +class MongoCacheBackend(api.CacheBackend): + """A MongoDB based caching backend implementing dogpile backend APIs. + + Arguments accepted in the arguments dictionary: + + :param db_hosts: string (required), hostname or IP address of the + MongoDB server instance. This can be a single MongoDB connection URI, + or a list of MongoDB connection URIs. + + :param db_name: string (required), the name of the database to be used. + + :param cache_collection: string (required), the name of collection to store + cached data. + *Note:* Different collection name can be provided if there is need to + create separate container (i.e. collection) for cache data. So region + configuration is done per collection. 
+ + Following are optional parameters for MongoDB backend configuration, + + :param username: string, the name of the user to authenticate. + + :param password: string, the password of the user to authenticate. + + :param max_pool_size: integer, the maximum number of connections that the + pool will open simultaneously. By default the pool size is 10. + + :param w: integer, write acknowledgement for MongoDB client + + If not provided, then no default is set on MongoDB and then write + acknowledgement behavior occurs as per MongoDB default. This parameter + name is same as what is used in MongoDB docs. This value is specified + at collection level so its applicable to `cache_collection` db write + operations. + + If this is a replica set, write operations will block until they have + been replicated to the specified number or tagged set of servers. + Setting w=0 disables write acknowledgement and all other write concern + options. + + :param read_preference: string, the read preference mode for MongoDB client + Expected value is ``primary``, ``primaryPreferred``, ``secondary``, + ``secondaryPreferred``, or ``nearest``. This read_preference is + specified at collection level so its applicable to `cache_collection` + db read operations. + + :param use_replica: boolean, flag to indicate if replica client to be + used. Default is `False`. `replicaset_name` value is required if + `True`. + + :param replicaset_name: string, name of replica set. + Becomes required if `use_replica` is `True` + + :param son_manipulator: string, name of class with module name which + implements MongoDB SONManipulator. + Default manipulator used is :class:`.BaseTransform`. + + This manipulator is added per database. In multiple cache + configurations, the manipulator name should be same if same + database name ``db_name`` is used in those configurations. + + SONManipulator is used to manipulate custom data types as they are + saved or retrieved from MongoDB. Custom impl is only needed if cached + data is custom class and needs transformations when saving or reading + from db. If dogpile cached value contains built-in data types, then + BaseTransform class is sufficient as it already handles dogpile + CachedValue class transformation. + + :param mongo_ttl_seconds: integer, interval in seconds to indicate maximum + time-to-live value. + If value is greater than 0, then its assumed that cache_collection + needs to be TTL type (has index at 'doc_date' field). + By default, the value is -1 and its disabled. + Reference: + + .. NOTE:: + + This parameter is different from Dogpile own + expiration_time, which is the number of seconds after which Dogpile + will consider the value to be expired. When Dogpile considers a + value to be expired, it continues to use the value until generation + of a new value is complete, when using CacheRegion.get_or_create(). + Therefore, if you are setting `mongo_ttl_seconds`, you will want to + make sure it is greater than expiration_time by at least enough + seconds for new values to be generated, else the value would not + be available during a regeneration, forcing all threads to wait for + a regeneration each time a value expires. + + :param ssl: boolean, If True, create the connection to the server + using SSL. Default is `False`. Client SSL connection parameters depends + on server side SSL setup. For further reference on SSL configuration: + + + :param ssl_keyfile: string, the private keyfile used to identify the + local connection against mongod. 
If included with the certfile then + only the `ssl_certfile` is needed. Used only when `ssl` is `True`. + + :param ssl_certfile: string, the certificate file used to identify the + local connection against mongod. Used only when `ssl` is `True`. + + :param ssl_ca_certs: string, the ca_certs file contains a set of + concatenated 'certification authority' certificates, which are used to + validate certificates passed from the other end of the connection. + Used only when `ssl` is `True`. + + :param ssl_cert_reqs: string, the parameter cert_reqs specifies whether + a certificate is required from the other side of the connection, and + whether it will be validated if provided. It must be one of the three + values ``ssl.CERT_NONE`` (certificates ignored), ``ssl.CERT_OPTIONAL`` + (not required, but validated if provided), or + ``ssl.CERT_REQUIRED`` (required and validated). If the value of this + parameter is not ``ssl.CERT_NONE``, then the ssl_ca_certs parameter + must point to a file of CA certificates. Used only when `ssl` + is `True`. + + Rest of arguments are passed to mongo calls for read, write and remove. + So related options can be specified to pass to these operations. + + Further details of various supported arguments can be referred from + + + """ + + def __init__(self, arguments): + self.api = MongoApi(arguments) + + @dp_util.memoized_property + def client(self): + """Initializes MongoDB connection and collection defaults. + + This initialization is done only once and performed as part of lazy + inclusion of MongoDB dependency i.e. add imports only if related + backend is used. + + :return: :class:`.MongoApi` instance + """ + self.api.get_cache_collection() + return self.api + + def get(self, key): + value = self.client.get(key) + if value is None: + return NO_VALUE + else: + return value + + def get_multi(self, keys): + values = self.client.get_multi(keys) + return [ + NO_VALUE if key not in values + else values[key] for key in keys + ] + + def set(self, key, value): + self.client.set(key, value) + + def set_multi(self, mapping): + self.client.set_multi(mapping) + + def delete(self, key): + self.client.delete(key) + + def delete_multi(self, keys): + self.client.delete_multi(keys) + + +class MongoApi(object): + """Class handling MongoDB specific functionality. + + This class uses PyMongo APIs internally to create database connection + with configured pool size, ensures unique index on key, does database + authentication and ensure TTL collection index if configured so. + This class also serves as handle to cache collection for dogpile cache + APIs. + + In a single deployment, multiple cache configuration can be defined. In + that case of multiple cache collections usage, db client connection pool + is shared when cache collections are within same database. + """ + + # class level attributes for re-use of db client connection and collection + _DB = {} # dict of db_name: db connection reference + _MONGO_COLLS = {} # dict of cache_collection : db collection reference + + def __init__(self, arguments): + self._init_args(arguments) + self._data_manipulator = None + + def _init_args(self, arguments): + """Helper logic for collecting and parsing MongoDB specific arguments. + + The arguments passed in are separated out in connection specific + setting and rest of arguments are passed to create/update/delete + db operations. 
+ """ + self.conn_kwargs = {} # connection specific arguments + + self.hosts = arguments.pop('db_hosts', None) + if self.hosts is None: + msg = _('db_hosts value is required') + raise exception.ValidationError(message=msg) + + self.db_name = arguments.pop('db_name', None) + if self.db_name is None: + msg = _('database db_name is required') + raise exception.ValidationError(message=msg) + + self.cache_collection = arguments.pop('cache_collection', None) + if self.cache_collection is None: + msg = _('cache_collection name is required') + raise exception.ValidationError(message=msg) + + self.username = arguments.pop('username', None) + self.password = arguments.pop('password', None) + self.max_pool_size = arguments.pop('max_pool_size', 10) + + self.w = arguments.pop('w', -1) + try: + self.w = int(self.w) + except ValueError: + msg = _('integer value expected for w (write concern attribute)') + raise exception.ValidationError(message=msg) + + self.read_preference = arguments.pop('read_preference', None) + + self.use_replica = arguments.pop('use_replica', False) + if self.use_replica: + if arguments.get('replicaset_name') is None: + msg = _('replicaset_name required when use_replica is True') + raise exception.ValidationError(message=msg) + self.replicaset_name = arguments.get('replicaset_name') + + self.son_manipulator = arguments.pop('son_manipulator', None) + + # set if mongo collection needs to be TTL type. + # This needs to be max ttl for any cache entry. + # By default, -1 means don't use TTL collection. + # With ttl set, it creates related index and have doc_date field with + # needed expiration interval + self.ttl_seconds = arguments.pop('mongo_ttl_seconds', -1) + try: + self.ttl_seconds = int(self.ttl_seconds) + except ValueError: + msg = _('integer value expected for mongo_ttl_seconds') + raise exception.ValidationError(message=msg) + + self.conn_kwargs['ssl'] = arguments.pop('ssl', False) + if self.conn_kwargs['ssl']: + ssl_keyfile = arguments.pop('ssl_keyfile', None) + ssl_certfile = arguments.pop('ssl_certfile', None) + ssl_ca_certs = arguments.pop('ssl_ca_certs', None) + ssl_cert_reqs = arguments.pop('ssl_cert_reqs', None) + if ssl_keyfile: + self.conn_kwargs['ssl_keyfile'] = ssl_keyfile + if ssl_certfile: + self.conn_kwargs['ssl_certfile'] = ssl_certfile + if ssl_ca_certs: + self.conn_kwargs['ssl_ca_certs'] = ssl_ca_certs + if ssl_cert_reqs: + self.conn_kwargs['ssl_cert_reqs'] = \ + self._ssl_cert_req_type(ssl_cert_reqs) + + # rest of arguments are passed to mongo crud calls + self.meth_kwargs = arguments + + def _ssl_cert_req_type(self, req_type): + try: + import ssl + except ImportError: + raise exception.ValidationError(_('no ssl support available')) + req_type = req_type.upper() + try: + return { + 'NONE': ssl.CERT_NONE, + 'OPTIONAL': ssl.CERT_OPTIONAL, + 'REQUIRED': ssl.CERT_REQUIRED + }[req_type] + except KeyError: + msg = _('Invalid ssl_cert_reqs value of %s, must be one of ' + '"NONE", "OPTIONAL", "REQUIRED"') % (req_type) + raise exception.ValidationError(message=msg) + + def _get_db(self): + # defer imports until backend is used + global pymongo + import pymongo + if self.use_replica: + connection = pymongo.MongoReplicaSetClient( + host=self.hosts, replicaSet=self.replicaset_name, + max_pool_size=self.max_pool_size, **self.conn_kwargs) + else: # used for standalone node or mongos in sharded setup + connection = pymongo.MongoClient( + host=self.hosts, max_pool_size=self.max_pool_size, + **self.conn_kwargs) + + database = getattr(connection, self.db_name) + + 
self._assign_data_mainpulator() + database.add_son_manipulator(self._data_manipulator) + if self.username and self.password: + database.authenticate(self.username, self.password) + return database + + def _assign_data_mainpulator(self): + if self._data_manipulator is None: + if self.son_manipulator: + self._data_manipulator = importutils.import_object( + self.son_manipulator) + else: + self._data_manipulator = BaseTransform() + + def _get_doc_date(self): + if self.ttl_seconds > 0: + expire_delta = datetime.timedelta(seconds=self.ttl_seconds) + doc_date = timeutils.utcnow() + expire_delta + else: + doc_date = timeutils.utcnow() + return doc_date + + def get_cache_collection(self): + if self.cache_collection not in self._MONGO_COLLS: + global pymongo + import pymongo + # re-use db client connection if already defined as part of + # earlier dogpile cache configuration + if self.db_name not in self._DB: + self._DB[self.db_name] = self._get_db() + coll = getattr(self._DB[self.db_name], self.cache_collection) + + self._assign_data_mainpulator() + if self.read_preference: + self.read_preference = pymongo.read_preferences.\ + mongos_enum(self.read_preference) + coll.read_preference = self.read_preference + if self.w > -1: + coll.write_concern['w'] = self.w + if self.ttl_seconds > 0: + kwargs = {'expireAfterSeconds': self.ttl_seconds} + coll.ensure_index('doc_date', cache_for=5, **kwargs) + else: + self._validate_ttl_index(coll, self.cache_collection, + self.ttl_seconds) + self._MONGO_COLLS[self.cache_collection] = coll + + return self._MONGO_COLLS[self.cache_collection] + + def _get_cache_entry(self, key, value, meta, doc_date): + """MongoDB cache data representation. + + Storing cache key as ``_id`` field as MongoDB by default creates + unique index on this field. So no need to create separate field and + index for storing cache key. Cache data has additional ``doc_date`` + field for MongoDB TTL collection support. + """ + return dict(_id=key, value=value, meta=meta, doc_date=doc_date) + + def _validate_ttl_index(self, collection, coll_name, ttl_seconds): + """Checks if existing TTL index is removed on a collection. + + This logs warning when existing collection has TTL index defined and + new cache configuration tries to disable index with + ``mongo_ttl_seconds < 0``. In that case, existing index needs + to be addressed first to make new configuration effective. + Refer to MongoDB documentation around TTL index for further details. 
+ """ + indexes = collection.index_information() + for indx_name, index_data in six.iteritems(indexes): + if all(k in index_data for k in ('key', 'expireAfterSeconds')): + existing_value = index_data['expireAfterSeconds'] + fld_present = 'doc_date' in index_data['key'][0] + if fld_present and existing_value > -1 and ttl_seconds < 1: + msg = _('TTL index already exists on db collection ' + '<%(c_name)s>, remove index <%(indx_name)s> first' + ' to make updated mongo_ttl_seconds value to be ' + ' effective') + LOG.warn(msg, {'c_name': coll_name, + 'indx_name': indx_name}) + + def get(self, key): + critieria = {'_id': key} + result = self.get_cache_collection().find_one(spec_or_id=critieria, + **self.meth_kwargs) + if result: + return result['value'] + else: + return None + + def get_multi(self, keys): + db_results = self._get_results_as_dict(keys) + return dict((doc['_id'], doc['value']) for doc in + six.itervalues(db_results)) + + def _get_results_as_dict(self, keys): + critieria = {'_id': {'$in': keys}} + db_results = self.get_cache_collection().find(spec=critieria, + **self.meth_kwargs) + return dict((doc['_id'], doc) for doc in db_results) + + def set(self, key, value): + doc_date = self._get_doc_date() + ref = self._get_cache_entry(key, value.payload, value.metadata, + doc_date) + spec = {'_id': key} + # find and modify does not have manipulator support + # so need to do conversion as part of input document + ref = self._data_manipulator.transform_incoming(ref, self) + self.get_cache_collection().find_and_modify(spec, ref, upsert=True, + **self.meth_kwargs) + + def set_multi(self, mapping): + """Insert multiple documents specified as key, value pairs. + + In this case, multiple documents can be added via insert provided they + do not exist. + Update of multiple existing documents is done one by one + """ + doc_date = self._get_doc_date() + insert_refs = [] + update_refs = [] + existing_docs = self._get_results_as_dict(mapping.keys()) + for key, value in mapping.items(): + ref = self._get_cache_entry(key, value.payload, value.metadata, + doc_date) + if key in existing_docs: + ref['_id'] = existing_docs[key]['_id'] + update_refs.append(ref) + else: + insert_refs.append(ref) + if insert_refs: + self.get_cache_collection().insert(insert_refs, manipulate=True, + **self.meth_kwargs) + for upd_doc in update_refs: + self.get_cache_collection().save(upd_doc, manipulate=True, + **self.meth_kwargs) + + def delete(self, key): + critieria = {'_id': key} + self.get_cache_collection().remove(spec_or_id=critieria, + **self.meth_kwargs) + + def delete_multi(self, keys): + critieria = {'_id': {'$in': keys}} + self.get_cache_collection().remove(spec_or_id=critieria, + **self.meth_kwargs) + + +@six.add_metaclass(abc.ABCMeta) +class AbstractManipulator(object): + """Abstract class with methods which need to be implemented for custom + manipulation. + + Adding this as a base class for :class:`.BaseTransform` instead of adding + import dependency of pymongo specific class i.e. + `pymongo.son_manipulator.SONManipulator` and using that as base class. + This is done to avoid pymongo dependency if MongoDB backend is not used. + """ + @abc.abstractmethod + def transform_incoming(self, son, collection): + """Used while saving data to MongoDB. 
+ + :param son: the SON object to be inserted into the database + :param collection: the collection the object is being inserted into + + :returns: transformed SON object + + """ + raise exception.NotImplemented() + + @abc.abstractmethod + def transform_outgoing(self, son, collection): + """Used while reading data from MongoDB. + + :param son: the SON object being retrieved from the database + :param collection: the collection this object was stored in + + :returns: transformed SON object + """ + raise exception.NotImplemented() + + def will_copy(self): + """Will this SON manipulator make a copy of the incoming document? + + Derived classes that do need to make a copy should override this + method, returning `True` instead of `False`. + + :returns: boolean + """ + return False + + +class BaseTransform(AbstractManipulator): + """Base transformation class to store and read dogpile cached data + from MongoDB. + + This is needed as dogpile internally stores data as a custom class + i.e. dogpile.cache.api.CachedValue + + Note: Custom manipulator needs to always override ``transform_incoming`` + and ``transform_outgoing`` methods. MongoDB manipulator logic specifically + checks that overriden method in instance and its super are different. + """ + + def transform_incoming(self, son, collection): + """Used while saving data to MongoDB.""" + for (key, value) in son.items(): + if isinstance(value, api.CachedValue): + son[key] = value.payload # key is 'value' field here + son['meta'] = value.metadata + elif isinstance(value, dict): # Make sure we recurse into sub-docs + son[key] = self.transform_incoming(value, collection) + return son + + def transform_outgoing(self, son, collection): + """Used while reading data from MongoDB.""" + metadata = None + # make sure its top level dictionary with all expected fields names + # present + if isinstance(son, dict) and all(k in son for k in + ('_id', 'value', 'meta', 'doc_date')): + payload = son.pop('value', None) + metadata = son.pop('meta', None) + for (key, value) in son.items(): + if isinstance(value, dict): + son[key] = self.transform_outgoing(value, collection) + if metadata is not None: + son['value'] = api.CachedValue(payload, metadata) + return son diff --git a/keystone/common/cache/core.py b/keystone/common/cache/core.py index 0ced079d..1f80063a 100644 --- a/keystone/common/cache/core.py +++ b/keystone/common/cache/core.py @@ -34,6 +34,11 @@ dogpile.cache.register_backend( 'keystone.common.cache.backends.noop', 'NoopCacheBackend') +dogpile.cache.register_backend( + 'keystone.cache.mongo', + 'keystone.common.cache.backends.mongo', + 'MongoCacheBackend') + class DebugProxy(proxy.ProxyBackend): """Extra Logging ProxyBackend.""" From 821ebc2d76a22fc21ac82fa2e594395c7522a811 Mon Sep 17 00:00:00 2001 From: Ilya Pekelny Date: Fri, 29 Nov 2013 16:46:08 +0200 Subject: [PATCH 15/47] Uses explicit imports for _ Previously `_` was monkeypatched in tests/core.py and bin/keystone-*. This meant that if a developer was not running the tests exactly as the documentation described they would not work. Even importing certain modules in a interactive Python interpreter would fail unless keystone.tests was imported first. Monkeypatching was removed and explicit import for `_` was added. 
Co-Authored-By: David Stanek Change-Id: I8b25b5b6d83fb873e25a8fab7686babf1d2261fa Closes-Bug: #1255518 --- keystone/common/cache/backends/mongo.py | 1 + keystone/common/cache/core.py | 1 + 2 files changed, 2 insertions(+) diff --git a/keystone/common/cache/backends/mongo.py b/keystone/common/cache/backends/mongo.py index 65ab3d1d..bde4d8d9 100644 --- a/keystone/common/cache/backends/mongo.py +++ b/keystone/common/cache/backends/mongo.py @@ -20,6 +20,7 @@ from dogpile.cache import util as dp_util import six from keystone import exception +from keystone.openstack.common.gettextutils import _ from keystone.openstack.common import importutils from keystone.openstack.common import log from keystone.openstack.common import timeutils diff --git a/keystone/common/cache/core.py b/keystone/common/cache/core.py index 1f80063a..f66b7037 100644 --- a/keystone/common/cache/core.py +++ b/keystone/common/cache/core.py @@ -20,6 +20,7 @@ from dogpile.cache import util from keystone import config from keystone import exception +from keystone.openstack.common.gettextutils import _ from keystone.openstack.common import importutils from keystone.openstack.common import log From 516c2a1001a6574fbec99a85f13dc10d095fa75b Mon Sep 17 00:00:00 2001 From: Brant Knudson Date: Thu, 27 Mar 2014 15:09:21 -0500 Subject: [PATCH 16/47] Safer noqa handling "# flake8: noqa" was used in several files. This causes the entire file to not be checked by flake8. This is unsafe, and "# noqa" should be used only on those lines that require it. E712 doesn't honor #noqa, so work around it by assigning True to a variable. Change-Id: I1ddd1c4f4230793f0560241e4559095cb4183d71 --- keystone/common/cache/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/keystone/common/cache/__init__.py b/keystone/common/cache/__init__.py index ec7de293..49502399 100644 --- a/keystone/common/cache/__init__.py +++ b/keystone/common/cache/__init__.py @@ -12,4 +12,4 @@ # License for the specific language governing permissions and limitations # under the License. -from keystone.common.cache.core import * # flake8: noqa +from keystone.common.cache.core import * # noqa From 08d5e31742f1b909b5c07040c9c33754cb2a6111 Mon Sep 17 00:00:00 2001 From: David Stanek Date: Mon, 31 Mar 2014 00:26:17 +0000 Subject: [PATCH 17/47] Fix cache configuration checks There were two different checks to see if a cache region was already configured with a backend. Both were changed to use the Region.is_configured property. The initial reason for this change was that the hasattr approach in the test fixture was causing some issues in Python 3. For some reason it was forcing the __get__ lookup on the descriptor and Python 2 did not. bp python3 Change-Id: I409656f5ede08160cc68d48e7a3a636328e3e9a6 --- keystone/common/cache/core.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/keystone/common/cache/core.py b/keystone/common/cache/core.py index f66b7037..7703652f 100644 --- a/keystone/common/cache/core.py +++ b/keystone/common/cache/core.py @@ -118,7 +118,7 @@ def configure_cache_region(region): raise exception.ValidationError( _('region not type dogpile.cache.CacheRegion')) - if 'backend' not in region.__dict__: + if not region.is_configured: # NOTE(morganfainberg): this is how you tell if a region is configured. # There is a request logged with dogpile.cache upstream to make this # easier / less ugly. 
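With the ``is_configured`` guard above, region setup through this module can safely be invoked more than once; a minimal sketch of the wiring (the call site is illustrative, not part of these patches):

    from keystone.common import cache

    # The shared module-level region, or a private one from cache.make_region().
    region = cache.REGION

    def configure_caching():
        # Applies the [cache] backend, expiration time, proxies and key
        # mangler the first time through; a later call finds the region
        # already configured and leaves it untouched.
        cache.configure_cache_region(region)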
From a5fee9efa9e01dcf498c70d96bab68a9027bb700 Mon Sep 17 00:00:00 2001 From: Alex Gaynor Date: Thu, 1 May 2014 07:14:13 -0700 Subject: [PATCH 18/47] Fixed some typos throughout the codebase Change-Id: I66e421b1743f7b3e3e7ecd34e64f67b6e0a53f2c --- keystone/common/cache/backends/mongo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/keystone/common/cache/backends/mongo.py b/keystone/common/cache/backends/mongo.py index bde4d8d9..d7cfd497 100644 --- a/keystone/common/cache/backends/mongo.py +++ b/keystone/common/cache/backends/mongo.py @@ -528,7 +528,7 @@ class BaseTransform(AbstractManipulator): Note: Custom manipulator needs to always override ``transform_incoming`` and ``transform_outgoing`` methods. MongoDB manipulator logic specifically - checks that overriden method in instance and its super are different. + checks that overridden method in instance and its super are different. """ def transform_incoming(self, son, collection): From 37cdddcc3d2fda8c85b0d619e5421969a43bb27d Mon Sep 17 00:00:00 2001 From: Dolph Mathews Date: Tue, 13 May 2014 09:18:51 -0500 Subject: [PATCH 19/47] remove a few backslash line continuations Change-Id: I278eeaacf349691d436b17a2bc1f2315c3ab8429 --- keystone/common/cache/backends/mongo.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/keystone/common/cache/backends/mongo.py b/keystone/common/cache/backends/mongo.py index d7cfd497..6a5a8224 100644 --- a/keystone/common/cache/backends/mongo.py +++ b/keystone/common/cache/backends/mongo.py @@ -288,8 +288,8 @@ class MongoApi(object): if ssl_ca_certs: self.conn_kwargs['ssl_ca_certs'] = ssl_ca_certs if ssl_cert_reqs: - self.conn_kwargs['ssl_cert_reqs'] = \ - self._ssl_cert_req_type(ssl_cert_reqs) + self.conn_kwargs['ssl_cert_reqs'] = ( + self._ssl_cert_req_type(ssl_cert_reqs)) # rest of arguments are passed to mongo crud calls self.meth_kwargs = arguments @@ -360,8 +360,8 @@ class MongoApi(object): self._assign_data_mainpulator() if self.read_preference: - self.read_preference = pymongo.read_preferences.\ - mongos_enum(self.read_preference) + self.read_preference = pymongo.read_preferences.mongos_enum( + self.read_preference) coll.read_preference = self.read_preference if self.w > -1: coll.write_concern['w'] = self.w From 1bd04212e00ca174b8e66af39b1dc6c719fed6e3 Mon Sep 17 00:00:00 2001 From: David Stanek Date: Fri, 20 Jun 2014 16:40:22 +0000 Subject: [PATCH 20/47] Adds hacking check for debug logging translations bp more-code-style-automation Change-Id: Id54f322f00b04a165bb4a7b1e24f95bb72b7f068 --- keystone/common/cache/core.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/keystone/common/cache/core.py b/keystone/common/cache/core.py index 7703652f..c6a20113 100644 --- a/keystone/common/cache/core.py +++ b/keystone/common/cache/core.py @@ -51,31 +51,31 @@ class DebugProxy(proxy.ProxyBackend): def get(self, key): value = self.proxied.get(key) - LOG.debug(_('CACHE_GET: Key: "%(key)r" Value: "%(value)r"'), + LOG.debug('CACHE_GET: Key: "%(key)r" Value: "%(value)r"', {'key': key, 'value': value}) return value def get_multi(self, keys): values = self.proxied.get_multi(keys) - LOG.debug(_('CACHE_GET_MULTI: "%(keys)r" Values: "%(values)r"'), + LOG.debug('CACHE_GET_MULTI: "%(keys)r" Values: "%(values)r"', {'keys': keys, 'values': values}) return values def set(self, key, value): - LOG.debug(_('CACHE_SET: Key: "%(key)r" Value: "%(value)r"'), + LOG.debug('CACHE_SET: Key: "%(key)r" Value: "%(value)r"', {'key': key, 'value': value}) return 
self.proxied.set(key, value) def set_multi(self, keys): - LOG.debug(_('CACHE_SET_MULTI: "%r"'), keys) + LOG.debug('CACHE_SET_MULTI: "%r"', keys) self.proxied.set_multi(keys) def delete(self, key): self.proxied.delete(key) - LOG.debug(_('CACHE_DELETE: "%r"'), key) + LOG.debug('CACHE_DELETE: "%r"', key) def delete_multi(self, keys): - LOG.debug(_('CACHE_DELETE_MULTI: "%r"'), keys) + LOG.debug('CACHE_DELETE_MULTI: "%r"', keys) self.proxied.delete_multi(keys) @@ -101,7 +101,7 @@ def build_cache_config(): arg_key = '.'.join([prefix, 'arguments', argname]) conf_dict[arg_key] = argvalue - LOG.debug(_('Keystone Cache Config: %s'), conf_dict) + LOG.debug('Keystone Cache Config: %s', conf_dict) return conf_dict @@ -149,7 +149,7 @@ def configure_cache_region(region): # ProxyBackends work, see the dogpile.cache documents on # "changing-backend-behavior" cls = importutils.import_class(class_path) - LOG.debug(_("Adding cache-proxy '%s' to backend."), class_path) + LOG.debug("Adding cache-proxy '%s' to backend.", class_path) region.wrap(cls) return region From 21e6e3719dfa06bd00855fab5451d3af84878e14 Mon Sep 17 00:00:00 2001 From: Morgan Fainberg Date: Fri, 27 Jun 2014 14:28:38 -0700 Subject: [PATCH 21/47] Do not support toggling key_manglers in cache layer Due to needing to pass the PKI token (complete id) to the backend, the cache layer needs to ensure that the cache-key is always shortened down to a reasonable length. Disabling the key_mangler should only have ever been done in limited debug configurations. Real deployments would be unable to reliably run without the key mangler (due to cache-key length on most systems being exceeded by the Token calls). This change of ID being passed around is for supporting non-persistent tokens. DocImpact: configuration.rst updated to reflect the change in configuration options for keystone cache layer. bp: non-persistent-tokens Change-Id: Ia4eb3df7ccffa58ee867120f698e24d926f0ec9e --- keystone/common/cache/core.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/keystone/common/cache/core.py b/keystone/common/cache/core.py index 7703652f..6515e645 100644 --- a/keystone/common/cache/core.py +++ b/keystone/common/cache/core.py @@ -134,12 +134,9 @@ def configure_cache_region(region): # key_mangler, we should respect that key_mangler function. If a # key_mangler is not defined by the backend, use the sha1_mangle_key # mangler provided by dogpile.cache. This ensures we always use a fixed - # size cache-key. This is toggle-able for debug purposes; if disabled - # this could cause issues with certain backends (such as memcached) and - # its limited key-size. + # size cache-key. if region.key_mangler is None: - if CONF.cache.use_key_mangler: - region.key_mangler = util.sha1_mangle_key + region.key_mangler = util.sha1_mangle_key for class_path in CONF.cache.proxies: # NOTE(morganfainberg): if we have any proxy wrappers, we should From af2d997c430a0cdb52a9e4f3c59c4220a276b545 Mon Sep 17 00:00:00 2001 From: Brant Knudson Date: Wed, 2 Jul 2014 18:02:08 -0500 Subject: [PATCH 22/47] Use oslo.i18n Keystone was using the I18N functions from oslo-incubator. With this change, Keystone uses the new oslo.i18n library. The tests were adapted to not use internal symbols because these are subject to change. 
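To make the always-on key mangling from the "Do not support toggling key_manglers in cache layer" change above concrete, here is a small standalone sketch. The mangler is a hand-rolled stand-in for dogpile's util.sha1_mangle_key, and the oversized key is invented purely for illustration:

    import hashlib

    from dogpile.cache import make_region


    def sha1_key_mangler(key):
        # Collapse an arbitrarily long cache key (e.g. a complete PKI token id)
        # into a fixed 40-character digest, keeping it well under the key-size
        # limit of backends such as memcached.
        return hashlib.sha1(key.encode('utf-8')).hexdigest()


    region = make_region(key_mangler=sha1_key_mangler).configure(
        'dogpile.cache.memory')

    region.set('token-' + 'x' * 4000, {'user_id': 'example'})
    print(region.get('token-' + 'x' * 4000))  # looked up via the same digest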
Change-Id: I1b13fcc630952695424fccd91bcd157d702851f1 --- keystone/common/cache/backends/mongo.py | 2 +- keystone/common/cache/core.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/keystone/common/cache/backends/mongo.py b/keystone/common/cache/backends/mongo.py index 6a5a8224..91f43f77 100644 --- a/keystone/common/cache/backends/mongo.py +++ b/keystone/common/cache/backends/mongo.py @@ -20,7 +20,7 @@ from dogpile.cache import util as dp_util import six from keystone import exception -from keystone.openstack.common.gettextutils import _ +from keystone.i18n import _ from keystone.openstack.common import importutils from keystone.openstack.common import log from keystone.openstack.common import timeutils diff --git a/keystone/common/cache/core.py b/keystone/common/cache/core.py index 03c90d39..0fc1353b 100644 --- a/keystone/common/cache/core.py +++ b/keystone/common/cache/core.py @@ -20,7 +20,7 @@ from dogpile.cache import util from keystone import config from keystone import exception -from keystone.openstack.common.gettextutils import _ +from keystone.i18n import _ from keystone.openstack.common import importutils from keystone.openstack.common import log From 31a5d9bfad48b67ba08bc9704da861b1519c50dc Mon Sep 17 00:00:00 2001 From: Juan Antonio Osorio Date: Thu, 17 Jul 2014 16:31:24 +0300 Subject: [PATCH 23/47] Introduce pragma no cover to asbtract classes Since the unimplemented methods for abstract classes are not meant to be actually used, they shouldn't be included in the code coverage calculation. Thus, to address this I introduced the pragma "no cover" to these classes. Change-Id: Id239a2eb42d8288764b8f374d5d13ebd37af5a7d --- keystone/common/cache/backends/mongo.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/keystone/common/cache/backends/mongo.py b/keystone/common/cache/backends/mongo.py index 6a5a8224..6732a922 100644 --- a/keystone/common/cache/backends/mongo.py +++ b/keystone/common/cache/backends/mongo.py @@ -495,7 +495,7 @@ class AbstractManipulator(object): :returns: transformed SON object """ - raise exception.NotImplemented() + raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def transform_outgoing(self, son, collection): @@ -506,7 +506,7 @@ class AbstractManipulator(object): :returns: transformed SON object """ - raise exception.NotImplemented() + raise exception.NotImplemented() # pragma: no cover def will_copy(self): """Will this SON manipulator make a copy of the incoming document? From 64429b3aaf22c625334669d9145ca2ddbbbd935f Mon Sep 17 00:00:00 2001 From: Brant Knudson Date: Tue, 5 Aug 2014 16:58:59 -0500 Subject: [PATCH 24/47] Use functions in oslo.utils Keystone was using functions in oslo-incubator that have been graduated into oslo.utils. This changes the function calls to use the functions in oslo.utils. 
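As a generic illustration of the pattern applied in the "pragma no cover" change above; the class here is a hypothetical stand-in, not Keystone's actual MongoDB manipulator:

    import abc

    import six


    @six.add_metaclass(abc.ABCMeta)
    class ManipulatorBase(object):
        """Hypothetical abstract base used only to show the pattern."""

        @abc.abstractmethod
        def transform_incoming(self, son, collection):
            # Subclasses must override this, so the body can never run under
            # the tests; excluding it keeps coverage numbers meaningful.
            raise NotImplementedError()  # pragma: no cover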
Change-Id: I39365042de913e1b3edaf849e3f5578cef0b7b02 --- keystone/common/cache/backends/mongo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/keystone/common/cache/backends/mongo.py b/keystone/common/cache/backends/mongo.py index e7a3a035..0a30ea0b 100644 --- a/keystone/common/cache/backends/mongo.py +++ b/keystone/common/cache/backends/mongo.py @@ -17,13 +17,13 @@ import datetime from dogpile.cache import api from dogpile.cache import util as dp_util +from oslo.utils import timeutils import six from keystone import exception from keystone.i18n import _ from keystone.openstack.common import importutils from keystone.openstack.common import log -from keystone.openstack.common import timeutils NO_VALUE = api.NO_VALUE From fbbfbff36a54525b36b552bfe04ada637ecd297e Mon Sep 17 00:00:00 2001 From: Yuriy Taraday Date: Thu, 28 Aug 2014 14:27:58 +0400 Subject: [PATCH 25/47] Add a pool of memcached clients This patchset adds a pool of memcache clients. This pool allows for reuse of a client object, prevents too many client object from being instantiated, and maintains proper tracking of dead servers so as to limit delays when a server (or all servers) become unavailable. The new memcache pool backend is available either by being set as the memcache backend or by using keystone.token.persistence.backends.memcache_pool.Token for the Token memcache persistence driver. [memcache] servers = 127.0.0.1:11211 dead_retry = 300 socket_timeout = 3 pool_maxsize = 10 pool_unused_timeout = 60 Where: - servers - comma-separated list of host:port pairs (was already there); - dead_retry - number of seconds memcached server is considered dead before it is tried again; - socket_timeout - timeout in seconds for every call to a server; - pool_maxsize - max total number of open connections in the pool; - pool_unused_timeout - number of seconds a connection is held unused in the pool before it is closed; The new memcache pool backend can be used as the driver for the Keystone caching layer. To use it as caching driver, set 'keystone.cache.memcache_pool' as the value of the [cache]\backend option, the other options are the same as above, but with 'memcache_' prefix: [cache] backend = keystone.cache.memcache_pool memcache_servers = 127.0.0.1:11211 memcache_dead_retry = 300 memcache_socket_timeout = 3 memcache_pool_maxsize = 10 memcache_pool_unused_timeout = 60 Co-Authored-By: Morgan Fainberg Closes-bug: #1332058 Closes-bug: #1360446 Change-Id: I3544894482b30a47fcd4fac8948d03136fd83f14 --- keystone/common/cache/_memcache_pool.py | 196 ++++++++++++++++++ .../common/cache/backends/memcache_pool.py | 61 ++++++ keystone/common/cache/core.py | 14 ++ 3 files changed, 271 insertions(+) create mode 100644 keystone/common/cache/_memcache_pool.py create mode 100644 keystone/common/cache/backends/memcache_pool.py diff --git a/keystone/common/cache/_memcache_pool.py b/keystone/common/cache/_memcache_pool.py new file mode 100644 index 00000000..70b86b68 --- /dev/null +++ b/keystone/common/cache/_memcache_pool.py @@ -0,0 +1,196 @@ +# Copyright 2014 Mirantis Inc +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Thread-safe connection pool for python-memcached.""" + +# NOTE(yorik-sar): this file is copied between keystone and keystonemiddleware +# and should be kept in sync until we can use external library for this. + +import collections +import contextlib +import itertools +import logging +import threading +import time + +import memcache +from six.moves import queue + +from keystone import exception +from keystone.i18n import _ +from keystone.openstack.common import log + + +LOG = log.getLogger(__name__) + +# NOTE(morganfainberg): This is used as the maximum number of seconds a get +# of a new connection will wait for before raising an exception indicating +# a serious / most likely non-recoverable delay has occurred. +CONNECTION_GET_TIMEOUT = 120 + +# This 'class' is taken from http://stackoverflow.com/a/22520633/238308 +# Don't inherit client from threading.local so that we can reuse clients in +# different threads +_MemcacheClient = type('_MemcacheClient', (object,), + dict(memcache.Client.__dict__)) + +_PoolItem = collections.namedtuple('_PoolItem', ['ttl', 'connection']) + + +class ConnectionPool(queue.Queue): + """Base connection pool class + + This class implements the basic connection pool logic as an abstract base + class. + """ + def __init__(self, maxsize, unused_timeout, conn_get_timeout=None): + """Initialize the connection pool. + + :param maxsize: maximum number of client connections for the pool + :type maxsize: int + :param unused_timeout: idle time to live for unused clients (in + seconds). If a client connection object has been + in the pool and idle for longer than the + unused_timeout, it will be reaped. This is to + ensure resources are released as utilization + goes down. + :type unused_timeout: int + :param conn_get_timeout: maximum time in seconds to wait for a + connection. If set to `None` timeout is + indefinite. 
+ :type conn_get_timeout: int + """ + queue.Queue.__init__(self, maxsize) + self._unused_timeout = unused_timeout + self._connection_get_timeout = conn_get_timeout + self._acquired = 0 + + def _create_connection(self): + raise NotImplementedError + + def _destroy_connection(self, conn): + raise NotImplementedError + + def _debug_logger(self, msg, *args, **kwargs): + if LOG.isEnabledFor(logging.DEBUG): + thread_id = threading.current_thread().ident + args = (id(self), thread_id) + args + prefix = 'Memcached pool %s, thread %s: ' + LOG.debug(prefix + msg, *args, **kwargs) + + @contextlib.contextmanager + def acquire(self): + self._debug_logger('Acquiring connection') + try: + conn = self.get(timeout=self._connection_get_timeout) + except queue.Empty: + raise exception.UnexpectedError( + _('Unable to get a connection from pool id %(id)s after ' + '%(seconds)s seconds.') % + {'id': id(self), 'seconds': self._connection_get_timeout}) + self._debug_logger('Acquired connection %s', id(conn)) + try: + yield conn + finally: + self._debug_logger('Releasing connection %s', id(conn)) + self.put(conn) + + def _qsize(self): + return self.maxsize - self._acquired + + if not hasattr(queue.Queue, '_qsize'): + qsize = _qsize + + def _get(self): + if self.queue: + conn = self.queue.pop().connection + else: + conn = self._create_connection() + self._acquired += 1 + return conn + + def _put(self, conn): + self.queue.append(_PoolItem( + ttl=time.time() + self._unused_timeout, + connection=conn, + )) + self._acquired -= 1 + # Drop all expired connections from the right end of the queue + now = time.time() + while self.queue and self.queue[0].ttl < now: + conn = self.queue.popleft().connection + self._debug_logger('Reaping connection %s', id(conn)) + self._destroy_connection(conn) + + +class MemcacheClientPool(ConnectionPool): + def __init__(self, urls, arguments, **kwargs): + ConnectionPool.__init__(self, **kwargs) + self.urls = urls + self._arguments = arguments + # NOTE(morganfainberg): The host objects expect an int for the + # deaduntil value. Initialize this at 0 for each host with 0 indicating + # the host is not dead. + self._hosts_deaduntil = [0] * len(urls) + + def _create_connection(self): + return _MemcacheClient(self.urls, **self._arguments) + + def _destroy_connection(self, conn): + conn.disconnect_all() + + def _get(self): + conn = ConnectionPool._get(self) + try: + # Propagate host state known to us to this client's list + now = time.time() + for deaduntil, host in zip(self._hosts_deaduntil, conn.servers): + if deaduntil > now and host.deaduntil <= now: + host.mark_dead('propagating death mark from the pool') + host.deaduntil = deaduntil + except Exception: + # We need to be sure that connection doesn't leak from the pool. + # This code runs before we enter context manager's try-finally + # block, so we need to explicitly release it here + ConnectionPool._put(self, conn) + raise + return conn + + def _put(self, conn): + try: + # If this client found that one of the hosts is dead, mark it as + # such in our internal list + now = time.time() + for i, deaduntil, host in zip(itertools.count(), + self._hosts_deaduntil, + conn.servers): + # Do nothing if we already know this host is dead + if deaduntil <= now: + if host.deaduntil > now: + self._hosts_deaduntil[i] = host.deaduntil + self._debug_logger( + 'Marked host %s dead until %s', + self.urls[i], host.deaduntil) + else: + self._hosts_deaduntil[i] = 0 + # If all hosts are dead we should forget that they're dead. 
This + # way we won't get completely shut off until dead_retry seconds + # pass, but will be checking servers as frequent as we can (over + # way smaller socket_timeout) + if all(deaduntil > now for deaduntil in self._hosts_deaduntil): + self._debug_logger('All hosts are dead. Marking them as live.') + self._hosts_deaduntil[:] = [0] * len(self._hosts_deaduntil) + finally: + ConnectionPool._put(self, conn) diff --git a/keystone/common/cache/backends/memcache_pool.py b/keystone/common/cache/backends/memcache_pool.py new file mode 100644 index 00000000..f3990b12 --- /dev/null +++ b/keystone/common/cache/backends/memcache_pool.py @@ -0,0 +1,61 @@ +# Copyright 2014 Mirantis Inc +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""dogpile.cache backend that uses Memcached connection pool""" + +import functools +import logging + +from dogpile.cache.backends import memcached as memcached_backend + +from keystone.common.cache import _memcache_pool + + +LOG = logging.getLogger(__name__) + + +# Helper to ease backend refactoring +class ClientProxy(object): + def __init__(self, client_pool): + self.client_pool = client_pool + + def _run_method(self, __name, *args, **kwargs): + with self.client_pool.acquire() as client: + return getattr(client, __name)(*args, **kwargs) + + def __getattr__(self, name): + return functools.partial(self._run_method, name) + + +class PooledMemcachedBackend(memcached_backend.MemcachedBackend): + # Composed from GenericMemcachedBackend's and MemcacheArgs's __init__ + def __init__(self, arguments): + super(PooledMemcachedBackend, self).__init__(arguments) + self.client_pool = _memcache_pool.MemcacheClientPool( + self.url, + arguments={ + 'dead_retry': arguments.get('dead_retry', 5 * 60), + 'socket_timeout': arguments.get('socket_timeout', 3), + }, + maxsize=arguments.get('pool_maxsize', 10), + unused_timeout=arguments.get('pool_unused_timeout', 60), + conn_get_timeout=arguments.get('pool_connection_get_timeout', 10), + ) + + # Since all methods in backend just call one of methods of client, this + # lets us avoid need to hack it too much + @property + def client(self): + return ClientProxy(self.client_pool) diff --git a/keystone/common/cache/core.py b/keystone/common/cache/core.py index 0fc1353b..3ba52874 100644 --- a/keystone/common/cache/core.py +++ b/keystone/common/cache/core.py @@ -40,6 +40,11 @@ dogpile.cache.register_backend( 'keystone.common.cache.backends.mongo', 'MongoCacheBackend') +dogpile.cache.register_backend( + 'keystone.cache.memcache_pool', + 'keystone.common.cache.backends.memcache_pool', + 'PooledMemcachedBackend') + class DebugProxy(proxy.ProxyBackend): """Extra Logging ProxyBackend.""" @@ -102,6 +107,15 @@ def build_cache_config(): conf_dict[arg_key] = argvalue LOG.debug('Keystone Cache Config: %s', conf_dict) + # NOTE(yorik-sar): these arguments will be used for memcache-related + # backends. 
Use setdefault for url to support old-style setting through + # backend_argument=url:127.0.0.1:11211 + conf_dict.setdefault('%s.arguments.url' % prefix, + CONF.cache.memcache_servers) + for arg in ('dead_retry', 'socket_timeout', 'pool_maxsize', + 'pool_unused_timeout', 'pool_connection_get_timeout'): + value = getattr(CONF.cache, 'memcache_' + arg) + conf_dict['%s.arguments.%s' % (prefix, arg)] = value return conf_dict From 189e17071ab6f0b2cb920a6b0f2b9fb6775fc93b Mon Sep 17 00:00:00 2001 From: Lance Bragstad Date: Fri, 26 Sep 2014 15:31:24 +0000 Subject: [PATCH 26/47] Address some late comments for memcache clients This change addresses some late comments from the following review: https://review.openstack.org/#/c/119452/31 Change-Id: I031620f9085ff914aa9b99c21387f953b6ada171 Related-Bug: #1360446 --- keystone/common/cache/_memcache_pool.py | 47 +++++++++++++++++-------- 1 file changed, 33 insertions(+), 14 deletions(-) diff --git a/keystone/common/cache/_memcache_pool.py b/keystone/common/cache/_memcache_pool.py index 70b86b68..5b6422a3 100644 --- a/keystone/common/cache/_memcache_pool.py +++ b/keystone/common/cache/_memcache_pool.py @@ -35,11 +35,6 @@ from keystone.openstack.common import log LOG = log.getLogger(__name__) -# NOTE(morganfainberg): This is used as the maximum number of seconds a get -# of a new connection will wait for before raising an exception indicating -# a serious / most likely non-recoverable delay has occurred. -CONNECTION_GET_TIMEOUT = 120 - # This 'class' is taken from http://stackoverflow.com/a/22520633/238308 # Don't inherit client from threading.local so that we can reuse clients in # different threads @@ -78,9 +73,25 @@ class ConnectionPool(queue.Queue): self._acquired = 0 def _create_connection(self): + """Returns a connection instance. + + This is called when the pool needs another instance created. + + :returns: a new connection instance + + """ raise NotImplementedError def _destroy_connection(self, conn): + """Destroy and cleanup a connection instance. + + This is called when the pool wishes to get rid of an existing + connection. This is the opportunity for a subclass to free up + resources and cleaup after itself. + + :param conn: the connection object to destroy + + """ raise NotImplementedError def _debug_logger(self, msg, *args, **kwargs): @@ -110,6 +121,9 @@ class ConnectionPool(queue.Queue): def _qsize(self): return self.maxsize - self._acquired + # NOTE(dstanek): stdlib and eventlet Queue implementations + # have different names for the qsize method. This ensures + # that we override both of them. if not hasattr(queue.Queue, '_qsize'): qsize = _qsize @@ -121,18 +135,24 @@ class ConnectionPool(queue.Queue): self._acquired += 1 return conn + def _drop_expired_connections(self, conn): + """Drop all expired connections from the right end of the queue. 
+ + :param conn: connection object + """ + now = time.time() + while self.queue and self.queue[0].ttl < now: + conn = self.queue.popleft().connection + self._debug_logger('Reaping connection %s', id(conn)) + self._destroy_connection(conn) + def _put(self, conn): self.queue.append(_PoolItem( ttl=time.time() + self._unused_timeout, connection=conn, )) self._acquired -= 1 - # Drop all expired connections from the right end of the queue - now = time.time() - while self.queue and self.queue[0].ttl < now: - conn = self.queue.popleft().connection - self._debug_logger('Reaping connection %s', id(conn)) - self._destroy_connection(conn) + self._drop_expired_connections(conn) class MemcacheClientPool(ConnectionPool): @@ -173,9 +193,8 @@ class MemcacheClientPool(ConnectionPool): # If this client found that one of the hosts is dead, mark it as # such in our internal list now = time.time() - for i, deaduntil, host in zip(itertools.count(), - self._hosts_deaduntil, - conn.servers): + for i, host in zip(itertools.count(), conn.servers): + deaduntil = self._hosts_deaduntil[i] # Do nothing if we already know this host is dead if deaduntil <= now: if host.deaduntil > now: From 257cb81c5c8843339524726d414374ed362aa0f8 Mon Sep 17 00:00:00 2001 From: Steve Martinelli Date: Sat, 4 Oct 2014 00:54:00 -0400 Subject: [PATCH 27/47] Use importutils from oslo.utils Rather than sync'ing with oslo-incubator, let's use the library oslo.utils instead, we already import it anyway. We can't remove importutils under keystone/openstack/common/ because it's still used by other common functions. Change-Id: I0a8c2de0fa92090a3631a2d30cc311059d021eae --- keystone/common/cache/backends/mongo.py | 2 +- keystone/common/cache/core.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/keystone/common/cache/backends/mongo.py b/keystone/common/cache/backends/mongo.py index 0a30ea0b..c9d25d1a 100644 --- a/keystone/common/cache/backends/mongo.py +++ b/keystone/common/cache/backends/mongo.py @@ -17,12 +17,12 @@ import datetime from dogpile.cache import api from dogpile.cache import util as dp_util +from oslo.utils import importutils from oslo.utils import timeutils import six from keystone import exception from keystone.i18n import _ -from keystone.openstack.common import importutils from keystone.openstack.common import log diff --git a/keystone/common/cache/core.py b/keystone/common/cache/core.py index 3ba52874..46edb531 100644 --- a/keystone/common/cache/core.py +++ b/keystone/common/cache/core.py @@ -17,11 +17,11 @@ import dogpile.cache from dogpile.cache import proxy from dogpile.cache import util +from oslo.utils import importutils from keystone import config from keystone import exception from keystone.i18n import _ -from keystone.openstack.common import importutils from keystone.openstack.common import log From c194f81f386b483e027a92c5526854f3d3e9b8e4 Mon Sep 17 00:00:00 2001 From: Brant Knudson Date: Thu, 23 Oct 2014 19:31:38 -0500 Subject: [PATCH 28/47] Remove nonexistant param from docstring Change-Id: Ie6744f0c1b7dfabc6c9a20fc8fb470e69dedaf3f --- keystone/common/cache/core.py | 1 - 1 file changed, 1 deletion(-) diff --git a/keystone/common/cache/core.py b/keystone/common/cache/core.py index 46edb531..6a75e49a 100644 --- a/keystone/common/cache/core.py +++ b/keystone/common/cache/core.py @@ -87,7 +87,6 @@ class DebugProxy(proxy.ProxyBackend): def build_cache_config(): """Build the cache region dictionary configuration. 
- :param conf: configuration object for keystone :returns: dict """ prefix = CONF.cache.config_prefix From fa732018602ab53a6fb3b615c64550b3d058c408 Mon Sep 17 00:00:00 2001 From: David Stanek Date: Wed, 3 Sep 2014 03:15:16 +0000 Subject: [PATCH 29/47] Adds missing log hints for level E/I/W This patch adds logging hints for the remaining logging statements. bp more-code-style-automation Change-Id: I78801d9d3187df6470b51b3ecb0f182440112d13 --- keystone/common/cache/backends/mongo.py | 10 +++++----- keystone/common/cache/core.py | 6 +++--- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/keystone/common/cache/backends/mongo.py b/keystone/common/cache/backends/mongo.py index c9d25d1a..199845cf 100644 --- a/keystone/common/cache/backends/mongo.py +++ b/keystone/common/cache/backends/mongo.py @@ -22,7 +22,7 @@ from oslo.utils import timeutils import six from keystone import exception -from keystone.i18n import _ +from keystone.i18n import _, _LW from keystone.openstack.common import log @@ -400,10 +400,10 @@ class MongoApi(object): existing_value = index_data['expireAfterSeconds'] fld_present = 'doc_date' in index_data['key'][0] if fld_present and existing_value > -1 and ttl_seconds < 1: - msg = _('TTL index already exists on db collection ' - '<%(c_name)s>, remove index <%(indx_name)s> first' - ' to make updated mongo_ttl_seconds value to be ' - ' effective') + msg = _LW('TTL index already exists on db collection ' + '<%(c_name)s>, remove index <%(indx_name)s> ' + 'first to make updated mongo_ttl_seconds value ' + 'to be effective') LOG.warn(msg, {'c_name': coll_name, 'indx_name': indx_name}) diff --git a/keystone/common/cache/core.py b/keystone/common/cache/core.py index 6a75e49a..3f7a86e3 100644 --- a/keystone/common/cache/core.py +++ b/keystone/common/cache/core.py @@ -21,7 +21,7 @@ from oslo.utils import importutils from keystone import config from keystone import exception -from keystone.i18n import _ +from keystone.i18n import _, _LE from keystone.openstack.common import log @@ -97,8 +97,8 @@ def build_cache_config(): try: (argname, argvalue) = argument.split(':', 1) except ValueError: - msg = _('Unable to build cache config-key. Expected format ' - '":". Skipping unknown format: %s') + msg = _LE('Unable to build cache config-key. Expected format ' + '":". Skipping unknown format: %s') LOG.error(msg, argument) continue From 7af1edc782dc311fd1f38afa4ca493d0e48f1d1d Mon Sep 17 00:00:00 2001 From: Alexander Makarov Date: Mon, 8 Dec 2014 20:51:02 +0300 Subject: [PATCH 30/47] Memcache connection pool excess check When memcache pool depletes exceeding connections are created. Then they are pushed back the to pool via a blocking operation ``put``. As a result thread, attempting to release connection, blocks. This patch adds a check preventing pushung the excess back to the pool, closing released connection instead. 
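A toy, self-contained sketch of the release behaviour described above; this is not the actual Keystone ConnectionPool (which also tracks acquired counts and idle TTLs), and every name in it is illustrative:

    from six.moves import queue


    class BoundedPool(queue.Queue):
        """Hand out connections, destroying any excess instead of blocking."""

        def __init__(self, maxsize, create, destroy):
            queue.Queue.__init__(self, maxsize)
            self._create = create
            self._destroy = destroy

        def acquire(self):
            try:
                return self.get(block=False)
            except queue.Empty:
                # Pool is empty: hand out an extra ("excess") connection.
                return self._create()

        def release(self, conn):
            try:
                self.put(conn, block=False)
            except queue.Full:
                # The pool already holds maxsize connections, so destroy the
                # excess one rather than letting this thread block on put().
                self._destroy(conn)


    pool = BoundedPool(maxsize=2, create=object, destroy=lambda conn: None)
    conns = [pool.acquire() for _ in range(3)]  # third one is an excess client
    for conn in conns:
        pool.release(conn)                      # the excess client is destroyed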
Change-Id: I17756fb5f27cb6a950c647590e2a9e0a83c336ca Closes-Bug: #1401108 Closes-Bug: #1400326 --- keystone/common/cache/_memcache_pool.py | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/keystone/common/cache/_memcache_pool.py b/keystone/common/cache/_memcache_pool.py index 5b6422a3..da897522 100644 --- a/keystone/common/cache/_memcache_pool.py +++ b/keystone/common/cache/_memcache_pool.py @@ -116,10 +116,20 @@ class ConnectionPool(queue.Queue): yield conn finally: self._debug_logger('Releasing connection %s', id(conn)) - self.put(conn) + self._drop_expired_connections() + try: + super(ConnectionPool, self).put(conn, block=False) + except queue.Full: + self._debug_logger('Reaping exceeding connection %s', id(conn)) + self._destroy_connection(conn) def _qsize(self): - return self.maxsize - self._acquired + if self.maxsize: + return self.maxsize - self._acquired + else: + # A value indicating there is always a free connection + # if maxsize is None or 0 + return 1 # NOTE(dstanek): stdlib and eventlet Queue implementations # have different names for the qsize method. This ensures @@ -135,11 +145,8 @@ class ConnectionPool(queue.Queue): self._acquired += 1 return conn - def _drop_expired_connections(self, conn): - """Drop all expired connections from the right end of the queue. - - :param conn: connection object - """ + def _drop_expired_connections(self): + """Drop all expired connections from the right end of the queue.""" now = time.time() while self.queue and self.queue[0].ttl < now: conn = self.queue.popleft().connection @@ -152,7 +159,6 @@ class ConnectionPool(queue.Queue): connection=conn, )) self._acquired -= 1 - self._drop_expired_connections(conn) class MemcacheClientPool(ConnectionPool): From 3380ebd9af00b3021008afaac9fa9bc50462f975 Mon Sep 17 00:00:00 2001 From: Brant Knudson Date: Fri, 16 Jan 2015 17:15:18 -0600 Subject: [PATCH 31/47] Change oslo.utils to oslo_utils The oslo libraries are moving away from namespace packages. A hacking check is added to enforce use of the new location. bp drop-namespace-packages Change-Id: I4ece3ad26c1888388a4a8839f7acf260228a9c71 --- keystone/common/cache/backends/mongo.py | 4 ++-- keystone/common/cache/core.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/keystone/common/cache/backends/mongo.py b/keystone/common/cache/backends/mongo.py index 199845cf..ca2eba4f 100644 --- a/keystone/common/cache/backends/mongo.py +++ b/keystone/common/cache/backends/mongo.py @@ -17,8 +17,8 @@ import datetime from dogpile.cache import api from dogpile.cache import util as dp_util -from oslo.utils import importutils -from oslo.utils import timeutils +from oslo_utils import importutils +from oslo_utils import timeutils import six from keystone import exception diff --git a/keystone/common/cache/core.py b/keystone/common/cache/core.py index 3f7a86e3..34dd3a3d 100644 --- a/keystone/common/cache/core.py +++ b/keystone/common/cache/core.py @@ -17,7 +17,7 @@ import dogpile.cache from dogpile.cache import proxy from dogpile.cache import util -from oslo.utils import importutils +from oslo_utils import importutils from keystone import config from keystone import exception From 5a42dfaec5acf7ca7e5d0c537d0c86c8d8943a27 Mon Sep 17 00:00:00 2001 From: Brant Knudson Date: Thu, 12 Feb 2015 17:30:27 -0600 Subject: [PATCH 32/47] Move existing tests to unit The existing test files are all moved under keystone.tests.unit, except the existing keystone.tests.unit are left in place. 
The .testr.conf is updated so that unit tests are run by default in tox envs, and a tox env can override the tests to run by setting OS_TEST_PATH. This is so functional tests can sit in keystone.tests.functional. Change-Id: I065d3f56e22f344abdadd92b3b384b002b02d989 --- keystone/tests/unit/test_cache.py | 246 ++++++ .../tests/unit/test_cache_backend_mongo.py | 728 ++++++++++++++++++ 2 files changed, 974 insertions(+) create mode 100644 keystone/tests/unit/test_cache.py create mode 100644 keystone/tests/unit/test_cache_backend_mongo.py diff --git a/keystone/tests/unit/test_cache.py b/keystone/tests/unit/test_cache.py new file mode 100644 index 00000000..b3223c81 --- /dev/null +++ b/keystone/tests/unit/test_cache.py @@ -0,0 +1,246 @@ +# Copyright 2013 Metacloud +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import copy + +from dogpile.cache import api +from dogpile.cache import proxy + +from keystone.common import cache +from keystone import config +from keystone import exception +from keystone.tests import unit as tests + + +CONF = config.CONF +NO_VALUE = api.NO_VALUE + + +def _copy_value(value): + if value is not NO_VALUE: + value = copy.deepcopy(value) + return value + + +# NOTE(morganfainberg): WARNING - It is not recommended to use the Memory +# backend for dogpile.cache in a real deployment under any circumstances. The +# backend does no cleanup of expired values and therefore will leak memory. The +# backend is not implemented in a way to share data across processes (e.g. +# Keystone in HTTPD. This proxy is a hack to get around the lack of isolation +# of values in memory. Currently it blindly stores and retrieves the values +# from the cache, and modifications to dicts/lists/etc returned can result in +# changes to the cached values. In short, do not use the dogpile.cache.memory +# backend unless you are running tests or expecting odd/strange results. +class CacheIsolatingProxy(proxy.ProxyBackend): + """Proxy that forces a memory copy of stored values. + The default in-memory cache-region does not perform a copy on values it + is meant to cache. Therefore if the value is modified after set or after + get, the cached value also is modified. This proxy does a copy as the last + thing before storing data. 
+ """ + def get(self, key): + return _copy_value(self.proxied.get(key)) + + def set(self, key, value): + self.proxied.set(key, _copy_value(value)) + + +class TestProxy(proxy.ProxyBackend): + def get(self, key): + value = _copy_value(self.proxied.get(key)) + if value is not NO_VALUE: + if isinstance(value[0], TestProxyValue): + value[0].cached = True + return value + + +class TestProxyValue(object): + def __init__(self, value): + self.value = value + self.cached = False + + +class CacheRegionTest(tests.TestCase): + + def setUp(self): + super(CacheRegionTest, self).setUp() + self.region = cache.make_region() + cache.configure_cache_region(self.region) + self.region.wrap(TestProxy) + self.test_value = TestProxyValue('Decorator Test') + + def _add_test_caching_option(self): + self.config_fixture.register_opt( + config.config.cfg.BoolOpt('caching', default=True), group='cache') + + def _get_cacheable_function(self): + SHOULD_CACHE_FN = cache.should_cache_fn('cache') + + @self.region.cache_on_arguments(should_cache_fn=SHOULD_CACHE_FN) + def cacheable_function(value): + return value + + return cacheable_function + + def test_region_built_with_proxy_direct_cache_test(self): + # Verify cache regions are properly built with proxies. + test_value = TestProxyValue('Direct Cache Test') + self.region.set('cache_test', test_value) + cached_value = self.region.get('cache_test') + self.assertTrue(cached_value.cached) + + def test_cache_region_no_error_multiple_config(self): + # Verify configuring the CacheRegion again doesn't error. + cache.configure_cache_region(self.region) + cache.configure_cache_region(self.region) + + def test_should_cache_fn_global_cache_enabled(self): + # Verify should_cache_fn generates a sane function for subsystem and + # functions as expected with caching globally enabled. + cacheable_function = self._get_cacheable_function() + + self.config_fixture.config(group='cache', enabled=True) + cacheable_function(self.test_value) + cached_value = cacheable_function(self.test_value) + self.assertTrue(cached_value.cached) + + def test_should_cache_fn_global_cache_disabled(self): + # Verify should_cache_fn generates a sane function for subsystem and + # functions as expected with caching globally disabled. + cacheable_function = self._get_cacheable_function() + + self.config_fixture.config(group='cache', enabled=False) + cacheable_function(self.test_value) + cached_value = cacheable_function(self.test_value) + self.assertFalse(cached_value.cached) + + def test_should_cache_fn_global_cache_disabled_section_cache_enabled(self): + # Verify should_cache_fn generates a sane function for subsystem and + # functions as expected with caching globally disabled and the specific + # section caching enabled. + cacheable_function = self._get_cacheable_function() + + self._add_test_caching_option() + self.config_fixture.config(group='cache', enabled=False) + self.config_fixture.config(group='cache', caching=True) + + cacheable_function(self.test_value) + cached_value = cacheable_function(self.test_value) + self.assertFalse(cached_value.cached) + + def test_should_cache_fn_global_cache_enabled_section_cache_disabled(self): + # Verify should_cache_fn generates a sane function for subsystem and + # functions as expected with caching globally enabled and the specific + # section caching disabled. 
+ cacheable_function = self._get_cacheable_function() + + self._add_test_caching_option() + self.config_fixture.config(group='cache', enabled=True) + self.config_fixture.config(group='cache', caching=False) + + cacheable_function(self.test_value) + cached_value = cacheable_function(self.test_value) + self.assertFalse(cached_value.cached) + + def test_should_cache_fn_global_cache_enabled_section_cache_enabled(self): + # Verify should_cache_fn generates a sane function for subsystem and + # functions as expected with caching globally enabled and the specific + # section caching enabled. + cacheable_function = self._get_cacheable_function() + + self._add_test_caching_option() + self.config_fixture.config(group='cache', enabled=True) + self.config_fixture.config(group='cache', caching=True) + + cacheable_function(self.test_value) + cached_value = cacheable_function(self.test_value) + self.assertTrue(cached_value.cached) + + def test_cache_dictionary_config_builder(self): + """Validate we build a sane dogpile.cache dictionary config.""" + self.config_fixture.config(group='cache', + config_prefix='test_prefix', + backend='some_test_backend', + expiration_time=86400, + backend_argument=['arg1:test', + 'arg2:test:test', + 'arg3.invalid']) + + config_dict = cache.build_cache_config() + self.assertEqual( + CONF.cache.backend, config_dict['test_prefix.backend']) + self.assertEqual( + CONF.cache.expiration_time, + config_dict['test_prefix.expiration_time']) + self.assertEqual('test', config_dict['test_prefix.arguments.arg1']) + self.assertEqual('test:test', + config_dict['test_prefix.arguments.arg2']) + self.assertNotIn('test_prefix.arguments.arg3', config_dict) + + def test_cache_debug_proxy(self): + single_value = 'Test Value' + single_key = 'testkey' + multi_values = {'key1': 1, 'key2': 2, 'key3': 3} + + self.region.set(single_key, single_value) + self.assertEqual(single_value, self.region.get(single_key)) + + self.region.delete(single_key) + self.assertEqual(NO_VALUE, self.region.get(single_key)) + + self.region.set_multi(multi_values) + cached_values = self.region.get_multi(multi_values.keys()) + for value in multi_values.values(): + self.assertIn(value, cached_values) + self.assertEqual(len(multi_values.values()), len(cached_values)) + + self.region.delete_multi(multi_values.keys()) + for value in self.region.get_multi(multi_values.keys()): + self.assertEqual(NO_VALUE, value) + + def test_configure_non_region_object_raises_error(self): + self.assertRaises(exception.ValidationError, + cache.configure_cache_region, + "bogus") + + +class CacheNoopBackendTest(tests.TestCase): + + def setUp(self): + super(CacheNoopBackendTest, self).setUp() + self.region = cache.make_region() + cache.configure_cache_region(self.region) + + def config_overrides(self): + super(CacheNoopBackendTest, self).config_overrides() + self.config_fixture.config(group='cache', + backend='keystone.common.cache.noop') + + def test_noop_backend(self): + single_value = 'Test Value' + single_key = 'testkey' + multi_values = {'key1': 1, 'key2': 2, 'key3': 3} + + self.region.set(single_key, single_value) + self.assertEqual(NO_VALUE, self.region.get(single_key)) + + self.region.set_multi(multi_values) + cached_values = self.region.get_multi(multi_values.keys()) + self.assertEqual(len(cached_values), len(multi_values.values())) + for value in cached_values: + self.assertEqual(NO_VALUE, value) + + # Delete should not raise exceptions + self.region.delete(single_key) + self.region.delete_multi(multi_values.keys()) diff --git 
a/keystone/tests/unit/test_cache_backend_mongo.py b/keystone/tests/unit/test_cache_backend_mongo.py new file mode 100644 index 00000000..639dc1b1 --- /dev/null +++ b/keystone/tests/unit/test_cache_backend_mongo.py @@ -0,0 +1,728 @@ +# Copyright 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import collections +import copy +import functools +import uuid + +from dogpile.cache import api +from dogpile.cache import region as dp_region +import six + +from keystone.common.cache.backends import mongo +from keystone import exception +from keystone.tests import unit as tests + + +# Mock database structure sample where 'ks_cache' is database and +# 'cache' is collection. Dogpile CachedValue data is divided in two +# fields `value` (CachedValue.payload) and `meta` (CachedValue.metadata) +ks_cache = { + "cache": [ + { + "value": { + "serviceType": "identity", + "allVersionsUrl": "https://dummyUrl", + "dateLastModified": "ISODDate(2014-02-08T18:39:13.237Z)", + "serviceName": "Identity", + "enabled": "True" + }, + "meta": { + "v": 1, + "ct": 1392371422.015121 + }, + "doc_date": "ISODate('2014-02-14T09:50:22.015Z')", + "_id": "8251dc95f63842719c077072f1047ddf" + }, + { + "value": "dummyValueX", + "meta": { + "v": 1, + "ct": 1392371422.014058 + }, + "doc_date": "ISODate('2014-02-14T09:50:22.014Z')", + "_id": "66730b9534d146f0804d23729ad35436" + } + ] +} + + +COLLECTIONS = {} +SON_MANIPULATOR = None + + +class MockCursor(object): + + def __init__(self, collection, dataset_factory): + super(MockCursor, self).__init__() + self.collection = collection + self._factory = dataset_factory + self._dataset = self._factory() + self._limit = None + self._skip = None + + def __iter__(self): + return self + + def __next__(self): + if self._skip: + for _ in range(self._skip): + next(self._dataset) + self._skip = None + if self._limit is not None and self._limit <= 0: + raise StopIteration() + if self._limit is not None: + self._limit -= 1 + return next(self._dataset) + + next = __next__ + + def __getitem__(self, index): + arr = [x for x in self._dataset] + self._dataset = iter(arr) + return arr[index] + + +class MockCollection(object): + + def __init__(self, db, name): + super(MockCollection, self).__init__() + self.name = name + self._collection_database = db + self._documents = {} + self.write_concern = {} + + def __getattr__(self, name): + if name == 'database': + return self._collection_database + + def ensure_index(self, key_or_list, *args, **kwargs): + pass + + def index_information(self): + return {} + + def find_one(self, spec_or_id=None, *args, **kwargs): + if spec_or_id is None: + spec_or_id = {} + if not isinstance(spec_or_id, collections.Mapping): + spec_or_id = {'_id': spec_or_id} + + try: + return next(self.find(spec_or_id, *args, **kwargs)) + except StopIteration: + return None + + def find(self, spec=None, *args, **kwargs): + return MockCursor(self, functools.partial(self._get_dataset, spec)) + + def _get_dataset(self, spec): + dataset = 
(self._copy_doc(document, dict) for document in + self._iter_documents(spec)) + return dataset + + def _iter_documents(self, spec=None): + return (SON_MANIPULATOR.transform_outgoing(document, self) for + document in six.itervalues(self._documents) + if self._apply_filter(document, spec)) + + def _apply_filter(self, document, query): + for key, search in six.iteritems(query): + doc_val = document.get(key) + if isinstance(search, dict): + op_dict = {'$in': lambda dv, sv: dv in sv} + is_match = all( + op_str in op_dict and op_dict[op_str](doc_val, search_val) + for op_str, search_val in six.iteritems(search) + ) + else: + is_match = doc_val == search + + return is_match + + def _copy_doc(self, obj, container): + if isinstance(obj, list): + new = [] + for item in obj: + new.append(self._copy_doc(item, container)) + return new + if isinstance(obj, dict): + new = container() + for key, value in obj.items(): + new[key] = self._copy_doc(value, container) + return new + else: + return copy.copy(obj) + + def insert(self, data, manipulate=True, **kwargs): + if isinstance(data, list): + return [self._insert(element) for element in data] + return self._insert(data) + + def save(self, data, manipulate=True, **kwargs): + return self._insert(data) + + def _insert(self, data): + if '_id' not in data: + data['_id'] = uuid.uuid4().hex + object_id = data['_id'] + self._documents[object_id] = self._internalize_dict(data) + return object_id + + def find_and_modify(self, spec, document, upsert=False, **kwargs): + self.update(spec, document, upsert, **kwargs) + + def update(self, spec, document, upsert=False, **kwargs): + + existing_docs = [doc for doc in six.itervalues(self._documents) + if self._apply_filter(doc, spec)] + if existing_docs: + existing_doc = existing_docs[0] # should find only 1 match + _id = existing_doc['_id'] + existing_doc.clear() + existing_doc['_id'] = _id + existing_doc.update(self._internalize_dict(document)) + elif upsert: + existing_doc = self._documents[self._insert(document)] + + def _internalize_dict(self, d): + return dict((k, copy.deepcopy(v)) for k, v in six.iteritems(d)) + + def remove(self, spec_or_id=None, search_filter=None): + """Remove objects matching spec_or_id from the collection.""" + if spec_or_id is None: + spec_or_id = search_filter if search_filter else {} + if not isinstance(spec_or_id, dict): + spec_or_id = {'_id': spec_or_id} + to_delete = list(self.find(spec=spec_or_id)) + for doc in to_delete: + doc_id = doc['_id'] + del self._documents[doc_id] + + return { + "connectionId": uuid.uuid4().hex, + "n": len(to_delete), + "ok": 1.0, + "err": None, + } + + +class MockMongoDB(object): + def __init__(self, dbname): + self._dbname = dbname + self.mainpulator = None + + def authenticate(self, username, password): + pass + + def add_son_manipulator(self, manipulator): + global SON_MANIPULATOR + SON_MANIPULATOR = manipulator + + def __getattr__(self, name): + if name == 'authenticate': + return self.authenticate + elif name == 'name': + return self._dbname + elif name == 'add_son_manipulator': + return self.add_son_manipulator + else: + return get_collection(self._dbname, name) + + def __getitem__(self, name): + return get_collection(self._dbname, name) + + +class MockMongoClient(object): + def __init__(self, *args, **kwargs): + pass + + def __getattr__(self, dbname): + return MockMongoDB(dbname) + + +def get_collection(db_name, collection_name): + mongo_collection = MockCollection(MockMongoDB(db_name), collection_name) + return mongo_collection + + +def pymongo_override(): 
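+    # Monkey-patch pymongo.MongoClient with the MockMongoClient defined above
+    # so these tests never need to reach a real MongoDB server.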
+ global pymongo + import pymongo + if pymongo.MongoClient is not MockMongoClient: + pymongo.MongoClient = MockMongoClient + if pymongo.MongoReplicaSetClient is not MockMongoClient: + pymongo.MongoClient = MockMongoClient + + +class MyTransformer(mongo.BaseTransform): + """Added here just to check manipulator logic is used correctly.""" + + def transform_incoming(self, son, collection): + return super(MyTransformer, self).transform_incoming(son, collection) + + def transform_outgoing(self, son, collection): + return super(MyTransformer, self).transform_outgoing(son, collection) + + +class MongoCache(tests.BaseTestCase): + def setUp(self): + super(MongoCache, self).setUp() + global COLLECTIONS + COLLECTIONS = {} + mongo.MongoApi._DB = {} + mongo.MongoApi._MONGO_COLLS = {} + pymongo_override() + # using typical configuration + self.arguments = { + 'db_hosts': 'localhost:27017', + 'db_name': 'ks_cache', + 'cache_collection': 'cache', + 'username': 'test_user', + 'password': 'test_password' + } + + def test_missing_db_hosts(self): + self.arguments.pop('db_hosts') + region = dp_region.make_region() + self.assertRaises(exception.ValidationError, region.configure, + 'keystone.cache.mongo', + arguments=self.arguments) + + def test_missing_db_name(self): + self.arguments.pop('db_name') + region = dp_region.make_region() + self.assertRaises(exception.ValidationError, region.configure, + 'keystone.cache.mongo', + arguments=self.arguments) + + def test_missing_cache_collection_name(self): + self.arguments.pop('cache_collection') + region = dp_region.make_region() + self.assertRaises(exception.ValidationError, region.configure, + 'keystone.cache.mongo', + arguments=self.arguments) + + def test_incorrect_write_concern(self): + self.arguments['w'] = 'one value' + region = dp_region.make_region() + self.assertRaises(exception.ValidationError, region.configure, + 'keystone.cache.mongo', + arguments=self.arguments) + + def test_correct_write_concern(self): + self.arguments['w'] = 1 + region = dp_region.make_region().configure('keystone.cache.mongo', + arguments=self.arguments) + + random_key = uuid.uuid4().hex + region.set(random_key, "dummyValue10") + # There is no proxy so can access MongoCacheBackend directly + self.assertEqual(region.backend.api.w, 1) + + def test_incorrect_read_preference(self): + self.arguments['read_preference'] = 'inValidValue' + region = dp_region.make_region().configure('keystone.cache.mongo', + arguments=self.arguments) + # As per delayed loading of pymongo, read_preference value should + # still be string and NOT enum + self.assertEqual(region.backend.api.read_preference, + 'inValidValue') + + random_key = uuid.uuid4().hex + self.assertRaises(ValueError, region.set, + random_key, "dummyValue10") + + def test_correct_read_preference(self): + self.arguments['read_preference'] = 'secondaryPreferred' + region = dp_region.make_region().configure('keystone.cache.mongo', + arguments=self.arguments) + # As per delayed loading of pymongo, read_preference value should + # still be string and NOT enum + self.assertEqual(region.backend.api.read_preference, + 'secondaryPreferred') + + random_key = uuid.uuid4().hex + region.set(random_key, "dummyValue10") + + # Now as pymongo is loaded so expected read_preference value is enum. 
+ # There is no proxy so can access MongoCacheBackend directly + self.assertEqual(region.backend.api.read_preference, 3) + + def test_missing_replica_set_name(self): + self.arguments['use_replica'] = True + region = dp_region.make_region() + self.assertRaises(exception.ValidationError, region.configure, + 'keystone.cache.mongo', + arguments=self.arguments) + + def test_provided_replica_set_name(self): + self.arguments['use_replica'] = True + self.arguments['replicaset_name'] = 'my_replica' + dp_region.make_region().configure('keystone.cache.mongo', + arguments=self.arguments) + self.assertTrue(True) # reached here means no initialization error + + def test_incorrect_mongo_ttl_seconds(self): + self.arguments['mongo_ttl_seconds'] = 'sixty' + region = dp_region.make_region() + self.assertRaises(exception.ValidationError, region.configure, + 'keystone.cache.mongo', + arguments=self.arguments) + + def test_cache_configuration_values_assertion(self): + self.arguments['use_replica'] = True + self.arguments['replicaset_name'] = 'my_replica' + self.arguments['mongo_ttl_seconds'] = 60 + self.arguments['ssl'] = False + region = dp_region.make_region().configure('keystone.cache.mongo', + arguments=self.arguments) + # There is no proxy so can access MongoCacheBackend directly + self.assertEqual(region.backend.api.hosts, 'localhost:27017') + self.assertEqual(region.backend.api.db_name, 'ks_cache') + self.assertEqual(region.backend.api.cache_collection, 'cache') + self.assertEqual(region.backend.api.username, 'test_user') + self.assertEqual(region.backend.api.password, 'test_password') + self.assertEqual(region.backend.api.use_replica, True) + self.assertEqual(region.backend.api.replicaset_name, 'my_replica') + self.assertEqual(region.backend.api.conn_kwargs['ssl'], False) + self.assertEqual(region.backend.api.ttl_seconds, 60) + + def test_multiple_region_cache_configuration(self): + arguments1 = copy.copy(self.arguments) + arguments1['cache_collection'] = 'cache_region1' + + region1 = dp_region.make_region().configure('keystone.cache.mongo', + arguments=arguments1) + # There is no proxy so can access MongoCacheBackend directly + self.assertEqual(region1.backend.api.hosts, 'localhost:27017') + self.assertEqual(region1.backend.api.db_name, 'ks_cache') + self.assertEqual(region1.backend.api.cache_collection, 'cache_region1') + self.assertEqual(region1.backend.api.username, 'test_user') + self.assertEqual(region1.backend.api.password, 'test_password') + # Should be None because of delayed initialization + self.assertIsNone(region1.backend.api._data_manipulator) + + random_key1 = uuid.uuid4().hex + region1.set(random_key1, "dummyValue10") + self.assertEqual("dummyValue10", region1.get(random_key1)) + # Now should have initialized + self.assertIsInstance(region1.backend.api._data_manipulator, + mongo.BaseTransform) + + class_name = '%s.%s' % (MyTransformer.__module__, "MyTransformer") + + arguments2 = copy.copy(self.arguments) + arguments2['cache_collection'] = 'cache_region2' + arguments2['son_manipulator'] = class_name + + region2 = dp_region.make_region().configure('keystone.cache.mongo', + arguments=arguments2) + # There is no proxy so can access MongoCacheBackend directly + self.assertEqual(region2.backend.api.hosts, 'localhost:27017') + self.assertEqual(region2.backend.api.db_name, 'ks_cache') + self.assertEqual(region2.backend.api.cache_collection, 'cache_region2') + + # Should be None because of delayed initialization + self.assertIsNone(region2.backend.api._data_manipulator) + + random_key = 
uuid.uuid4().hex + region2.set(random_key, "dummyValue20") + self.assertEqual("dummyValue20", region2.get(random_key)) + # Now should have initialized + self.assertIsInstance(region2.backend.api._data_manipulator, + MyTransformer) + + region1.set(random_key1, "dummyValue22") + self.assertEqual("dummyValue22", region1.get(random_key1)) + + def test_typical_configuration(self): + + dp_region.make_region().configure( + 'keystone.cache.mongo', + arguments=self.arguments + ) + self.assertTrue(True) # reached here means no initialization error + + def test_backend_get_missing_data(self): + + region = dp_region.make_region().configure( + 'keystone.cache.mongo', + arguments=self.arguments + ) + + random_key = uuid.uuid4().hex + # should return NO_VALUE as key does not exist in cache + self.assertEqual(api.NO_VALUE, region.get(random_key)) + + def test_backend_set_data(self): + + region = dp_region.make_region().configure( + 'keystone.cache.mongo', + arguments=self.arguments + ) + + random_key = uuid.uuid4().hex + region.set(random_key, "dummyValue") + self.assertEqual("dummyValue", region.get(random_key)) + + def test_backend_set_data_with_string_as_valid_ttl(self): + + self.arguments['mongo_ttl_seconds'] = '3600' + region = dp_region.make_region().configure('keystone.cache.mongo', + arguments=self.arguments) + self.assertEqual(region.backend.api.ttl_seconds, 3600) + random_key = uuid.uuid4().hex + region.set(random_key, "dummyValue") + self.assertEqual("dummyValue", region.get(random_key)) + + def test_backend_set_data_with_int_as_valid_ttl(self): + + self.arguments['mongo_ttl_seconds'] = 1800 + region = dp_region.make_region().configure('keystone.cache.mongo', + arguments=self.arguments) + self.assertEqual(region.backend.api.ttl_seconds, 1800) + random_key = uuid.uuid4().hex + region.set(random_key, "dummyValue") + self.assertEqual("dummyValue", region.get(random_key)) + + def test_backend_set_none_as_data(self): + + region = dp_region.make_region().configure( + 'keystone.cache.mongo', + arguments=self.arguments + ) + + random_key = uuid.uuid4().hex + region.set(random_key, None) + self.assertIsNone(region.get(random_key)) + + def test_backend_set_blank_as_data(self): + + region = dp_region.make_region().configure( + 'keystone.cache.mongo', + arguments=self.arguments + ) + + random_key = uuid.uuid4().hex + region.set(random_key, "") + self.assertEqual("", region.get(random_key)) + + def test_backend_set_same_key_multiple_times(self): + + region = dp_region.make_region().configure( + 'keystone.cache.mongo', + arguments=self.arguments + ) + + random_key = uuid.uuid4().hex + region.set(random_key, "dummyValue") + self.assertEqual("dummyValue", region.get(random_key)) + + dict_value = {'key1': 'value1'} + region.set(random_key, dict_value) + self.assertEqual(dict_value, region.get(random_key)) + + region.set(random_key, "dummyValue2") + self.assertEqual("dummyValue2", region.get(random_key)) + + def test_backend_multi_set_data(self): + + region = dp_region.make_region().configure( + 'keystone.cache.mongo', + arguments=self.arguments + ) + random_key = uuid.uuid4().hex + random_key1 = uuid.uuid4().hex + random_key2 = uuid.uuid4().hex + random_key3 = uuid.uuid4().hex + mapping = {random_key1: 'dummyValue1', + random_key2: 'dummyValue2', + random_key3: 'dummyValue3'} + region.set_multi(mapping) + # should return NO_VALUE as key does not exist in cache + self.assertEqual(api.NO_VALUE, region.get(random_key)) + self.assertFalse(region.get(random_key)) + self.assertEqual("dummyValue1", 
region.get(random_key1)) + self.assertEqual("dummyValue2", region.get(random_key2)) + self.assertEqual("dummyValue3", region.get(random_key3)) + + def test_backend_multi_get_data(self): + + region = dp_region.make_region().configure( + 'keystone.cache.mongo', + arguments=self.arguments + ) + random_key = uuid.uuid4().hex + random_key1 = uuid.uuid4().hex + random_key2 = uuid.uuid4().hex + random_key3 = uuid.uuid4().hex + mapping = {random_key1: 'dummyValue1', + random_key2: '', + random_key3: 'dummyValue3'} + region.set_multi(mapping) + + keys = [random_key, random_key1, random_key2, random_key3] + results = region.get_multi(keys) + # should return NO_VALUE as key does not exist in cache + self.assertEqual(api.NO_VALUE, results[0]) + self.assertEqual("dummyValue1", results[1]) + self.assertEqual("", results[2]) + self.assertEqual("dummyValue3", results[3]) + + def test_backend_multi_set_should_update_existing(self): + + region = dp_region.make_region().configure( + 'keystone.cache.mongo', + arguments=self.arguments + ) + random_key = uuid.uuid4().hex + random_key1 = uuid.uuid4().hex + random_key2 = uuid.uuid4().hex + random_key3 = uuid.uuid4().hex + mapping = {random_key1: 'dummyValue1', + random_key2: 'dummyValue2', + random_key3: 'dummyValue3'} + region.set_multi(mapping) + # should return NO_VALUE as key does not exist in cache + self.assertEqual(api.NO_VALUE, region.get(random_key)) + self.assertEqual("dummyValue1", region.get(random_key1)) + self.assertEqual("dummyValue2", region.get(random_key2)) + self.assertEqual("dummyValue3", region.get(random_key3)) + + mapping = {random_key1: 'dummyValue4', + random_key2: 'dummyValue5'} + region.set_multi(mapping) + self.assertEqual(api.NO_VALUE, region.get(random_key)) + self.assertEqual("dummyValue4", region.get(random_key1)) + self.assertEqual("dummyValue5", region.get(random_key2)) + self.assertEqual("dummyValue3", region.get(random_key3)) + + def test_backend_multi_set_get_with_blanks_none(self): + + region = dp_region.make_region().configure( + 'keystone.cache.mongo', + arguments=self.arguments + ) + random_key = uuid.uuid4().hex + random_key1 = uuid.uuid4().hex + random_key2 = uuid.uuid4().hex + random_key3 = uuid.uuid4().hex + random_key4 = uuid.uuid4().hex + mapping = {random_key1: 'dummyValue1', + random_key2: None, + random_key3: '', + random_key4: 'dummyValue4'} + region.set_multi(mapping) + # should return NO_VALUE as key does not exist in cache + self.assertEqual(api.NO_VALUE, region.get(random_key)) + self.assertEqual("dummyValue1", region.get(random_key1)) + self.assertIsNone(region.get(random_key2)) + self.assertEqual("", region.get(random_key3)) + self.assertEqual("dummyValue4", region.get(random_key4)) + + keys = [random_key, random_key1, random_key2, random_key3, random_key4] + results = region.get_multi(keys) + + # should return NO_VALUE as key does not exist in cache + self.assertEqual(api.NO_VALUE, results[0]) + self.assertEqual("dummyValue1", results[1]) + self.assertIsNone(results[2]) + self.assertEqual("", results[3]) + self.assertEqual("dummyValue4", results[4]) + + mapping = {random_key1: 'dummyValue5', + random_key2: 'dummyValue6'} + region.set_multi(mapping) + self.assertEqual(api.NO_VALUE, region.get(random_key)) + self.assertEqual("dummyValue5", region.get(random_key1)) + self.assertEqual("dummyValue6", region.get(random_key2)) + self.assertEqual("", region.get(random_key3)) + + def test_backend_delete_data(self): + + region = dp_region.make_region().configure( + 'keystone.cache.mongo', + arguments=self.arguments 
+ ) + + random_key = uuid.uuid4().hex + region.set(random_key, "dummyValue") + self.assertEqual("dummyValue", region.get(random_key)) + + region.delete(random_key) + # should return NO_VALUE as key no longer exists in cache + self.assertEqual(api.NO_VALUE, region.get(random_key)) + + def test_backend_multi_delete_data(self): + + region = dp_region.make_region().configure( + 'keystone.cache.mongo', + arguments=self.arguments + ) + random_key = uuid.uuid4().hex + random_key1 = uuid.uuid4().hex + random_key2 = uuid.uuid4().hex + random_key3 = uuid.uuid4().hex + mapping = {random_key1: 'dummyValue1', + random_key2: 'dummyValue2', + random_key3: 'dummyValue3'} + region.set_multi(mapping) + # should return NO_VALUE as key does not exist in cache + self.assertEqual(api.NO_VALUE, region.get(random_key)) + self.assertEqual("dummyValue1", region.get(random_key1)) + self.assertEqual("dummyValue2", region.get(random_key2)) + self.assertEqual("dummyValue3", region.get(random_key3)) + self.assertEqual(api.NO_VALUE, region.get("InvalidKey")) + + keys = mapping.keys() + + region.delete_multi(keys) + + self.assertEqual(api.NO_VALUE, region.get("InvalidKey")) + # should return NO_VALUE as keys no longer exist in cache + self.assertEqual(api.NO_VALUE, region.get(random_key1)) + self.assertEqual(api.NO_VALUE, region.get(random_key2)) + self.assertEqual(api.NO_VALUE, region.get(random_key3)) + + def test_additional_crud_method_arguments_support(self): + """Additional arguments should works across find/insert/update.""" + + self.arguments['wtimeout'] = 30000 + self.arguments['j'] = True + self.arguments['continue_on_error'] = True + self.arguments['secondary_acceptable_latency_ms'] = 60 + region = dp_region.make_region().configure( + 'keystone.cache.mongo', + arguments=self.arguments + ) + + # There is no proxy so can access MongoCacheBackend directly + api_methargs = region.backend.api.meth_kwargs + self.assertEqual(api_methargs['wtimeout'], 30000) + self.assertEqual(api_methargs['j'], True) + self.assertEqual(api_methargs['continue_on_error'], True) + self.assertEqual(api_methargs['secondary_acceptable_latency_ms'], 60) + + random_key = uuid.uuid4().hex + region.set(random_key, "dummyValue1") + self.assertEqual("dummyValue1", region.get(random_key)) + + region.set(random_key, "dummyValue2") + self.assertEqual("dummyValue2", region.get(random_key)) + + random_key = uuid.uuid4().hex + region.set(random_key, "dummyValue3") + self.assertEqual("dummyValue3", region.get(random_key)) From 5c8b2307abea0267f339c4771af8f0a1dc378cca Mon Sep 17 00:00:00 2001 From: Steve Martinelli Date: Tue, 3 Feb 2015 17:05:10 -0500 Subject: [PATCH 33/47] Use oslo.log instead of incubator Most of changes are just replacing from keystone.openstack.common import log with from oslo_log import log There are some other specific changes that had to be made * Initialize logger in keystone/config.py Change-Id: I859edb71c434051ffe7f34c16018b738ddb71e3b --- keystone/common/cache/_memcache_pool.py | 2 +- keystone/common/cache/backends/mongo.py | 2 +- keystone/common/cache/core.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/keystone/common/cache/_memcache_pool.py b/keystone/common/cache/_memcache_pool.py index da897522..ca974c93 100644 --- a/keystone/common/cache/_memcache_pool.py +++ b/keystone/common/cache/_memcache_pool.py @@ -26,11 +26,11 @@ import threading import time import memcache +from oslo_log import log from six.moves import queue from keystone import exception from keystone.i18n import _ -from 
keystone.openstack.common import log LOG = log.getLogger(__name__) diff --git a/keystone/common/cache/backends/mongo.py b/keystone/common/cache/backends/mongo.py index ca2eba4f..0d14864e 100644 --- a/keystone/common/cache/backends/mongo.py +++ b/keystone/common/cache/backends/mongo.py @@ -17,13 +17,13 @@ import datetime from dogpile.cache import api from dogpile.cache import util as dp_util +from oslo_log import log from oslo_utils import importutils from oslo_utils import timeutils import six from keystone import exception from keystone.i18n import _, _LW -from keystone.openstack.common import log NO_VALUE = api.NO_VALUE diff --git a/keystone/common/cache/core.py b/keystone/common/cache/core.py index 34dd3a3d..0ea78567 100644 --- a/keystone/common/cache/core.py +++ b/keystone/common/cache/core.py @@ -17,12 +17,12 @@ import dogpile.cache from dogpile.cache import proxy from dogpile.cache import util +from oslo_log import log from oslo_utils import importutils from keystone import config from keystone import exception from keystone.i18n import _, _LE -from keystone.openstack.common import log CONF = config.CONF From 7c548400cca4636ad687a814ab94f5cdb9b3fd67 Mon Sep 17 00:00:00 2001 From: Boris Bobrov Date: Mon, 9 Feb 2015 18:58:26 +0300 Subject: [PATCH 34/47] Fix invalid super() usage in memcache pool queue.Queue is an old-style class in stdlib and new-style in eventlet. Because of that TypeError was raised when Keystone was launched using Apache. Change-Id: I5880ea75fc24de5b281238e6c37de75a8a617808 Closes-bug: 1419853 --- keystone/common/cache/_memcache_pool.py | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/keystone/common/cache/_memcache_pool.py b/keystone/common/cache/_memcache_pool.py index da897522..b7c4828b 100644 --- a/keystone/common/cache/_memcache_pool.py +++ b/keystone/common/cache/_memcache_pool.py @@ -67,6 +67,8 @@ class ConnectionPool(queue.Queue): indefinite. :type conn_get_timeout: int """ + # super() cannot be used here because Queue in stdlib is an + # old-style class queue.Queue.__init__(self, maxsize) self._unused_timeout = unused_timeout self._connection_get_timeout = conn_get_timeout @@ -118,7 +120,9 @@ class ConnectionPool(queue.Queue): self._debug_logger('Releasing connection %s', id(conn)) self._drop_expired_connections() try: - super(ConnectionPool, self).put(conn, block=False) + # super() cannot be used here because Queue in stdlib is an + # old-style class + queue.Queue.put(self, conn, block=False) except queue.Full: self._debug_logger('Reaping exceeding connection %s', id(conn)) self._destroy_connection(conn) @@ -163,6 +167,8 @@ class ConnectionPool(queue.Queue): class MemcacheClientPool(ConnectionPool): def __init__(self, urls, arguments, **kwargs): + # super() cannot be used here because Queue in stdlib is an + # old-style class ConnectionPool.__init__(self, **kwargs) self.urls = urls self._arguments = arguments @@ -178,6 +184,8 @@ class MemcacheClientPool(ConnectionPool): conn.disconnect_all() def _get(self): + # super() cannot be used here because Queue in stdlib is an + # old-style class conn = ConnectionPool._get(self) try: # Propagate host state known to us to this client's list @@ -189,7 +197,9 @@ class MemcacheClientPool(ConnectionPool): except Exception: # We need to be sure that connection doesn't leak from the pool. # This code runs before we enter context manager's try-finally - # block, so we need to explicitly release it here + # block, so we need to explicitly release it here. 
+ # super() cannot be used here because Queue in stdlib is an + # old-style class ConnectionPool._put(self, conn) raise return conn @@ -218,4 +228,6 @@ class MemcacheClientPool(ConnectionPool): self._debug_logger('All hosts are dead. Marking them as live.') self._hosts_deaduntil[:] = [0] * len(self._hosts_deaduntil) finally: + # super() cannot be used here because Queue in stdlib is an + # old-style class ConnectionPool._put(self, conn) From 156073cfc9def7ffdd3fdd14a968e3915f0f1d4d Mon Sep 17 00:00:00 2001 From: Brant Knudson Date: Wed, 14 Jan 2015 20:19:35 -0600 Subject: [PATCH 35/47] Consistently use oslo_config.cfg.CONF Keystone modules used different sources of the CONF global so were inconsistent. All modules should use CONF from oslo_config.cfg. Change-Id: I60c8d2c577d37b9b8a367b46596154ce6c49fff4 --- keystone/common/cache/core.py | 4 ++-- keystone/tests/unit/test_cache.py | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/keystone/common/cache/core.py b/keystone/common/cache/core.py index 0ea78567..df424010 100644 --- a/keystone/common/cache/core.py +++ b/keystone/common/cache/core.py @@ -17,15 +17,15 @@ import dogpile.cache from dogpile.cache import proxy from dogpile.cache import util +from oslo_config import cfg from oslo_log import log from oslo_utils import importutils -from keystone import config from keystone import exception from keystone.i18n import _, _LE -CONF = config.CONF +CONF = cfg.CONF LOG = log.getLogger(__name__) make_region = dogpile.cache.make_region diff --git a/keystone/tests/unit/test_cache.py b/keystone/tests/unit/test_cache.py index b3223c81..cb516316 100644 --- a/keystone/tests/unit/test_cache.py +++ b/keystone/tests/unit/test_cache.py @@ -16,14 +16,14 @@ import copy from dogpile.cache import api from dogpile.cache import proxy +from oslo_config import cfg from keystone.common import cache -from keystone import config from keystone import exception from keystone.tests import unit as tests -CONF = config.CONF +CONF = cfg.CONF NO_VALUE = api.NO_VALUE @@ -82,7 +82,7 @@ class CacheRegionTest(tests.TestCase): def _add_test_caching_option(self): self.config_fixture.register_opt( - config.config.cfg.BoolOpt('caching', default=True), group='cache') + cfg.BoolOpt('caching', default=True), group='cache') def _get_cacheable_function(self): SHOULD_CACHE_FN = cache.should_cache_fn('cache') From 2c4b35e3f21567a822cbc55b1cf63295c625dea9 Mon Sep 17 00:00:00 2001 From: "ChangBo Guo(gcb)" Date: Wed, 24 Dec 2014 22:10:41 +0800 Subject: [PATCH 36/47] Use dict comprehensions instead of dict constructor PEP-0274 introduced dict comprehensions [1], these are benefits: The dictionary constructor approach has two distinct disadvantages from the proposed syntax though. First, it isn't as legible as a dict comprehension. Second, it forces the programmer to create an in-core list object first, which could be expensive. Keystone dropped python 2.6 support in Kilo, we can leaverage this now. There is deep dive about PEP-0274[2] and basic tests about performance[3]. Note: This commit doesn't handle dict constructor with kwargs. This commit also adds a hacking rule. 
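As an illustration only (sample data below is made up, not taken from the diff), the shape of the conversion this change applies is:

    docs = [{'_id': 'k1', 'value': 1}, {'_id': 'k2', 'value': 2}]
    # dict() constructor fed a generator -- legal, but less legible:
    old_style = dict((doc['_id'], doc['value']) for doc in docs)
    # equivalent dict comprehension (PEP 274) -- the form this patch converts to:
    new_style = {doc['_id']: doc['value'] for doc in docs}
    assert old_style == new_style == {'k1': 1, 'k2': 2}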
[1]http://legacy.python.org/dev/peps/pep-0274/ [2]http://doughellmann.com/2012/11/12/the-performance-impact-of-using-dict-instead-of-in-cpython-2-7-2.html [3]http://paste.openstack.org/show/154798 Change-Id: Ie74719d0c969fa7819c243d5b162df6656c1e136 --- keystone/common/cache/backends/mongo.py | 5 ++--- keystone/tests/unit/test_cache_backend_mongo.py | 2 +- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/keystone/common/cache/backends/mongo.py b/keystone/common/cache/backends/mongo.py index 0d14864e..b5de9bc4 100644 --- a/keystone/common/cache/backends/mongo.py +++ b/keystone/common/cache/backends/mongo.py @@ -418,14 +418,13 @@ class MongoApi(object): def get_multi(self, keys): db_results = self._get_results_as_dict(keys) - return dict((doc['_id'], doc['value']) for doc in - six.itervalues(db_results)) + return {doc['_id']: doc['value'] for doc in six.itervalues(db_results)} def _get_results_as_dict(self, keys): critieria = {'_id': {'$in': keys}} db_results = self.get_cache_collection().find(spec=critieria, **self.meth_kwargs) - return dict((doc['_id'], doc) for doc in db_results) + return {doc['_id']: doc for doc in db_results} def set(self, key, value): doc_date = self._get_doc_date() diff --git a/keystone/tests/unit/test_cache_backend_mongo.py b/keystone/tests/unit/test_cache_backend_mongo.py index 639dc1b1..4c21b9cd 100644 --- a/keystone/tests/unit/test_cache_backend_mongo.py +++ b/keystone/tests/unit/test_cache_backend_mongo.py @@ -198,7 +198,7 @@ class MockCollection(object): existing_doc = self._documents[self._insert(document)] def _internalize_dict(self, d): - return dict((k, copy.deepcopy(v)) for k, v in six.iteritems(d)) + return {k: copy.deepcopy(v) for k, v in six.iteritems(d)} def remove(self, spec_or_id=None, search_filter=None): """Remove objects matching spec_or_id from the collection.""" From d5fde2f886874ae5a109580078ef0f83c10a9e96 Mon Sep 17 00:00:00 2001 From: Morgan Fainberg Date: Mon, 2 Mar 2015 14:29:49 -0800 Subject: [PATCH 37/47] Make the default cache time more explicit in code To avoid confusion, make the cache time and cache toggle for a given subsystem configurable via the more friendly `get_memoization_decorator`. This has the added benefit of making the resulting decorated methods easier to read as they are only decorated with the resulting function not needing the decorator with the same arguments repeatedly. This change also creates the `get_expiration_time_fn` function that better describes the fallthrough behavior of the [cache] expiration_time config value. This patch updates the revocation event cache time to be in the proper [revoke] section of the config for consistency with the other sections. DocImpact: revocation_cache_time moved from [token] config section to [revoke]/cache_time Co-Authored-By: David Stanek Change-Id: I5c4e5f509ab82b060938591655e6c615ac7d38dd --- keystone/common/cache/core.py | 96 ++++++++++++++++++++++++++++++- keystone/tests/unit/test_cache.py | 84 +++++++++++++++++++++++++-- 2 files changed, 174 insertions(+), 6 deletions(-) diff --git a/keystone/common/cache/core.py b/keystone/common/cache/core.py index df424010..88b3ea72 100644 --- a/keystone/common/cache/core.py +++ b/keystone/common/cache/core.py @@ -165,7 +165,7 @@ def configure_cache_region(region): return region -def should_cache_fn(section): +def get_should_cache_fn(section): """Build a function that returns a config section's caching status. 
For any given driver in keystone that has caching capabilities, a boolean @@ -178,7 +178,7 @@ def should_cache_fn(section): from keystone.common import cache - SHOULD_CACHE = cache.should_cache_fn('token') + SHOULD_CACHE = cache.get_should_cache_fn('token') @cache.on_arguments(should_cache_fn=SHOULD_CACHE) def function(arg1, arg2): @@ -196,6 +196,44 @@ def should_cache_fn(section): return should_cache +def get_expiration_time_fn(section): + """Build a function that returns a config section's expiration time status. + + For any given driver in keystone that has caching capabilities, an int + config option called ``cache_time`` for that driver's section + (e.g. ``token``) should exist and typically default to ``None``. This + function will use that value to tell the caching decorator of the TTL + override for caching the resulting objects. If the value of the config + option is ``None`` the default value provided in the + ``[cache] expiration_time`` option will be used by the decorator. The + default may be set to something other than ``None`` in cases where the + caching TTL should not be tied to the global default(s) (e.g. + revocation_list changes very infrequently and can be cached for >1h by + default). + + To properly use this with the decorator, pass this function the + configuration section and assign the result to a variable. Pass the new + variable to the caching decorator as the named argument + ``expiration_time``. e.g.:: + + from keystone.common import cache + + EXPIRATION_TIME = cache.get_expiration_time_fn('token') + + @cache.on_arguments(expiration_time=EXPIRATION_TIME) + def function(arg1, arg2): + ... + + :param section: name of the configuration section to examine + :type section: string + :rtype: function reference + """ + def get_expiration_time(): + conf_group = getattr(CONF, section) + return getattr(conf_group, 'cache_time', None) + return get_expiration_time + + def key_generate_to_str(s): # NOTE(morganfainberg): Since we need to stringify all arguments, attempt # to stringify and handle the Unicode error explicitly as needed. @@ -214,3 +252,57 @@ def function_key_generator(namespace, fn, to_str=key_generate_to_str): REGION = dogpile.cache.make_region( function_key_generator=function_key_generator) on_arguments = REGION.cache_on_arguments + + +def get_memoization_decorator(section, expiration_section=None): + """Build a function based on the `on_arguments` decorator for the section. + + For any given driver in Keystone that has caching capabilities, a + pair of functions is required to properly determine the status of the + caching capabilities (a toggle to indicate caching is enabled and any + override of the default TTL for cached data). This function will return + an object that has the memoization decorator ``on_arguments`` + pre-configured for the driver. + + Example usage:: + + from keystone.common import cache + + MEMOIZE = cache.get_memoization_decorator(section='token') + + @MEMOIZE + def function(arg1, arg2): + ... + + + ALTERNATE_MEMOIZE = cache.get_memoization_decorator( + section='token', expiration_section='revoke') + + @ALTERNATE_MEMOIZE + def function2(arg1, arg2): + ... + + :param section: name of the configuration section to examine + :type section: string + :param expiration_section: name of the configuration section to examine + for the expiration option. 
This will fall back + to using ``section`` if the value is unspecified + or ``None`` + :type expiration_section: string + :rtype: function reference + """ + if expiration_section is None: + expiration_section = section + should_cache = get_should_cache_fn(section) + expiration_time = get_expiration_time_fn(expiration_section) + + memoize = REGION.cache_on_arguments(should_cache_fn=should_cache, + expiration_time=expiration_time) + + # Make sure the actual "should_cache" and "expiration_time" methods are + # available. This is potentially interesting/useful to pre-seed cache + # values. + setattr(memoize, 'should_cache_fn', should_cache) + setattr(memoize, 'expiration_time_fn', expiration_time) + + return memoize diff --git a/keystone/tests/unit/test_cache.py b/keystone/tests/unit/test_cache.py index cb516316..5a778a07 100644 --- a/keystone/tests/unit/test_cache.py +++ b/keystone/tests/unit/test_cache.py @@ -13,9 +13,12 @@ # under the License. import copy +import time +import uuid from dogpile.cache import api from dogpile.cache import proxy +import mock from oslo_config import cfg from keystone.common import cache @@ -85,11 +88,13 @@ class CacheRegionTest(tests.TestCase): cfg.BoolOpt('caching', default=True), group='cache') def _get_cacheable_function(self): - SHOULD_CACHE_FN = cache.should_cache_fn('cache') + with mock.patch.object(cache.REGION, 'cache_on_arguments', + self.region.cache_on_arguments): + memoize = cache.get_memoization_decorator(section='cache') - @self.region.cache_on_arguments(should_cache_fn=SHOULD_CACHE_FN) - def cacheable_function(value): - return value + @memoize + def cacheable_function(value): + return value return cacheable_function @@ -105,6 +110,77 @@ class CacheRegionTest(tests.TestCase): cache.configure_cache_region(self.region) cache.configure_cache_region(self.region) + def _get_cache_fallthrough_fn(self, cache_time): + with mock.patch.object(cache.REGION, 'cache_on_arguments', + self.region.cache_on_arguments): + memoize = cache.get_memoization_decorator( + section='cache', + expiration_section='assignment') + + class _test_obj(object): + def __init__(self, value): + self.test_value = value + + @memoize + def get_test_value(self): + return self.test_value + + def _do_test(value): + + test_obj = _test_obj(value) + + # Ensure the value has been cached + test_obj.get_test_value() + # Get the now cached value + cached_value = test_obj.get_test_value() + self.assertTrue(cached_value.cached) + self.assertEqual(value.value, cached_value.value) + self.assertEqual(cached_value.value, test_obj.test_value.value) + # Change the underlying value on the test object. 
+ test_obj.test_value = TestProxyValue(uuid.uuid4().hex) + self.assertEqual(cached_value.value, + test_obj.get_test_value().value) + # override the system time to ensure the non-cached new value + # is returned + new_time = time.time() + (cache_time * 2) + with mock.patch.object(time, 'time', + return_value=new_time): + overriden_cache_value = test_obj.get_test_value() + self.assertNotEqual(cached_value.value, + overriden_cache_value.value) + self.assertEqual(test_obj.test_value.value, + overriden_cache_value.value) + + return _do_test + + def test_cache_no_fallthrough_expiration_time_fn(self): + # Since we do not re-configure the cache region, for ease of testing + # this value is set the same as the expiration_time default in the + # [cache] section + cache_time = 600 + expiration_time = cache.get_expiration_time_fn('role') + do_test = self._get_cache_fallthrough_fn(cache_time) + # Run the test with the assignment cache_time value + self.config_fixture.config(cache_time=cache_time, + group='role') + test_value = TestProxyValue(uuid.uuid4().hex) + self.assertEqual(cache_time, expiration_time()) + do_test(value=test_value) + + def test_cache_fallthrough_expiration_time_fn(self): + # Since we do not re-configure the cache region, for ease of testing + # this value is set the same as the expiration_time default in the + # [cache] section + cache_time = 599 + expiration_time = cache.get_expiration_time_fn('role') + do_test = self._get_cache_fallthrough_fn(cache_time) + # Run the test with the assignment cache_time value set to None and + # the global value set. + self.config_fixture.config(cache_time=None, group='role') + test_value = TestProxyValue(uuid.uuid4().hex) + self.assertIsNone(expiration_time()) + do_test(value=test_value) + def test_should_cache_fn_global_cache_enabled(self): # Verify should_cache_fn generates a sane function for subsystem and # functions as expected with caching globally enabled. From 8bcad03c0af6a903a12b236548494dff637d8464 Mon Sep 17 00:00:00 2001 From: David Stanek Date: Wed, 4 Mar 2015 20:43:11 +0000 Subject: [PATCH 38/47] Fixes minor whitespace issues Change-Id: I45492ccce484c9ae9ab98d88b595a17db1258029 --- keystone/tests/unit/test_cache_backend_mongo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/keystone/tests/unit/test_cache_backend_mongo.py b/keystone/tests/unit/test_cache_backend_mongo.py index 4c21b9cd..221affae 100644 --- a/keystone/tests/unit/test_cache_backend_mongo.py +++ b/keystone/tests/unit/test_cache_backend_mongo.py @@ -242,7 +242,7 @@ class MockMongoDB(object): return get_collection(self._dbname, name) def __getitem__(self, name): - return get_collection(self._dbname, name) + return get_collection(self._dbname, name) class MockMongoClient(object): From 43ccf01ee6360e5ff4500e4077295bfc3bcaa598 Mon Sep 17 00:00:00 2001 From: Dave Chen Date: Fri, 6 Mar 2015 23:11:05 +0800 Subject: [PATCH 39/47] Fix the wrong order of parameters when using assertEqual The first parameter is the expected value while the second parameter is the actual value. 
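A standalone illustration of the convention being enforced (the test class, method and value here are made up):

    import unittest

    class TTLOrderingExample(unittest.TestCase):
        def test_ttl(self):
            observed_ttl = 3600  # stand-in for region.backend.api.ttl_seconds
            # assertEqual(expected, observed): the expected literal comes
            # first, so a failure message reports the two values the right
            # way around.
            self.assertEqual(3600, observed_ttl)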
Change-Id: Id19a86def58b984e3a1ae3b8b6b43900a2b50c92 --- .../tests/unit/test_cache_backend_mongo.py | 57 +++++++++---------- 1 file changed, 28 insertions(+), 29 deletions(-) diff --git a/keystone/tests/unit/test_cache_backend_mongo.py b/keystone/tests/unit/test_cache_backend_mongo.py index 221affae..a56bf754 100644 --- a/keystone/tests/unit/test_cache_backend_mongo.py +++ b/keystone/tests/unit/test_cache_backend_mongo.py @@ -330,7 +330,7 @@ class MongoCache(tests.BaseTestCase): random_key = uuid.uuid4().hex region.set(random_key, "dummyValue10") # There is no proxy so can access MongoCacheBackend directly - self.assertEqual(region.backend.api.w, 1) + self.assertEqual(1, region.backend.api.w) def test_incorrect_read_preference(self): self.arguments['read_preference'] = 'inValidValue' @@ -338,8 +338,7 @@ class MongoCache(tests.BaseTestCase): arguments=self.arguments) # As per delayed loading of pymongo, read_preference value should # still be string and NOT enum - self.assertEqual(region.backend.api.read_preference, - 'inValidValue') + self.assertEqual('inValidValue', region.backend.api.read_preference) random_key = uuid.uuid4().hex self.assertRaises(ValueError, region.set, @@ -351,15 +350,15 @@ class MongoCache(tests.BaseTestCase): arguments=self.arguments) # As per delayed loading of pymongo, read_preference value should # still be string and NOT enum - self.assertEqual(region.backend.api.read_preference, - 'secondaryPreferred') + self.assertEqual('secondaryPreferred', + region.backend.api.read_preference) random_key = uuid.uuid4().hex region.set(random_key, "dummyValue10") # Now as pymongo is loaded so expected read_preference value is enum. # There is no proxy so can access MongoCacheBackend directly - self.assertEqual(region.backend.api.read_preference, 3) + self.assertEqual(3, region.backend.api.read_preference) def test_missing_replica_set_name(self): self.arguments['use_replica'] = True @@ -390,15 +389,15 @@ class MongoCache(tests.BaseTestCase): region = dp_region.make_region().configure('keystone.cache.mongo', arguments=self.arguments) # There is no proxy so can access MongoCacheBackend directly - self.assertEqual(region.backend.api.hosts, 'localhost:27017') - self.assertEqual(region.backend.api.db_name, 'ks_cache') - self.assertEqual(region.backend.api.cache_collection, 'cache') - self.assertEqual(region.backend.api.username, 'test_user') - self.assertEqual(region.backend.api.password, 'test_password') - self.assertEqual(region.backend.api.use_replica, True) - self.assertEqual(region.backend.api.replicaset_name, 'my_replica') - self.assertEqual(region.backend.api.conn_kwargs['ssl'], False) - self.assertEqual(region.backend.api.ttl_seconds, 60) + self.assertEqual('localhost:27017', region.backend.api.hosts) + self.assertEqual('ks_cache', region.backend.api.db_name) + self.assertEqual('cache', region.backend.api.cache_collection) + self.assertEqual('test_user', region.backend.api.username) + self.assertEqual('test_password', region.backend.api.password) + self.assertEqual(True, region.backend.api.use_replica) + self.assertEqual('my_replica', region.backend.api.replicaset_name) + self.assertEqual(False, region.backend.api.conn_kwargs['ssl']) + self.assertEqual(60, region.backend.api.ttl_seconds) def test_multiple_region_cache_configuration(self): arguments1 = copy.copy(self.arguments) @@ -407,11 +406,11 @@ class MongoCache(tests.BaseTestCase): region1 = dp_region.make_region().configure('keystone.cache.mongo', arguments=arguments1) # There is no proxy so can access MongoCacheBackend 
directly - self.assertEqual(region1.backend.api.hosts, 'localhost:27017') - self.assertEqual(region1.backend.api.db_name, 'ks_cache') - self.assertEqual(region1.backend.api.cache_collection, 'cache_region1') - self.assertEqual(region1.backend.api.username, 'test_user') - self.assertEqual(region1.backend.api.password, 'test_password') + self.assertEqual('localhost:27017', region1.backend.api.hosts) + self.assertEqual('ks_cache', region1.backend.api.db_name) + self.assertEqual('cache_region1', region1.backend.api.cache_collection) + self.assertEqual('test_user', region1.backend.api.username) + self.assertEqual('test_password', region1.backend.api.password) # Should be None because of delayed initialization self.assertIsNone(region1.backend.api._data_manipulator) @@ -431,9 +430,9 @@ class MongoCache(tests.BaseTestCase): region2 = dp_region.make_region().configure('keystone.cache.mongo', arguments=arguments2) # There is no proxy so can access MongoCacheBackend directly - self.assertEqual(region2.backend.api.hosts, 'localhost:27017') - self.assertEqual(region2.backend.api.db_name, 'ks_cache') - self.assertEqual(region2.backend.api.cache_collection, 'cache_region2') + self.assertEqual('localhost:27017', region2.backend.api.hosts) + self.assertEqual('ks_cache', region2.backend.api.db_name) + self.assertEqual('cache_region2', region2.backend.api.cache_collection) # Should be None because of delayed initialization self.assertIsNone(region2.backend.api._data_manipulator) @@ -483,7 +482,7 @@ class MongoCache(tests.BaseTestCase): self.arguments['mongo_ttl_seconds'] = '3600' region = dp_region.make_region().configure('keystone.cache.mongo', arguments=self.arguments) - self.assertEqual(region.backend.api.ttl_seconds, 3600) + self.assertEqual(3600, region.backend.api.ttl_seconds) random_key = uuid.uuid4().hex region.set(random_key, "dummyValue") self.assertEqual("dummyValue", region.get(random_key)) @@ -493,7 +492,7 @@ class MongoCache(tests.BaseTestCase): self.arguments['mongo_ttl_seconds'] = 1800 region = dp_region.make_region().configure('keystone.cache.mongo', arguments=self.arguments) - self.assertEqual(region.backend.api.ttl_seconds, 1800) + self.assertEqual(1800, region.backend.api.ttl_seconds) random_key = uuid.uuid4().hex region.set(random_key, "dummyValue") self.assertEqual("dummyValue", region.get(random_key)) @@ -711,10 +710,10 @@ class MongoCache(tests.BaseTestCase): # There is no proxy so can access MongoCacheBackend directly api_methargs = region.backend.api.meth_kwargs - self.assertEqual(api_methargs['wtimeout'], 30000) - self.assertEqual(api_methargs['j'], True) - self.assertEqual(api_methargs['continue_on_error'], True) - self.assertEqual(api_methargs['secondary_acceptable_latency_ms'], 60) + self.assertEqual(30000, api_methargs['wtimeout']) + self.assertEqual(True, api_methargs['j']) + self.assertEqual(True, api_methargs['continue_on_error']) + self.assertEqual(60, api_methargs['secondary_acceptable_latency_ms']) random_key = uuid.uuid4().hex region.set(random_key, "dummyValue1") From ea3f3accd536b4da21a70583471f29e98bf3aac3 Mon Sep 17 00:00:00 2001 From: Morgan Fainberg Date: Mon, 9 Mar 2015 17:45:41 -0700 Subject: [PATCH 40/47] Address nits for default cache time more explicit This addresses minor nits in naming of functions when the cache time was made more explicit in code via refactor. Notably the `should_cache_fn` on the decorator now is called `should_cache` and the `expiration_time_fn` is called `get_expiration_time`. 
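For context, a sketch of the resulting usage (the section name and function are illustrative only):

    from keystone.common import cache

    MEMOIZE = cache.get_memoization_decorator(section='token')

    @MEMOIZE
    def get_thing(thing_id):
        ...

    # After this change the helpers hang off the decorator as
    # MEMOIZE.should_cache and MEMOIZE.get_expiration_time (previously
    # MEMOIZE.should_cache_fn and MEMOIZE.expiration_time_fn).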
The declairation of the memoizing decorator in keystone.token.provider has been made consistent with other uses by utilizing the `section` kwarg instead of passing it as positional. Change-Id: I19c50f1fa1fff56f04944bedf58c9438e4115322 --- keystone/common/cache/core.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/keystone/common/cache/core.py b/keystone/common/cache/core.py index 88b3ea72..d8d45e67 100644 --- a/keystone/common/cache/core.py +++ b/keystone/common/cache/core.py @@ -302,7 +302,7 @@ def get_memoization_decorator(section, expiration_section=None): # Make sure the actual "should_cache" and "expiration_time" methods are # available. This is potentially interesting/useful to pre-seed cache # values. - setattr(memoize, 'should_cache_fn', should_cache) - setattr(memoize, 'expiration_time_fn', expiration_time) + setattr(memoize, 'should_cache', should_cache) + setattr(memoize, 'get_expiration_time', expiration_time) return memoize From 341790e796fcc93fe2f86c4f4051570cf6a18998 Mon Sep 17 00:00:00 2001 From: Brant Knudson Date: Fri, 13 Mar 2015 14:23:34 -0500 Subject: [PATCH 41/47] Prefer . to setattr()/getattr() setattr(x, 'foobar', 123) is equivalent to x.foobar = 123, prefer using . since it's clearer. Similar for getattr(). Change-Id: I52948d0288eef2980d0e40591231549111435cdf --- keystone/common/cache/core.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/keystone/common/cache/core.py b/keystone/common/cache/core.py index d8d45e67..306587b3 100644 --- a/keystone/common/cache/core.py +++ b/keystone/common/cache/core.py @@ -302,7 +302,7 @@ def get_memoization_decorator(section, expiration_section=None): # Make sure the actual "should_cache" and "expiration_time" methods are # available. This is potentially interesting/useful to pre-seed cache # values. - setattr(memoize, 'should_cache', should_cache) - setattr(memoize, 'get_expiration_time', expiration_time) + memoize.should_cache = should_cache + memoize.get_expiration_time = expiration_time return memoize From 32fd56c8c655f403ecbdffc890202e15734a2d9f Mon Sep 17 00:00:00 2001 From: Brant Knudson Date: Tue, 7 Apr 2015 19:35:00 -0500 Subject: [PATCH 42/47] Work with pymongo 3.0 pymongo 3.0 renamed mongos_enum to read_pref_mode_from_name which was causing the unit tests to fail. Change-Id: Iaa7fd7221c2e6c865633ef342e6b83304a1de655 Closes-Bug: 1441393 --- keystone/common/cache/backends/mongo.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/keystone/common/cache/backends/mongo.py b/keystone/common/cache/backends/mongo.py index b5de9bc4..58856ac8 100644 --- a/keystone/common/cache/backends/mongo.py +++ b/keystone/common/cache/backends/mongo.py @@ -360,8 +360,12 @@ class MongoApi(object): self._assign_data_mainpulator() if self.read_preference: - self.read_preference = pymongo.read_preferences.mongos_enum( - self.read_preference) + # pymongo 3.0 renamed mongos_enum to read_pref_mode_from_name + f = getattr(pymongo.read_preferences, + 'read_pref_mode_from_name', None) + if not f: + f = pymongo.read_preferences.mongos_enum + self.read_preference = f(self.read_preference) coll.read_preference = self.read_preference if self.w > -1: coll.write_concern['w'] = self.w From 913a070934e887cedb3fb1b9f3702a9f2490a94f Mon Sep 17 00:00:00 2001 From: Brant Knudson Date: Tue, 7 Apr 2015 19:35:00 -0500 Subject: [PATCH 43/47] Work with pymongo 3.0 pymongo 3.0 renamed mongos_enum to read_pref_mode_from_name which was causing the unit tests to fail. 
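The shim in isolation (assumes pymongo is importable; mirrors the hunk below):

    from pymongo import read_preferences

    # pymongo >= 3.0 renamed mongos_enum() to read_pref_mode_from_name();
    # fall back so either release can resolve the mode name.
    f = getattr(read_preferences, 'read_pref_mode_from_name', None)
    if not f:
        f = read_preferences.mongos_enum
    mode = f('secondaryPreferred')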
Change-Id: Iaa7fd7221c2e6c865633ef342e6b83304a1de655 Closes-Bug: 1441393 (cherry picked from commit 7c3fe6acaef6fc283c383fb79f06e388df6e6926) --- keystone/common/cache/backends/mongo.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/keystone/common/cache/backends/mongo.py b/keystone/common/cache/backends/mongo.py index b5de9bc4..58856ac8 100644 --- a/keystone/common/cache/backends/mongo.py +++ b/keystone/common/cache/backends/mongo.py @@ -360,8 +360,12 @@ class MongoApi(object): self._assign_data_mainpulator() if self.read_preference: - self.read_preference = pymongo.read_preferences.mongos_enum( - self.read_preference) + # pymongo 3.0 renamed mongos_enum to read_pref_mode_from_name + f = getattr(pymongo.read_preferences, + 'read_pref_mode_from_name', None) + if not f: + f = pymongo.read_preferences.mongos_enum + self.read_preference = f(self.read_preference) coll.read_preference = self.read_preference if self.w > -1: coll.write_concern['w'] = self.w From 794a9e12dfff63fd5172b1c68be45f0d3b8c75b1 Mon Sep 17 00:00:00 2001 From: Alexander Makarov Date: Mon, 6 Apr 2015 15:49:41 +0300 Subject: [PATCH 44/47] Make memcache client reusable across threads memcache.Client is inherited from threading._local so instances are only accessible from current thread or eventlet. Present workaround broke inheritance chain so super() call is unusable. This patch makes artificial client class mimic inheritance from threading._local while using generic object methods allowing reusability. Change-Id: Ic5d5709695877afb995fd816bb0e4ce711b99b60 Closes-Bug: #1440493 --- keystone/common/cache/_memcache_pool.py | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/keystone/common/cache/_memcache_pool.py b/keystone/common/cache/_memcache_pool.py index b15332db..bc559781 100644 --- a/keystone/common/cache/_memcache_pool.py +++ b/keystone/common/cache/_memcache_pool.py @@ -35,11 +35,22 @@ from keystone.i18n import _ LOG = log.getLogger(__name__) -# This 'class' is taken from http://stackoverflow.com/a/22520633/238308 -# Don't inherit client from threading.local so that we can reuse clients in -# different threads -_MemcacheClient = type('_MemcacheClient', (object,), - dict(memcache.Client.__dict__)) + +class _MemcacheClient(memcache.Client): + """Thread global memcache client + + As client is inherited from threading.local we have to restore object + methods overloaded by threading.local so we can reuse clients in + different threads + """ + __delattr__ = object.__delattr__ + __getattribute__ = object.__getattribute__ + __new__ = object.__new__ + __setattr__ = object.__setattr__ + + def __del__(self): + pass + _PoolItem = collections.namedtuple('_PoolItem', ['ttl', 'connection']) From d5595bbf3a8780f1b77f05d4d1a22723695f4e70 Mon Sep 17 00:00:00 2001 From: Alexander Makarov Date: Mon, 6 Apr 2015 15:49:41 +0300 Subject: [PATCH 45/47] Make memcache client reusable across threads memcache.Client is inherited from threading._local so instances are only accessible from current thread or eventlet. Present workaround broke inheritance chain so super() call is unusable. This patch makes artificial client class mimic inheritance from threading._local while using generic object methods allowing reusability. 
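A minimal, standalone illustration of the underlying problem (not keystone code):

    import threading

    # memcache.Client inherits from threading.local, so state attached to a
    # pooled client in one thread is invisible in any other thread.
    class PerThreadClient(threading.local):
        pass

    client = PerThreadClient()
    client.conn = 'set in the main thread'

    def use_from_worker():
        # Prints 'missing' -- the attribute does not exist in this thread.
        print(getattr(client, 'conn', 'missing'))

    t = threading.Thread(target=use_from_worker)
    t.start()
    t.join()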
Change-Id: Ic5d5709695877afb995fd816bb0e4ce711b99b60 Closes-Bug: #1440493 (cherry picked from commit 33a95575fc3778bf8ef054f7b9d24fcb7c75100b) --- keystone/common/cache/_memcache_pool.py | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/keystone/common/cache/_memcache_pool.py b/keystone/common/cache/_memcache_pool.py index b15332db..bc559781 100644 --- a/keystone/common/cache/_memcache_pool.py +++ b/keystone/common/cache/_memcache_pool.py @@ -35,11 +35,22 @@ from keystone.i18n import _ LOG = log.getLogger(__name__) -# This 'class' is taken from http://stackoverflow.com/a/22520633/238308 -# Don't inherit client from threading.local so that we can reuse clients in -# different threads -_MemcacheClient = type('_MemcacheClient', (object,), - dict(memcache.Client.__dict__)) + +class _MemcacheClient(memcache.Client): + """Thread global memcache client + + As client is inherited from threading.local we have to restore object + methods overloaded by threading.local so we can reuse clients in + different threads + """ + __delattr__ = object.__delattr__ + __getattribute__ = object.__getattribute__ + __new__ = object.__new__ + __setattr__ = object.__setattr__ + + def __del__(self): + pass + _PoolItem = collections.namedtuple('_PoolItem', ['ttl', 'connection']) From 9bc74ec16559695196ca2e447130e4b17e70fef0 Mon Sep 17 00:00:00 2001 From: David Stanek Date: Thu, 23 Apr 2015 14:34:24 +0000 Subject: [PATCH 46/47] Handles Python3 builtin changes Some of the builtins (like map) have changed in Python3 in a way that can lead to broken Python2 code. bp python3 Change-Id: I632d857bd29a23db61538755f09da68f0cf7b723 --- keystone/common/cache/_memcache_pool.py | 2 +- keystone/tests/unit/test_cache_backend_mongo.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/keystone/common/cache/_memcache_pool.py b/keystone/common/cache/_memcache_pool.py index bc559781..2bfcc3bb 100644 --- a/keystone/common/cache/_memcache_pool.py +++ b/keystone/common/cache/_memcache_pool.py @@ -27,7 +27,7 @@ import time import memcache from oslo_log import log -from six.moves import queue +from six.moves import queue, zip from keystone import exception from keystone.i18n import _ diff --git a/keystone/tests/unit/test_cache_backend_mongo.py b/keystone/tests/unit/test_cache_backend_mongo.py index 8afd5e8e..1d8b607d 100644 --- a/keystone/tests/unit/test_cache_backend_mongo.py +++ b/keystone/tests/unit/test_cache_backend_mongo.py @@ -20,6 +20,7 @@ import uuid from dogpile.cache import api from dogpile.cache import region as dp_region import six +from six.moves import range from keystone.common.cache.backends import mongo from keystone import exception From c3077247fef29d417bc09ed95762b82906faaea5 Mon Sep 17 00:00:00 2001 From: David Stanek Date: Thu, 23 Apr 2015 15:20:58 +0000 Subject: [PATCH 47/47] Fixes use of dict methods for Python3 In Python3 the dict.keys(), dict.values() and dict.items() methods have been changed to return iterators instead of lists. This causes issues with code that expects a list. 
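Standalone illustration of the behaviour change being accommodated:

    d = {'a': 1, 'b': 2}

    keys = d.keys()         # Python 3: a view object, not a list
    # keys[0]               # TypeError on Python 3; worked on Python 2
    keys = list(d.keys())   # materialize when a real list is required

    # Wrapping items() in list() also makes it safe to mutate the dict
    # while iterating, as transform_incoming()/transform_outgoing() do.
    for key, value in list(d.items()):
        d[key + '_x'] = value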
bp python3 Change-Id: Id0d55ea4b992666848af1b1a055bc7841548cc6a --- keystone/common/cache/backends/mongo.py | 6 +++--- keystone/tests/unit/test_cache_backend_mongo.py | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/keystone/common/cache/backends/mongo.py b/keystone/common/cache/backends/mongo.py index 58856ac8..0598e80e 100644 --- a/keystone/common/cache/backends/mongo.py +++ b/keystone/common/cache/backends/mongo.py @@ -451,7 +451,7 @@ class MongoApi(object): doc_date = self._get_doc_date() insert_refs = [] update_refs = [] - existing_docs = self._get_results_as_dict(mapping.keys()) + existing_docs = self._get_results_as_dict(list(mapping.keys())) for key, value in mapping.items(): ref = self._get_cache_entry(key, value.payload, value.metadata, doc_date) @@ -536,7 +536,7 @@ class BaseTransform(AbstractManipulator): def transform_incoming(self, son, collection): """Used while saving data to MongoDB.""" - for (key, value) in son.items(): + for (key, value) in list(son.items()): if isinstance(value, api.CachedValue): son[key] = value.payload # key is 'value' field here son['meta'] = value.metadata @@ -553,7 +553,7 @@ class BaseTransform(AbstractManipulator): ('_id', 'value', 'meta', 'doc_date')): payload = son.pop('value', None) metadata = son.pop('meta', None) - for (key, value) in son.items(): + for (key, value) in list(son.items()): if isinstance(value, dict): son[key] = self.transform_outgoing(value, collection) if metadata is not None: diff --git a/keystone/tests/unit/test_cache_backend_mongo.py b/keystone/tests/unit/test_cache_backend_mongo.py index a56bf754..8afd5e8e 100644 --- a/keystone/tests/unit/test_cache_backend_mongo.py +++ b/keystone/tests/unit/test_cache_backend_mongo.py @@ -160,7 +160,7 @@ class MockCollection(object): return new if isinstance(obj, dict): new = container() - for key, value in obj.items(): + for key, value in list(obj.items()): new[key] = self._copy_doc(value, container) return new else: