Move mongodb/db2 alarm driver code to alarm tree

This change moves the alarm-related code of the mongodb and db2 drivers to
the alarm storage subtree.

Partially implements: blueprint dedicated-alarm-database

Change-Id: I9ac0842fdb3cbfc3fd6b9aa76a9ec6df8fee244f
Author: Mehdi Abaakouk, 2014-07-18 10:40:51 +02:00
Parent: 7533bc6ff6
Commit: d4baab8a13
7 changed files with 464 additions and 248 deletions
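
For illustration, a minimal sketch of how a driver registered in the new
ceilometer.alarm.storage entry-point namespace (see the setup.cfg hunk at the
end of this diff) can be loaded. It assumes stevedore, the entry-point loader
used throughout OpenStack, and a reachable MongoDB; the URL is made up.

from stevedore import driver

# Resolve the 'mongodb' alarm driver from the new namespace and instantiate it
# with a connection URL (invoke_on_load calls Connection(url) immediately).
mgr = driver.DriverManager(
    namespace='ceilometer.alarm.storage',
    name='mongodb',
    invoke_on_load=True,
    invoke_args=('mongodb://localhost:27017/ceilometer',))
alarm_conn = mgr.driver  # ceilometer.alarm.storage.impl_mongodb.Connection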

ceilometer/alarm/storage/impl_db2.py

@@ -0,0 +1,77 @@
# Copyright 2012 New Dream Network, LLC (DreamHost)
# Copyright 2013 eNovance
# Copyright 2013 IBM Corp
#
# Author: Doug Hellmann <doug.hellmann@dreamhost.com>
# Julien Danjou <julien@danjou.info>
# Tong Li <litong01@us.ibm.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""DB2 storage backend
"""
from __future__ import division
import pymongo
from ceilometer.alarm.storage import pymongo_base
from ceilometer.openstack.common import log
from ceilometer import storage
from ceilometer.storage.mongo import utils as pymongo_utils
LOG = log.getLogger(__name__)
class Connection(pymongo_base.Connection):
"""The db2 alarm storage for Ceilometer."""
CONNECTION_POOL = pymongo_utils.ConnectionPool()
def __init__(self, url):
# Since we are using pymongo, even though we are connecting to DB2,
# we still have to replace the 'db2:' scheme (which only distinguishes
# the db2 driver from the mongodb driver) so that pymongo does not
# raise an exception on an unknown scheme.
url = url.replace('db2:', 'mongodb:', 1)
self.conn = self.CONNECTION_POOL.connect(url)
# Require MongoDB 2.2 to use aggregate(). Since MongoDB is used as the
# backend in the tests, the following check makes sure the tests do not
# try aggregate() against an older MongoDB. For DB2, versionArray is not
# part of server_info(), so no exception is raised when a real DB2
# instance is used as the backend.
server_info = self.conn.server_info()
if server_info.get('sysInfo'):
self._using_mongodb = True
else:
self._using_mongodb = False
if self._using_mongodb and server_info.get('versionArray') < [2, 2]:
raise storage.StorageBadVersion("Need at least MongoDB 2.2")
connection_options = pymongo.uri_parser.parse_uri(url)
self.db = getattr(self.conn, connection_options['database'])
if connection_options.get('username'):
self.db.authenticate(connection_options['username'],
connection_options['password'])
self.upgrade()
def clear(self):
# The drop_database command is a no-op on DB2, where it has not been
# implemented. However, calling it is still important to remove the
# empty databases created during test runs, since the tests run
# against MongoDB on Jenkins.
self.conn.drop_database(self.db)
self.conn.close()
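
A usage sketch for the DB2 alarm connection above; the URL, credentials and
database name are made up, and a reachable backend (DB2, or MongoDB in the
gate) is assumed.

from ceilometer.alarm.storage import impl_db2

# The db2: scheme is rewritten to mongodb: internally before pymongo parses it.
conn = impl_db2.Connection('db2://user:secret@localhost:27017/ceilodb2')
for alarm in conn.get_alarms(enabled=True):
    print(alarm.alarm_id, alarm.state)
conn.clear()  # drops the (test) database and closes the connection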

ceilometer/alarm/storage/impl_mongodb.py

@@ -0,0 +1,68 @@
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
# Copyright 2013 eNovance
# Copyright 2014 Red Hat, Inc
#
# Authors: Doug Hellmann <doug.hellmann@dreamhost.com>
# Julien Danjou <julien@danjou.info>
# Eoghan Glynn <eglynn@redhat.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""MongoDB storage backend"""
from oslo.config import cfg
import pymongo
from ceilometer.alarm.storage import pymongo_base
from ceilometer.openstack.common import log
from ceilometer import storage
from ceilometer.storage.mongo import utils as pymongo_utils
cfg.CONF.import_opt('time_to_live', 'ceilometer.storage',
group="database")
LOG = log.getLogger(__name__)
class Connection(pymongo_base.Connection):
"""Put the alarm data into a MongoDB database."""
CONNECTION_POOL = pymongo_utils.ConnectionPool()
def __init__(self, url):
# NOTE(jd) Use our own connection pooling on top of the Pymongo one.
# We need that, otherwise we would overflow the MongoDB instance with
# new connections since we instantiate a Pymongo client each time
# someone requires a new storage connection.
self.conn = self.CONNECTION_POOL.connect(url)
# Require MongoDB 2.4 to use $setOnInsert
if self.conn.server_info()['versionArray'] < [2, 4]:
raise storage.StorageBadVersion("Need at least MongoDB 2.4")
connection_options = pymongo.uri_parser.parse_uri(url)
self.db = getattr(self.conn, connection_options['database'])
if connection_options.get('username'):
self.db.authenticate(connection_options['username'],
connection_options['password'])
# NOTE(jd) Upgrading is just about creating indexes, so let's do this
# on connection to be sure at least the TTL is correctly updated if
# needed.
self.upgrade()
def clear(self):
self.conn.drop_database(self.db)
# Connection will be reopened automatically if needed
self.conn.close()
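
For context on the version check above: $setOnInsert (MongoDB 2.4+) writes its
fields only when an upsert actually inserts a new document. A generic pymongo
illustration of that behaviour, with a made-up URL, collection and fields (not
code from this driver):

import datetime

import pymongo

client = pymongo.MongoClient('mongodb://localhost:27017')  # illustrative URL
db = client.ceilometer
# On insert both 'state' and 'first_seen' are written; a later upsert matching
# the same document only touches 'state' and leaves 'first_seen' untouched.
db.alarm.update(
    {'alarm_id': 'a-1'},
    {'$set': {'state': 'ok'},
     '$setOnInsert': {'first_seen': datetime.datetime.utcnow()}},
    upsert=True)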

ceilometer/alarm/storage/pymongo_base.py

@@ -0,0 +1,290 @@
#
# Copyright Ericsson AB 2013. All rights reserved
#
# Authors: Ildiko Vancsa <ildiko.vancsa@ericsson.com>
# Balazs Gibizer <balazs.gibizer@ericsson.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Common functions for MongoDB and DB2 backends
"""
import pymongo
from ceilometer.alarm.storage import base
from ceilometer.alarm.storage import models
from ceilometer.openstack.common import log
from ceilometer.storage.mongo import utils as pymongo_utils
from ceilometer import utils
LOG = log.getLogger(__name__)
COMMON_AVAILABLE_CAPABILITIES = {
'alarms': {'query': {'simple': True,
'complex': True},
'history': {'query': {'simple': True,
'complex': True}}},
}
AVAILABLE_STORAGE_CAPABILITIES = {
'storage': {'production_ready': True},
}
class Connection(base.Connection):
"""Base Alarm Connection class for MongoDB and DB2 drivers."""
CAPABILITIES = utils.update_nested(base.Connection.CAPABILITIES,
COMMON_AVAILABLE_CAPABILITIES)
STORAGE_CAPABILITIES = utils.update_nested(
base.Connection.STORAGE_CAPABILITIES,
AVAILABLE_STORAGE_CAPABILITIES,
)
def update_alarm(self, alarm):
"""Update alarm."""
data = alarm.as_dict()
self.db.alarm.update(
{'alarm_id': alarm.alarm_id},
{'$set': data},
upsert=True)
stored_alarm = self.db.alarm.find({'alarm_id': alarm.alarm_id})[0]
del stored_alarm['_id']
self._ensure_encapsulated_rule_format(stored_alarm)
self._ensure_time_constraints(stored_alarm)
return models.Alarm(**stored_alarm)
create_alarm = update_alarm
def delete_alarm(self, alarm_id):
"""Delete an alarm."""
self.db.alarm.remove({'alarm_id': alarm_id})
def record_alarm_change(self, alarm_change):
"""Record alarm change event."""
self.db.alarm_history.insert(alarm_change.copy())
def get_alarms(self, name=None, user=None, state=None, meter=None,
project=None, enabled=None, alarm_id=None, pagination=None):
"""Yields a lists of alarms that match filters
:param name: The Alarm name.
:param user: Optional ID for user that owns the resource.
:param state: Optional string for alarm state.
:param meter: Optional string for alarms associated with meter.
:param project: Optional ID for project that owns the resource.
:param enabled: Optional boolean to list disabled alarms.
:param alarm_id: Optional alarm_id to return one alarm.
:param pagination: Optional pagination query.
"""
if pagination:
raise NotImplementedError('Pagination not implemented')
q = {}
if user is not None:
q['user_id'] = user
if project is not None:
q['project_id'] = project
if name is not None:
q['name'] = name
if enabled is not None:
q['enabled'] = enabled
if alarm_id is not None:
q['alarm_id'] = alarm_id
if state is not None:
q['state'] = state
if meter is not None:
q['rule.meter_name'] = meter
return self._retrieve_alarms(q, [], None)
def get_alarm_changes(self, alarm_id, on_behalf_of,
user=None, project=None, type=None,
start_timestamp=None, start_timestamp_op=None,
end_timestamp=None, end_timestamp_op=None):
"""Yields list of AlarmChanges describing alarm history
Changes are always sorted in reverse order of occurrence, given
the importance of currency.
Segregation for non-administrative users is done on the basis
of the on_behalf_of parameter. This allows such users to have
visibility on both the changes initiated by themselves directly
(generally creation, rule changes, or deletion) and also on those
changes initiated on their behalf by the alarming service (state
transitions after alarm thresholds are crossed).
:param alarm_id: ID of alarm to return changes for
:param on_behalf_of: ID of tenant to scope changes query (None for
administrative user, indicating all projects)
:param user: Optional ID of user to return changes for
:param project: Optional ID of project to return changes for
:param type: Optional change type
:param start_timestamp: Optional modified timestamp start range
:param start_timestamp_op: Optional timestamp start range operation
:param end_timestamp: Optional modified timestamp end range
:param end_timestamp_op: Optional timestamp end range operation
"""
q = dict(alarm_id=alarm_id)
if on_behalf_of is not None:
q['on_behalf_of'] = on_behalf_of
if user is not None:
q['user_id'] = user
if project is not None:
q['project_id'] = project
if type is not None:
q['type'] = type
if start_timestamp or end_timestamp:
ts_range = pymongo_utils.make_timestamp_range(start_timestamp,
end_timestamp,
start_timestamp_op,
end_timestamp_op)
if ts_range:
q['timestamp'] = ts_range
return self._retrieve_alarm_changes(q,
[("timestamp",
pymongo.DESCENDING)],
None)
def query_alarms(self, filter_expr=None, orderby=None, limit=None):
"""Return an iterable of model.Alarm objects."""
return self._retrieve_data(filter_expr, orderby, limit,
models.Alarm)
def query_alarm_history(self, filter_expr=None, orderby=None, limit=None):
"""Return an iterable of model.AlarmChange objects."""
return self._retrieve_data(filter_expr,
orderby,
limit,
models.AlarmChange)
def _retrieve_data(self, filter_expr, orderby, limit, model):
if limit == 0:
return []
query_filter = {}
orderby_filter = [("timestamp", pymongo.DESCENDING)]
transformer = pymongo_utils.QueryTransformer()
if orderby is not None:
orderby_filter = transformer.transform_orderby(orderby)
if filter_expr is not None:
query_filter = transformer.transform_filter(filter_expr)
retrieve = {models.Alarm: self._retrieve_alarms,
models.AlarmChange: self._retrieve_alarm_changes}
return retrieve[model](query_filter, orderby_filter, limit)
def _retrieve_alarms(self, query_filter, orderby, limit):
if limit is not None:
alarms = self.db.alarm.find(query_filter,
limit=limit,
sort=orderby)
else:
alarms = self.db.alarm.find(query_filter, sort=orderby)
for alarm in alarms:
a = {}
a.update(alarm)
del a['_id']
self._ensure_encapsulated_rule_format(a)
self._ensure_time_constraints(a)
yield models.Alarm(**a)
def _retrieve_alarm_changes(self, query_filter, orderby, limit):
if limit is not None:
alarms_history = self.db.alarm_history.find(query_filter,
limit=limit,
sort=orderby)
else:
alarms_history = self.db.alarm_history.find(
query_filter, sort=orderby)
for alarm_history in alarms_history:
ah = {}
ah.update(alarm_history)
del ah['_id']
yield models.AlarmChange(**ah)
@classmethod
def _ensure_encapsulated_rule_format(cls, alarm):
"""Ensure the alarm returned by the storage have the correct format.
The previous format looks like:
{
'alarm_id': '0ld-4l3rt',
'enabled': True,
'name': 'old-alert',
'description': 'old-alert',
'timestamp': None,
'meter_name': 'cpu',
'user_id': 'me',
'project_id': 'and-da-boys',
'comparison_operator': 'lt',
'threshold': 36,
'statistic': 'count',
'evaluation_periods': 1,
'period': 60,
'state': "insufficient data",
'state_timestamp': None,
'ok_actions': [],
'alarm_actions': ['http://nowhere/alarms'],
'insufficient_data_actions': [],
'repeat_actions': False,
'matching_metadata': {'key': 'value'}
# or 'matching_metadata': [{'key': 'key', 'value': 'value'}]
}
"""
if isinstance(alarm.get('rule'), dict):
return
alarm['type'] = 'threshold'
alarm['rule'] = {}
alarm['matching_metadata'] = cls._decode_matching_metadata(
alarm['matching_metadata'])
for field in ['period', 'evaluation_periods', 'threshold',
'statistic', 'comparison_operator', 'meter_name']:
if field in alarm:
alarm['rule'][field] = alarm[field]
del alarm[field]
query = []
for key in alarm['matching_metadata']:
query.append({'field': key,
'op': 'eq',
'value': alarm['matching_metadata'][key],
'type': 'string'})
del alarm['matching_metadata']
alarm['rule']['query'] = query
@staticmethod
def _decode_matching_metadata(matching_metadata):
if isinstance(matching_metadata, dict):
# note(sileht): keep compatibility with alarm
# with matching_metadata as a dict
return matching_metadata
else:
new_matching_metadata = {}
for elem in matching_metadata:
new_matching_metadata[elem['key']] = elem['value']
return new_matching_metadata
@staticmethod
def _ensure_time_constraints(alarm):
"""Ensures the alarm has a time constraints field."""
if 'time_constraints' not in alarm:
alarm['time_constraints'] = []
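
A worked example of the legacy-format conversion performed by
_ensure_encapsulated_rule_format above, using values taken from its docstring
(only the fields relevant to the conversion are shown, and ceilometer is
assumed to be importable); the trailing comments describe the result.

from ceilometer.alarm.storage import pymongo_base

old_alarm = {
    'alarm_id': '0ld-4l3rt',
    'meter_name': 'cpu',
    'comparison_operator': 'lt',
    'threshold': 36,
    'statistic': 'count',
    'evaluation_periods': 1,
    'period': 60,
    'matching_metadata': [{'key': 'key', 'value': 'value'}],
}

pymongo_base.Connection._ensure_encapsulated_rule_format(old_alarm)
# old_alarm['type'] == 'threshold'
# old_alarm['rule'] == {
#     'meter_name': 'cpu', 'comparison_operator': 'lt', 'threshold': 36,
#     'statistic': 'count', 'evaluation_periods': 1, 'period': 60,
#     'query': [{'field': 'key', 'op': 'eq', 'value': 'value',
#                'type': 'string'}]}
# The flat fields and 'matching_metadata' are removed from the top level.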

ceilometer/storage/pymongo_base.py

@@ -23,8 +23,6 @@ import operator
import pymongo
from ceilometer.alarm.storage import base as alarm_base
from ceilometer.alarm.storage import models as alarm_models
from ceilometer.openstack.common.gettextutils import _
from ceilometer.openstack.common import log
from ceilometer.storage import base
@@ -41,10 +39,6 @@ COMMON_AVAILABLE_CAPABILITIES = {
'samples': {'query': {'simple': True,
'metadata': True,
'complex': True}},
'alarms': {'query': {'simple': True,
'complex': True},
'history': {'query': {'simple': True,
'complex': True}}},
'events': {'query': {'simple': True}},
}
@@ -54,12 +48,10 @@ AVAILABLE_STORAGE_CAPABILITIES = {
}
class Connection(base.Connection, alarm_base.Connection):
class Connection(base.Connection):
"""Base Connection class for MongoDB and DB2 drivers."""
CAPABILITIES = utils.update_nested(base.Connection.CAPABILITIES,
utils.update_nested(
alarm_base.Connection.CAPABILITIES,
COMMON_AVAILABLE_CAPABILITIES))
COMMON_AVAILABLE_CAPABILITIES)
STORAGE_CAPABILITIES = utils.update_nested(
base.Connection.STORAGE_CAPABILITIES,
@@ -108,31 +100,6 @@ class Connection(base.Connection, alarm_base.Connection):
user_id=r['user_id'],
)
def update_alarm(self, alarm):
"""Update alarm."""
data = alarm.as_dict()
self.db.alarm.update(
{'alarm_id': alarm.alarm_id},
{'$set': data},
upsert=True)
stored_alarm = self.db.alarm.find({'alarm_id': alarm.alarm_id})[0]
del stored_alarm['_id']
self._ensure_encapsulated_rule_format(stored_alarm)
self._ensure_time_constraints(stored_alarm)
return alarm_models.Alarm(**stored_alarm)
create_alarm = update_alarm
def delete_alarm(self, alarm_id):
"""Delete an alarm."""
self.db.alarm.remove({'alarm_id': alarm_id})
def record_alarm_change(self, alarm_change):
"""Record alarm change event."""
self.db.alarm_history.insert(alarm_change.copy())
def get_samples(self, sample_filter, limit=None):
"""Return an iterable of model.Sample instances.
@@ -148,89 +115,6 @@ class Connection(base.Connection, alarm_base.Connection):
[("timestamp", pymongo.DESCENDING)],
limit)
def get_alarms(self, name=None, user=None, state=None, meter=None,
project=None, enabled=None, alarm_id=None, pagination=None):
"""Yields a lists of alarms that match filters
:param name: The Alarm name.
:param user: Optional ID for user that owns the resource.
:param state: Optional string for alarm state.
:param meter: Optional string for alarms associated with meter.
:param project: Optional ID for project that owns the resource.
:param enabled: Optional boolean to list disabled alarms.
:param alarm_id: Optional alarm_id to return one alarm.
:param pagination: Optional pagination query.
"""
if pagination:
raise NotImplementedError('Pagination not implemented')
q = {}
if user is not None:
q['user_id'] = user
if project is not None:
q['project_id'] = project
if name is not None:
q['name'] = name
if enabled is not None:
q['enabled'] = enabled
if alarm_id is not None:
q['alarm_id'] = alarm_id
if state is not None:
q['state'] = state
if meter is not None:
q['rule.meter_name'] = meter
return self._retrieve_alarms(q, [], None)
def get_alarm_changes(self, alarm_id, on_behalf_of,
user=None, project=None, type=None,
start_timestamp=None, start_timestamp_op=None,
end_timestamp=None, end_timestamp_op=None):
"""Yields list of AlarmChanges describing alarm history
Changes are always sorted in reverse order of occurrence, given
the importance of currency.
Segregation for non-administrative users is done on the basis
of the on_behalf_of parameter. This allows such users to have
visibility on both the changes initiated by themselves directly
(generally creation, rule changes, or deletion) and also on those
changes initiated on their behalf by the alarming service (state
transitions after alarm thresholds are crossed).
:param alarm_id: ID of alarm to return changes for
:param on_behalf_of: ID of tenant to scope changes query (None for
administrative user, indicating all projects)
:param user: Optional ID of user to return changes for
:param project: Optional ID of project to return changes for
:param type: Optional change type
:param start_timestamp: Optional modified timestamp start range
:param start_timestamp_op: Optional timestamp start range operation
:param end_timestamp: Optional modified timestamp end range
:param end_timestamp_op: Optional timestamp end range operation
"""
q = dict(alarm_id=alarm_id)
if on_behalf_of is not None:
q['on_behalf_of'] = on_behalf_of
if user is not None:
q['user_id'] = user
if project is not None:
q['project_id'] = project
if type is not None:
q['type'] = type
if start_timestamp or end_timestamp:
ts_range = pymongo_utils.make_timestamp_range(start_timestamp,
end_timestamp,
start_timestamp_op,
end_timestamp_op)
if ts_range:
q['timestamp'] = ts_range
return self._retrieve_alarm_changes(q,
[("timestamp",
pymongo.DESCENDING)],
None)
def record_events(self, event_models):
"""Write the events to database.
@@ -345,21 +229,6 @@ class Connection(base.Connection, alarm_base.Connection):
yield trait
def query_samples(self, filter_expr=None, orderby=None, limit=None):
return self._retrieve_data(filter_expr, orderby, limit, models.Meter)
def query_alarms(self, filter_expr=None, orderby=None, limit=None):
"""Return an iterable of model.Alarm objects."""
return self._retrieve_data(filter_expr, orderby, limit,
alarm_models.Alarm)
def query_alarm_history(self, filter_expr=None, orderby=None, limit=None):
"""Return an iterable of model.AlarmChange objects."""
return self._retrieve_data(filter_expr,
orderby,
limit,
alarm_models.AlarmChange)
def _retrieve_data(self, filter_expr, orderby, limit, model):
if limit == 0:
return []
query_filter = {}
@@ -370,10 +239,7 @@ class Connection(base.Connection, alarm_base.Connection):
if filter_expr is not None:
query_filter = transformer.transform_filter(filter_expr)
retrieve = {models.Meter: self._retrieve_samples,
alarm_models.Alarm: self._retrieve_alarms,
alarm_models.AlarmChange: self._retrieve_alarm_changes}
return retrieve[model](query_filter, orderby_filter, limit)
return self._retrieve_samples(query_filter, orderby_filter, limit)
def _retrieve_samples(self, query, orderby, limit):
if limit is not None:
@@ -394,104 +260,3 @@ class Connection(base.Connection, alarm_base.Connection):
# Tolerate absence of recorded_at in older datapoints
s['recorded_at'] = s.get('recorded_at')
yield models.Sample(**s)
def _retrieve_alarms(self, query_filter, orderby, limit):
if limit is not None:
alarms = self.db.alarm.find(query_filter,
limit=limit,
sort=orderby)
else:
alarms = self.db.alarm.find(query_filter, sort=orderby)
for alarm in alarms:
a = {}
a.update(alarm)
del a['_id']
self._ensure_encapsulated_rule_format(a)
self._ensure_time_constraints(a)
yield alarm_models.Alarm(**a)
def _retrieve_alarm_changes(self, query_filter, orderby, limit):
if limit is not None:
alarms_history = self.db.alarm_history.find(query_filter,
limit=limit,
sort=orderby)
else:
alarms_history = self.db.alarm_history.find(
query_filter, sort=orderby)
for alarm_history in alarms_history:
ah = {}
ah.update(alarm_history)
del ah['_id']
yield alarm_models.AlarmChange(**ah)
@classmethod
def _ensure_encapsulated_rule_format(cls, alarm):
"""Ensure the alarm returned by the storage have the correct format.
The previous format looks like:
{
'alarm_id': '0ld-4l3rt',
'enabled': True,
'name': 'old-alert',
'description': 'old-alert',
'timestamp': None,
'meter_name': 'cpu',
'user_id': 'me',
'project_id': 'and-da-boys',
'comparison_operator': 'lt',
'threshold': 36,
'statistic': 'count',
'evaluation_periods': 1,
'period': 60,
'state': "insufficient data",
'state_timestamp': None,
'ok_actions': [],
'alarm_actions': ['http://nowhere/alarms'],
'insufficient_data_actions': [],
'repeat_actions': False,
'matching_metadata': {'key': 'value'}
# or 'matching_metadata': [{'key': 'key', 'value': 'value'}]
}
"""
if isinstance(alarm.get('rule'), dict):
return
alarm['type'] = 'threshold'
alarm['rule'] = {}
alarm['matching_metadata'] = cls._decode_matching_metadata(
alarm['matching_metadata'])
for field in ['period', 'evaluation_periods', 'threshold',
'statistic', 'comparison_operator', 'meter_name']:
if field in alarm:
alarm['rule'][field] = alarm[field]
del alarm[field]
query = []
for key in alarm['matching_metadata']:
query.append({'field': key,
'op': 'eq',
'value': alarm['matching_metadata'][key],
'type': 'string'})
del alarm['matching_metadata']
alarm['rule']['query'] = query
@staticmethod
def _decode_matching_metadata(matching_metadata):
if isinstance(matching_metadata, dict):
# note(sileht): keep compatibility with alarm
# with matching_metadata as a dict
return matching_metadata
else:
new_matching_metadata = {}
for elem in matching_metadata:
new_matching_metadata[elem['key']] = elem['value']
return new_matching_metadata
@staticmethod
def _ensure_time_constraints(alarm):
"""Ensures the alarm has a time constraints field."""
if 'time_constraints' not in alarm:
alarm['time_constraints'] = []

ceilometer/tests/storage/test_impl_db2.py

@@ -23,6 +23,7 @@
"""
from ceilometer.alarm.storage import impl_db2 as impl_db2_alarm
from ceilometer.storage import impl_db2
from ceilometer.tests import base as test_base
@@ -61,16 +62,23 @@ class CapabilitiesTest(test_base.BaseTestCase):
'stddev': False,
'cardinality': False}}
},
'alarms': {'query': {'simple': True,
'complex': True},
'history': {'query': {'simple': True,
'complex': True}}},
'events': {'query': {'simple': True}}
}
actual_capabilities = impl_db2.Connection.get_capabilities()
self.assertEqual(expected_capabilities, actual_capabilities)
def test_alarm_capabilities(self):
expected_capabilities = {
'alarms': {'query': {'simple': True,
'complex': True},
'history': {'query': {'simple': True,
'complex': True}}},
}
actual_capabilities = impl_db2_alarm.Connection.get_capabilities()
self.assertEqual(expected_capabilities, actual_capabilities)
def test_storage_capabilities(self):
expected_capabilities = {
'storage': {'production_ready': True},

ceilometer/tests/storage/test_impl_mongodb.py

@@ -23,6 +23,7 @@
"""
from ceilometer.alarm.storage import impl_mongodb as impl_mongodb_alarm
from ceilometer.storage import base
from ceilometer.storage import impl_mongodb
from ceilometer.tests import base as test_base
@@ -174,16 +175,23 @@ class CapabilitiesTest(test_base.BaseTestCase):
'stddev': True,
'cardinality': True}}
},
'alarms': {'query': {'simple': True,
'complex': True},
'history': {'query': {'simple': True,
'complex': True}}},
'events': {'query': {'simple': True}}
}
actual_capabilities = impl_mongodb.Connection.get_capabilities()
self.assertEqual(expected_capabilities, actual_capabilities)
def test_alarm_capabilities(self):
expected_capabilities = {
'alarms': {'query': {'simple': True,
'complex': True},
'history': {'query': {'simple': True,
'complex': True}}},
}
actual_capabilities = impl_mongodb_alarm.Connection.get_capabilities()
self.assertEqual(expected_capabilities, actual_capabilities)
def test_storage_capabilities(self):
expected_capabilities = {
'storage': {'production_ready': True},

setup.cfg

@@ -146,12 +146,12 @@ ceilometer.poll.central =
ceilometer.alarm.storage =
log = ceilometer.storage.impl_log:Connection
mongodb = ceilometer.storage.impl_mongodb:Connection
mongodb = ceilometer.alarm.storage.impl_mongodb:Connection
mysql = ceilometer.storage.impl_sqlalchemy:Connection
postgresql = ceilometer.storage.impl_sqlalchemy:Connection
sqlite = ceilometer.storage.impl_sqlalchemy:Connection
hbase = ceilometer.storage.impl_hbase:Connection
db2 = ceilometer.storage.impl_db2:Connection
db2 = ceilometer.alarm.storage.impl_db2:Connection
ceilometer.metering.storage =
log = ceilometer.storage.impl_log:Connection
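
A small sketch for inspecting the effect of the entry-point changes above,
assuming ceilometer is installed so that these setup.cfg sections have been
registered:

import pkg_resources

# After this change the same backend names can resolve to different Connection
# classes in the metering and alarm namespaces.
for namespace in ('ceilometer.metering.storage', 'ceilometer.alarm.storage'):
    for ep in pkg_resources.iter_entry_points(namespace):
        print(namespace, ep.name, '->', ep.module_name)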