aodh/ceilometer/storage/pymongo_base.py
Igor Degtiarov 8743e3d53a [Mongodb] Implement events on Mongodb and DB2
In this change events are written into database (Mongodb, DB2) and
retrieved from it.

Several methods were implemented for this purpose:
1. record_events --- store events into database
2. get_events, get_event_types, get_trait_types and get_traits --- get data
from db.
Add some attendant changes.

Implements: blueprint mongodb-events-feature
Change-Id: I8a525e5bf307efcc894f581e4de12094a39343e4
2014-07-11 12:05:15 +03:00

#
# Copyright Ericsson AB 2013. All rights reserved
#
# Authors: Ildiko Vancsa <ildiko.vancsa@ericsson.com>
# Balazs Gibizer <balazs.gibizer@ericsson.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Common functions for MongoDB and DB2 backends
"""

import operator

import pymongo

from ceilometer.alarm.storage import models as alarm_models
from ceilometer.openstack.common.gettextutils import _
from ceilometer.openstack.common import log
from ceilometer.storage import base
from ceilometer.storage import models
from ceilometer.storage.mongo import utils as pymongo_utils
from ceilometer import utils

LOG = log.getLogger(__name__)
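# These nested dicts declare the query features the MongoDB and DB2
# drivers have in common; the Connection class below merges them over
# the defaults inherited from base.Connection via utils.update_nested.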
COMMON_AVAILABLE_CAPABILITIES = {
'meters': {'query': {'simple': True,
'metadata': True}},
'samples': {'query': {'simple': True,
'metadata': True,
'complex': True}},
'alarms': {'query': {'simple': True,
'complex': True},
'history': {'query': {'simple': True,
'complex': True}}},
'events': {'query': {'simple': True}},
}
AVAILABLE_STORAGE_CAPABILITIES = {
'storage': {'production_ready': True},
}
class Connection(base.Connection):
"""Base Connection class for MongoDB and DB2 drivers."""
CAPABILITIES = utils.update_nested(base.Connection.CAPABILITIES,
COMMON_AVAILABLE_CAPABILITIES)
STORAGE_CAPABILITIES = utils.update_nested(
base.Connection.STORAGE_CAPABILITIES,
AVAILABLE_STORAGE_CAPABILITIES,
)
def get_meters(self, user=None, project=None, resource=None, source=None,
metaquery=None, pagination=None):
"""Return an iterable of models.Meter instances
:param user: Optional ID for user that owns the resource.
:param project: Optional ID for project that owns the resource.
:param resource: Optional resource filter.
:param source: Optional source filter.
:param metaquery: Optional dict with metadata to match on.
:param pagination: Optional pagination query.
"""
if pagination:
raise NotImplementedError('Pagination not implemented')
metaquery = metaquery or {}
q = {}
if user is not None:
q['user_id'] = user
if project is not None:
q['project_id'] = project
if resource is not None:
q['_id'] = resource
if source is not None:
q['source'] = source
q.update(metaquery)
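# Each resource document embeds a 'meter' array, so a single resource
# expands into one models.Meter per embedded meter entry.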
for r in self.db.resource.find(q):
for r_meter in r['meter']:
yield models.Meter(
name=r_meter['counter_name'],
type=r_meter['counter_type'],
# Return an empty string if 'counter_unit' is missing, for
# backward compatibility.
unit=r_meter.get('counter_unit', ''),
resource_id=r['_id'],
project_id=r['project_id'],
source=r['source'],
user_id=r['user_id'],
)
def update_alarm(self, alarm):
"""Update alarm."""
data = alarm.as_dict()
self.db.alarm.update(
{'alarm_id': alarm.alarm_id},
{'$set': data},
upsert=True)
stored_alarm = self.db.alarm.find({'alarm_id': alarm.alarm_id})[0]
del stored_alarm['_id']
self._ensure_encapsulated_rule_format(stored_alarm)
self._ensure_time_constraints(stored_alarm)
return alarm_models.Alarm(**stored_alarm)
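# The update above uses upsert=True, so creating an alarm is the same
# operation as updating one; create_alarm can simply alias it.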
create_alarm = update_alarm
def delete_alarm(self, alarm_id):
"""Delete an alarm."""
self.db.alarm.remove({'alarm_id': alarm_id})
def record_alarm_change(self, alarm_change):
"""Record alarm change event."""
self.db.alarm_history.insert(alarm_change.copy())
def get_samples(self, sample_filter, limit=None):
"""Return an iterable of model.Sample instances.
:param sample_filter: Filter.
:param limit: Maximum number of results to return.
"""
if limit == 0:
return []
q = pymongo_utils.make_query_from_filter(sample_filter,
require_meter=False)
return self._retrieve_samples(q,
[("timestamp", pymongo.DESCENDING)],
limit)
def get_alarms(self, name=None, user=None, state=None, meter=None,
project=None, enabled=None, alarm_id=None, pagination=None):
"""Yields a lists of alarms that match filters
:param name: The Alarm name.
:param user: Optional ID for user that owns the resource.
:param state: Optional string for alarm state.
:param meter: Optional string for alarms associated with meter.
:param project: Optional ID for project that owns the resource.
:param enabled: Optional boolean to filter alarms by their enabled state.
:param alarm_id: Optional alarm_id to return one alarm.
:param pagination: Optional pagination query.
"""
if pagination:
raise NotImplementedError('Pagination not implemented')
q = {}
if user is not None:
q['user_id'] = user
if project is not None:
q['project_id'] = project
if name is not None:
q['name'] = name
if enabled is not None:
q['enabled'] = enabled
if alarm_id is not None:
q['alarm_id'] = alarm_id
if state is not None:
q['state'] = state
if meter is not None:
q['rule.meter_name'] = meter
return self._retrieve_alarms(q, [], None)
def get_alarm_changes(self, alarm_id, on_behalf_of,
user=None, project=None, type=None,
start_timestamp=None, start_timestamp_op=None,
end_timestamp=None, end_timestamp_op=None):
"""Yields list of AlarmChanges describing alarm history
Changes are always sorted in reverse order of occurrence, given
the importance of currency.
Segregation for non-administrative users is done on the basis
of the on_behalf_of parameter. This allows such users to have
visibility on both the changes initiated by themselves directly
(generally creation, rule changes, or deletion) and also on those
changes initiated on their behalf by the alarming service (state
transitions after alarm thresholds are crossed).
:param alarm_id: ID of alarm to return changes for
:param on_behalf_of: ID of tenant to scope changes query (None for
administrative user, indicating all projects)
:param user: Optional ID of user to return changes for
:param project: Optional ID of project to return changes for
:param type: Optional change type
:param start_timestamp: Optional modified timestamp start range
:param start_timestamp_op: Optional timestamp start range operation
:param end_timestamp: Optional modified timestamp end range
:param end_timestamp_op: Optional timestamp end range operation
"""
q = dict(alarm_id=alarm_id)
if on_behalf_of is not None:
q['on_behalf_of'] = on_behalf_of
if user is not None:
q['user_id'] = user
if project is not None:
q['project_id'] = project
if type is not None:
q['type'] = type
if start_timestamp or end_timestamp:
ts_range = pymongo_utils.make_timestamp_range(start_timestamp,
end_timestamp,
start_timestamp_op,
end_timestamp_op)
if ts_range:
q['timestamp'] = ts_range
return self._retrieve_alarm_changes(q,
[("timestamp",
pymongo.DESCENDING)],
None)
def record_events(self, event_models):
"""Write the events to database.
Return a list of events of type models.Event.DUPLICATE in case of
trying to write an already existing event to the database, or
models.Event.UNKONW_PROBLEM in case of any failures with recording the
event in the database.
:param event_models: a list of models.Event objects.
"""
problem_events = []
for event_model in event_models:
traits = []
if event_model.traits:
for trait in event_model.traits:
traits.append({'trait_name': trait.name,
'trait_type': trait.dtype,
'trait_value': trait.value})
try:
self.db.event.insert(
{'_id': event_model.message_id,
'event_type': event_model.event_type,
'timestamp': event_model.generated,
'traits': traits})
except pymongo.errors.DuplicateKeyError:
problem_events.append((models.Event.DUPLICATE,
event_model))
except Exception as ex:
LOG.exception(_("Failed to record event: %s") % ex)
problem_events.append((models.Event.UNKNOWN_PROBLEM,
event_model))
return problem_events
def get_events(self, event_filter):
"""Return a list of models.Event objects.
:param event_filter: storage.EventFilter object holding the filters
to apply to events stored in the database.
"""
q = pymongo_utils.make_events_query_from_filter(event_filter)
res_events = []
for event in self.db.event.find(q):
traits = []
for trait in event['traits']:
traits.append(models.Trait(name=trait['trait_name'],
dtype=int(trait['trait_type']),
value=trait['trait_value']))
res_events.append(models.Event(message_id=event['_id'],
event_type=event['event_type'],
generated=event['timestamp'],
traits=traits))
return res_events
def get_event_types(self):
"""Return all event types as an iter of strings."""
event_types = set()
events = self.db.event.find()
for event in events:
event_type = event['event_type']
if event_type not in event_types:
event_types.add(event_type)
yield event_type
def get_trait_types(self, event_type):
"""Return a dictionary containing the name and data type of the trait.
Only trait types for the provided event_type are returned.
:param event_type: the type of the Event.
"""
trait_names = set()
events = self.db.event.find({'event_type': event_type})
for event in events:
for trait in event['traits']:
trait_name = trait['trait_name']
if trait_name not in trait_names:
# Only yield each trait name once; it is assumed that a given
# trait name always maps to a single trait type.
trait_names.add(trait_name)
yield {'name': trait_name,
'data_type': trait['trait_type']}
def get_traits(self, event_type, trait_name=None):
"""Return all trait instances associated with an event_type.
If trait_name is specified, only return instances of that trait.
:param event_type: the type of the Event to filter by
:param trait_name: the name of the Trait to filter by
"""
if not trait_name:
events = self.db.event.find({'event_type': event_type})
else:
# Select events that have both the requested event_type and the
# given trait_name, and project only the matching traits.
events = self.db.event.find({'$and': [{'event_type': event_type},
{'traits.trait_name': trait_name}]},
{'traits': {'$elemMatch':
{'trait_name': trait_name}}
})
traits = []
for event in events:
for trait in event['traits']:
traits.append(models.Trait(name=trait['trait_name'],
dtype=trait['trait_type'],
value=trait['trait_value']))
for trait in sorted(traits, key=operator.attrgetter('dtype')):
yield trait
def query_samples(self, filter_expr=None, orderby=None, limit=None):
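"""Return an iterable of model.Sample objects."""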
return self._retrieve_data(filter_expr, orderby, limit, models.Meter)
def query_alarms(self, filter_expr=None, orderby=None, limit=None):
"""Return an iterable of model.Alarm objects."""
return self._retrieve_data(filter_expr, orderby, limit,
alarm_models.Alarm)
def query_alarm_history(self, filter_expr=None, orderby=None, limit=None):
"""Return an iterable of model.AlarmChange objects."""
return self._retrieve_data(filter_expr,
orderby,
limit,
alarm_models.AlarmChange)
def _retrieve_data(self, filter_expr, orderby, limit, model):
if limit == 0:
return []
query_filter = {}
orderby_filter = [("timestamp", pymongo.DESCENDING)]
transformer = pymongo_utils.QueryTransformer()
if orderby is not None:
orderby_filter = transformer.transform_orderby(orderby)
if filter_expr is not None:
query_filter = transformer.transform_filter(filter_expr)
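# Map the requested model class to its retrieval helper and delegate.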
retrieve = {models.Meter: self._retrieve_samples,
alarm_models.Alarm: self._retrieve_alarms,
alarm_models.AlarmChange: self._retrieve_alarm_changes}
return retrieve[model](query_filter, orderby_filter, limit)
def _retrieve_samples(self, query, orderby, limit):
if limit is not None:
samples = self.db.meter.find(query,
limit=limit,
sort=orderby)
else:
samples = self.db.meter.find(query,
sort=orderby)
for s in samples:
# Remove the ObjectId generated by the database when
# the sample was inserted. It is an implementation
# detail that should not leak outside of the driver.
del s['_id']
# Backward compatibility for samples without units
s['counter_unit'] = s.get('counter_unit', '')
# Tolerate absence of recorded_at in older datapoints
s['recorded_at'] = s.get('recorded_at')
yield models.Sample(**s)
def _retrieve_alarms(self, query_filter, orderby, limit):
if limit is not None:
alarms = self.db.alarm.find(query_filter,
limit=limit,
sort=orderby)
else:
alarms = self.db.alarm.find(query_filter, sort=orderby)
for alarm in alarms:
a = {}
a.update(alarm)
del a['_id']
self._ensure_encapsulated_rule_format(a)
self._ensure_time_constraints(a)
yield alarm_models.Alarm(**a)
def _retrieve_alarm_changes(self, query_filter, orderby, limit):
if limit is not None:
alarms_history = self.db.alarm_history.find(query_filter,
limit=limit,
sort=orderby)
else:
alarms_history = self.db.alarm_history.find(
query_filter, sort=orderby)
for alarm_history in alarms_history:
ah = {}
ah.update(alarm_history)
del ah['_id']
yield alarm_models.AlarmChange(**ah)
@classmethod
def _ensure_encapsulated_rule_format(cls, alarm):
"""Ensure the alarm returned by the storage have the correct format.
The previous format looks like:
{
'alarm_id': '0ld-4l3rt',
'enabled': True,
'name': 'old-alert',
'description': 'old-alert',
'timestamp': None,
'meter_name': 'cpu',
'user_id': 'me',
'project_id': 'and-da-boys',
'comparison_operator': 'lt',
'threshold': 36,
'statistic': 'count',
'evaluation_periods': 1,
'period': 60,
'state': "insufficient data",
'state_timestamp': None,
'ok_actions': [],
'alarm_actions': ['http://nowhere/alarms'],
'insufficient_data_actions': [],
'repeat_actions': False,
'matching_metadata': {'key': 'value'}
# or 'matching_metadata': [{'key': 'key', 'value': 'value'}]
}
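After conversion the alarm is stored in the encapsulated rule format,
which looks roughly like this (other fields are left unchanged and
'matching_metadata' is removed):
{
'alarm_id': '0ld-4l3rt',
'type': 'threshold',
'rule': {'meter_name': 'cpu',
'comparison_operator': 'lt',
'threshold': 36,
'statistic': 'count',
'evaluation_periods': 1,
'period': 60,
'query': [{'field': 'key', 'op': 'eq',
'value': 'value', 'type': 'string'}]}
}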
"""
if isinstance(alarm.get('rule'), dict):
return
alarm['type'] = 'threshold'
alarm['rule'] = {}
alarm['matching_metadata'] = cls._decode_matching_metadata(
alarm['matching_metadata'])
for field in ['period', 'evaluation_periods', 'threshold',
'statistic', 'comparison_operator', 'meter_name']:
if field in alarm:
alarm['rule'][field] = alarm[field]
del alarm[field]
query = []
for key in alarm['matching_metadata']:
query.append({'field': key,
'op': 'eq',
'value': alarm['matching_metadata'][key],
'type': 'string'})
del alarm['matching_metadata']
alarm['rule']['query'] = query
@staticmethod
def _decode_matching_metadata(matching_metadata):
if isinstance(matching_metadata, dict):
# note(sileht): keep compatibility with alarm
# with matching_metadata as a dict
return matching_metadata
else:
new_matching_metadata = {}
for elem in matching_metadata:
new_matching_metadata[elem['key']] = elem['value']
return new_matching_metadata
@staticmethod
def _ensure_time_constraints(alarm):
"""Ensures the alarm has a time constraints field."""
if 'time_constraints' not in alarm:
alarm['time_constraints'] = []