From 3fa527f75242cecf0462b046034a15a8511e400e Mon Sep 17 00:00:00 2001 From: Terri Yu Date: Tue, 13 Aug 2013 04:41:23 +0000 Subject: [PATCH] Add SQLAlchemy implementation of groupby Implements: blueprint api-group-by New class StatisticsGroupByTest contains the storage tests for group by statistics and has its own test data The storage tests check group by statistics for 1) single field, "user_id" 2) single field, "resource_id" 3) single field, "project_id" 4) single field, "source" 5) single metadata field (not yet implemented) 6) multiple fields 7) multiple metadata fields (not yet implemented) 8) multiple mixed fields, regular and metadata (not yet implemented) 9) single field group by with query filter 10) single metadata field group by with query filter (not yet implemented) 11) multiple field group by with multiple query filters 12) multiple metadata field group by with multiple query filters (not yet implemented) 13) single field with period 14) single metadata field with period (not yet implemented) 15) single field with query filter and period 16) single metadata field with query filter and period (not yet implemented) It also includes the implementation for the SQLAlchemy driver. 
Change-Id: I902db657e424b9894c0382db4869b22317fc25da --- ceilometer/storage/base.py | 2 +- ceilometer/storage/impl_hbase.py | 8 +- ceilometer/storage/impl_log.py | 2 +- ceilometer/storage/impl_mongodb.py | 9 +- ceilometer/storage/impl_sqlalchemy.py | 65 +- ceilometer/storage/models.py | 7 +- ..._compute_duration_by_resource_scenarios.py | 3 +- ..._compute_duration_by_resource_scenarios.py | 3 + tests/storage/base.py | 555 ++++++++++++++++++ tests/storage/test_impl_sqlalchemy.py | 7 + 10 files changed, 632 insertions(+), 29 deletions(-) diff --git a/ceilometer/storage/base.py b/ceilometer/storage/base.py index 6880593e3..1bda217b1 100644 --- a/ceilometer/storage/base.py +++ b/ceilometer/storage/base.py @@ -176,7 +176,7 @@ class Connection(object): """ @abc.abstractmethod - def get_meter_statistics(self, sample_filter, period=None): + def get_meter_statistics(self, sample_filter, period=None, groupby=None): """Return an iterable of model.Statistics instances. The filter must have a meter value set. diff --git a/ceilometer/storage/impl_hbase.py b/ceilometer/storage/impl_hbase.py index 0d4281b53..abccaf78b 100644 --- a/ceilometer/storage/impl_hbase.py +++ b/ceilometer/storage/impl_hbase.py @@ -494,7 +494,7 @@ class Connection(base.Connection): timeutils.delta_seconds(stat.duration_start, stat.duration_end) - def get_meter_statistics(self, sample_filter, period=None): + def get_meter_statistics(self, sample_filter, period=None, groupby=None): """Return an iterable of models.Statistics instances containing meter statistics described by the query parameters. @@ -507,6 +507,9 @@ class Connection(base.Connection): because of all the Thrift traffic it is going to create. 
""" + if groupby: + raise NotImplementedError("Group by not implemented.") + meter_table = self.conn.table(self.METER_TABLE) q, start, stop = make_query_from_filter(sample_filter) @@ -563,7 +566,8 @@ class Connection(base.Connection): period_end=period_end, duration=None, duration_start=None, - duration_end=None) + duration_end=None, + groupby=None) ) self._update_meter_stats(results[-1], meter) return results diff --git a/ceilometer/storage/impl_log.py b/ceilometer/storage/impl_log.py index 337940450..6aa7aabc1 100644 --- a/ceilometer/storage/impl_log.py +++ b/ceilometer/storage/impl_log.py @@ -133,7 +133,7 @@ class Connection(base.Connection): """ return [] - def get_meter_statistics(self, sample_filter, period=None): + def get_meter_statistics(self, sample_filter, period=None, groupby=None): """Return a dictionary containing meter statistics. described by the query parameters. diff --git a/ceilometer/storage/impl_mongodb.py b/ceilometer/storage/impl_mongodb.py index 44ecccde0..c8fcb10aa 100644 --- a/ceilometer/storage/impl_mongodb.py +++ b/ceilometer/storage/impl_mongodb.py @@ -684,13 +684,16 @@ class Connection(base.Connection): s['counter_unit'] = s.get('counter_unit', '') yield models.Sample(**s) - def get_meter_statistics(self, sample_filter, period=None): + def get_meter_statistics(self, sample_filter, period=None, groupby=None): """Return an iterable of models.Statistics instance containing meter statistics described by the query parameters. The filter must have a meter value set. 
""" + if groupby: + raise NotImplementedError("Group by not implemented.") + q = make_query_from_filter(sample_filter) if period: @@ -713,6 +716,10 @@ class Connection(base.Connection): query=q, ) + # TODO(jd) implement groupby and remove this code + for r in results['results']: + r['value']['groupby'] = None + return sorted((models.Statistics(**(r['value'])) for r in results['results']), key=operator.attrgetter('period_start')) diff --git a/ceilometer/storage/impl_sqlalchemy.py b/ceilometer/storage/impl_sqlalchemy.py index 1875990c6..4ee3370de 100644 --- a/ceilometer/storage/impl_sqlalchemy.py +++ b/ceilometer/storage/impl_sqlalchemy.py @@ -431,9 +431,8 @@ class Connection(base.Connection): ) @staticmethod - def _make_stats_query(sample_filter): - session = sqlalchemy_session.get_session() - query = session.query( + def _make_stats_query(sample_filter, groupby): + select = [ Meter.counter_unit.label('unit'), func.min(Meter.timestamp).label('tsmin'), func.max(Meter.timestamp).label('tsmax'), @@ -441,12 +440,25 @@ class Connection(base.Connection): func.sum(Meter.counter_volume).label('sum'), func.min(Meter.counter_volume).label('min'), func.max(Meter.counter_volume).label('max'), - func.count(Meter.counter_volume).label('count')) + func.count(Meter.counter_volume).label('count'), + ] + + session = sqlalchemy_session.get_session() + + if groupby: + group_attributes = [getattr(Meter, g) for g in groupby] + select.extend(group_attributes) + + query = session.query(*select) + + if groupby: + query = query.group_by(*group_attributes) return make_query_from_filter(query, sample_filter) @staticmethod - def _stats_result_to_model(result, period, period_start, period_end): + def _stats_result_to_model(result, period, period_start, + period_end, groupby): duration = (timeutils.delta_seconds(result.tsmin, result.tsmax) if result.tsmin is not None and result.tsmax is not None else None) @@ -463,24 +475,35 @@ class Connection(base.Connection): period=period, 
period_start=period_start, period_end=period_end, + groupby=(dict((g, getattr(result, g)) for g in groupby) + if groupby else None) ) - def get_meter_statistics(self, sample_filter, period=None): + def get_meter_statistics(self, sample_filter, period=None, groupby=None): """Return an iterable of api_models.Statistics instances containing meter statistics described by the query parameters. The filter must have a meter value set. """ - if not period or not sample_filter.start or not sample_filter.end: - res = self._make_stats_query(sample_filter).all()[0] + if groupby: + for group in groupby: + if group not in ['user_id', 'project_id', 'resource_id']: + raise NotImplementedError( + "Unable to group by these fields") if not period: - if res.count: - yield self._stats_result_to_model(res, 0, res.tsmin, res.tsmax) + for res in self._make_stats_query(sample_filter, groupby): + if res.count: + yield self._stats_result_to_model(res, 0, + res.tsmin, res.tsmax, + groupby) return - query = self._make_stats_query(sample_filter) + if not sample_filter.start or not sample_filter.end: + res = self._make_stats_query(sample_filter, None).first() + + query = self._make_stats_query(sample_filter, groupby) # HACK(jd) This is an awful method to compute stats by period, but # since we're trying to be SQL agnostic we have to write portable # code, so here it is, admire! We're going to do one request to get @@ -492,16 +515,16 @@ class Connection(base.Connection): period): q = query.filter(Meter.timestamp >= period_start) q = q.filter(Meter.timestamp < period_end) - r = q.all()[0] - # Don't return results that didn't have any data. 
- if r.count: - yield self._stats_result_to_model( - result=r, - period=int(timeutils.delta_seconds(period_start, - period_end)), - period_start=period_start, - period_end=period_end, - ) + for r in q.all(): + if r.count: + yield self._stats_result_to_model( + result=r, + period=int(timeutils.delta_seconds(period_start, + period_end)), + period_start=period_start, + period_end=period_end, + groupby=groupby + ) @staticmethod def _row_to_alarm_model(row): diff --git a/ceilometer/storage/models.py b/ceilometer/storage/models.py index 7d5c23ad9..8d6cebd4d 100644 --- a/ceilometer/storage/models.py +++ b/ceilometer/storage/models.py @@ -212,7 +212,8 @@ class Statistics(Model): def __init__(self, unit, min, max, avg, sum, count, period, period_start, period_end, - duration, duration_start, duration_end): + duration, duration_start, duration_end, + groupby): """Create a new statistics object. :param unit: The unit type of the data set @@ -227,13 +228,15 @@ class Statistics(Model): :param duration: The total time for the matching samples :param duration_start: The earliest time for the matching samples :param duration_end: The latest time for the matching samples + :param groupby: The fields used to group the samples. 
""" Model.__init__(self, unit=unit, min=min, max=max, avg=avg, sum=sum, count=count, period=period, period_start=period_start, period_end=period_end, duration=duration, duration_start=duration_start, - duration_end=duration_end) + duration_end=duration_end, + groupby=groupby) class Alarm(Model): diff --git a/tests/api/v1/test_compute_duration_by_resource_scenarios.py b/tests/api/v1/test_compute_duration_by_resource_scenarios.py index 8b70b617b..775b89090 100644 --- a/tests/api/v1/test_compute_duration_by_resource_scenarios.py +++ b/tests/api/v1/test_compute_duration_by_resource_scenarios.py @@ -64,7 +64,8 @@ class TestComputeDurationByResource(tests_api.TestBase, period_end=None, duration=end - start, duration_start=start, - duration_end=end) + duration_end=end, + groupby=None) self.stubs.Set(self.conn, 'get_meter_statistics', get_meter_statistics) diff --git a/tests/api/v2/test_compute_duration_by_resource_scenarios.py b/tests/api/v2/test_compute_duration_by_resource_scenarios.py index 1ad4e2b24..c6a92a9bd 100644 --- a/tests/api/v2/test_compute_duration_by_resource_scenarios.py +++ b/tests/api/v2/test_compute_duration_by_resource_scenarios.py @@ -80,6 +80,7 @@ class TestComputeDurationByResource(FunctionalTest, duration=duration, duration_start=duration_start, duration_end=duration_end, + groupby=None, ) ] self._stub_interval_func(get_interval) @@ -155,6 +156,7 @@ class TestComputeDurationByResource(FunctionalTest, period=None, period_start=None, period_end=None, + groupby=None, ) ] self._stub_interval_func(get_interval) @@ -185,6 +187,7 @@ class TestComputeDurationByResource(FunctionalTest, period=None, period_start=None, period_end=None, + groupby=None, ) ] return (self.early1, self.early2) diff --git a/tests/storage/base.py b/tests/storage/base.py index c09fd84bf..b62df4f2e 100644 --- a/tests/storage/base.py +++ b/tests/storage/base.py @@ -862,6 +862,561 @@ class StatisticsTest(DBTestBase): assert results.avg == 6 +class StatisticsGroupByTest(DBTestBase): + + 
def prepare_data(self): + test_sample_data = ( + {'volume': 2, 'user': 'user-1', 'project': 'project-1', + 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 16, 10), + 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-1', + 'source': 'source-2'}, + {'volume': 2, 'user': 'user-1', 'project': 'project-2', + 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 15, 37), + 'metadata_flavor': 'm1.large', 'metadata_event': 'event-1', + 'source': 'source-2'}, + {'volume': 1, 'user': 'user-2', 'project': 'project-1', + 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 10, 11), + 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-2', + 'source': 'source-1'}, + {'volume': 1, 'user': 'user-2', 'project': 'project-1', + 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 10, 40), + 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', + 'source': 'source-1'}, + {'volume': 2, 'user': 'user-2', 'project': 'project-1', + 'resource': 'resource-1', 'timestamp': (2013, 8, 1, 14, 59), + 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', + 'source': 'source-1'}, + {'volume': 4, 'user': 'user-2', 'project': 'project-2', + 'resource': 'resource-2', 'timestamp': (2013, 8, 1, 17, 28), + 'metadata_flavor': 'm1.large', 'metadata_event': 'event-2', + 'source': 'source-1'}, + {'volume': 4, 'user': 'user-3', 'project': 'project-1', + 'resource': 'resource-3', 'timestamp': (2013, 8, 1, 11, 22), + 'metadata_flavor': 'm1.tiny', 'metadata_event': 'event-2', + 'source': 'source-3'}, + ) + + for test_sample in test_sample_data: + c = sample.Sample( + 'instance', + sample.TYPE_CUMULATIVE, + unit='s', + volume=test_sample['volume'], + user_id=test_sample['user'], + project_id=test_sample['project'], + resource_id=test_sample['resource'], + timestamp=datetime.datetime(*test_sample['timestamp']), + resource_metadata={'flavor': test_sample['metadata_flavor'], + 'event': test_sample['metadata_event'], }, + source=test_sample['source'], + ) + msg = rpc.meter_message_from_counter( 
+ c, + cfg.CONF.publisher_rpc.metering_secret, + ) + self.conn.record_metering_data(msg) + + def test_group_by_user(self): + f = storage.SampleFilter( + meter='instance', + ) + results = list(self.conn.get_meter_statistics(f, groupby=['user_id'])) + self.assertEqual(len(results), 3) + groupby_list = [r.groupby for r in results] + groupby_keys_set = set(x for sub_dict in groupby_list + for x in sub_dict.keys()) + groupby_vals_set = set(x for sub_dict in groupby_list + for x in sub_dict.values()) + self.assertEqual(groupby_keys_set, set(['user_id'])) + self.assertEqual(groupby_vals_set, set(['user-1', 'user-2', 'user-3'])) + + for r in results: + if r.groupby == {'user_id': 'user-1'}: + self.assertEqual(r.count, 2) + self.assertEqual(r.unit, 's') + self.assertEqual(r.min, 2) + self.assertEqual(r.max, 2) + self.assertEqual(r.sum, 4) + self.assertEqual(r.avg, 2) + elif r.groupby == {'user_id': 'user-2'}: + self.assertEqual(r.count, 4) + self.assertEqual(r.unit, 's') + self.assertEqual(r.min, 1) + self.assertEqual(r.max, 4) + self.assertEqual(r.sum, 8) + self.assertEqual(r.avg, 2) + elif r.groupby == {'user_id': 'user-3'}: + self.assertEqual(r.count, 1) + self.assertEqual(r.unit, 's') + self.assertEqual(r.min, 4) + self.assertEqual(r.max, 4) + self.assertEqual(r.sum, 4) + self.assertEqual(r.avg, 4) + + def test_group_by_resource(self): + f = storage.SampleFilter( + meter='instance', + ) + results = list(self.conn.get_meter_statistics(f, + groupby=['resource_id'])) + self.assertEqual(len(results), 3) + groupby_list = [r.groupby for r in results] + groupby_keys_set = set(x for sub_dict in groupby_list + for x in sub_dict.keys()) + groupby_vals_set = set(x for sub_dict in groupby_list + for x in sub_dict.values()) + self.assertEqual(groupby_keys_set, set(['resource_id'])) + self.assertEqual(groupby_vals_set, set(['resource-1', + 'resource-2', + 'resource-3'])) + for r in results: + if r.groupby == {'resource_id': 'resource-1'}: + self.assertEqual(r.count, 3) + 
self.assertEqual(r.unit, 's') + self.assertEqual(r.min, 2) + self.assertEqual(r.max, 2) + self.assertEqual(r.sum, 6) + self.assertEqual(r.avg, 2) + elif r.groupby == {'resource_id': 'resource-2'}: + self.assertEqual(r.count, 3) + self.assertEqual(r.unit, 's') + self.assertEqual(r.min, 1) + self.assertEqual(r.max, 4) + self.assertEqual(r.sum, 6) + self.assertEqual(r.avg, 2) + elif r.groupby == {'resource_id': 'resource-3'}: + self.assertEqual(r.count, 1) + self.assertEqual(r.unit, 's') + self.assertEqual(r.min, 4) + self.assertEqual(r.max, 4) + self.assertEqual(r.sum, 4) + self.assertEqual(r.avg, 4) + + def test_group_by_project(self): + f = storage.SampleFilter( + meter='instance', + ) + results = list(self.conn.get_meter_statistics(f, + groupby=['project_id'])) + self.assertEqual(len(results), 2) + groupby_list = [r.groupby for r in results] + groupby_keys_set = set(x for sub_dict in groupby_list + for x in sub_dict.keys()) + groupby_vals_set = set(x for sub_dict in groupby_list + for x in sub_dict.values()) + self.assertEqual(groupby_keys_set, set(['project_id'])) + self.assertEqual(groupby_vals_set, set(['project-1', 'project-2'])) + + for r in results: + if r.groupby == {'project_id': 'project-1'}: + self.assertEqual(r.count, 5) + self.assertEqual(r.unit, 's') + self.assertEqual(r.min, 1) + self.assertEqual(r.max, 4) + self.assertEqual(r.sum, 10) + self.assertEqual(r.avg, 2) + elif r.groupby == {'project_id': 'project-2'}: + self.assertEqual(r.count, 2) + self.assertEqual(r.unit, 's') + self.assertEqual(r.min, 2) + self.assertEqual(r.max, 4) + self.assertEqual(r.sum, 6) + self.assertEqual(r.avg, 3) + + def test_group_by_source(self): + f = storage.SampleFilter( + meter='instance', + ) + results = list(self.conn.get_meter_statistics(f, groupby=['source'])) + self.assertEqual(len(results), 3) + groupby_list = [r.groupby for r in results] + groupby_keys_set = set(x for sub_dict in groupby_list + for x in sub_dict.keys()) + groupby_vals_set = set(x for sub_dict in 
groupby_list + for x in sub_dict.values()) + self.assertEqual(groupby_keys_set, set(['source'])) + self.assertEqual(groupby_vals_set, set(['source-1', + 'source-2', + 'source-3'])) + + for r in results: + if r.groupby == {'source': 'source-1'}: + self.assertEqual(r.count, 4) + self.assertEqual(r.unit, 's') + self.assertEqual(r.min, 1) + self.assertEqual(r.max, 4) + self.assertEqual(r.sum, 8) + self.assertEqual(r.avg, 2) + elif r.groupby == {'source': 'source-2'}: + self.assertEqual(r.count, 2) + self.assertEqual(r.unit, 's') + self.assertEqual(r.min, 2) + self.assertEqual(r.max, 2) + self.assertEqual(r.sum, 4) + self.assertEqual(r.avg, 2) + elif r.groupby == {'source': 'source-3'}: + self.assertEqual(r.count, 1) + self.assertEqual(r.unit, 's') + self.assertEqual(r.min, 4) + self.assertEqual(r.max, 4) + self.assertEqual(r.sum, 4) + self.assertEqual(r.avg, 4) + + def test_group_by_unknown_field(self): + f = storage.SampleFilter( + meter='instance', + ) + result = self.conn.get_meter_statistics( + f, groupby=['wtf']) + self.assertRaises( + NotImplementedError, + list, + result) + + def test_group_by_metadata(self): + pass + + def test_group_by_multiple_regular(self): + f = storage.SampleFilter( + meter='instance', + ) + results = list(self.conn.get_meter_statistics(f, + groupby=['user_id', + 'resource_id'])) + self.assertEqual(len(results), 4) + groupby_list = [r.groupby for r in results] + groupby_keys_set = set(x for sub_dict in groupby_list + for x in sub_dict.keys()) + groupby_vals_set = set(x for sub_dict in groupby_list + for x in sub_dict.values()) + self.assertEqual(groupby_keys_set, set(['user_id', 'resource_id'])) + self.assertEqual(groupby_vals_set, set(['user-1', 'user-2', + 'user-3', 'resource-1', + 'resource-2', 'resource-3'])) + + for r in results: + if r.groupby == {'user_id': 'user-1', 'resource_id': 'resource-1'}: + self.assertEqual(r.count, 2) + self.assertEqual(r.unit, 's') + self.assertEqual(r.min, 2) + self.assertEqual(r.max, 2) + 
self.assertEqual(r.sum, 4) + self.assertEqual(r.avg, 2) + elif r.groupby == {'user_id': 'user-2', + 'resource_id': 'resource-1'}: + self.assertEqual(r.count, 1) + self.assertEqual(r.unit, 's') + self.assertEqual(r.min, 2) + self.assertEqual(r.max, 2) + self.assertEqual(r.sum, 2) + self.assertEqual(r.avg, 2) + elif r.groupby == {'user_id': 'user-2', + 'resource_id': 'resource-2'}: + self.assertEqual(r.count, 3) + self.assertEqual(r.unit, 's') + self.assertEqual(r.min, 1) + self.assertEqual(r.max, 4) + self.assertEqual(r.sum, 6) + self.assertEqual(r.avg, 2) + elif r.groupby == {'user_id': 'user-3', + 'resource_id': 'resource-3'}: + self.assertEqual(r.count, 1) + self.assertEqual(r.unit, 's') + self.assertEqual(r.min, 4) + self.assertEqual(r.max, 4) + self.assertEqual(r.sum, 4) + self.assertEqual(r.avg, 4) + else: + self.assertNotEqual(r.groupby, {'user_id': 'user-1', + 'resource_id': 'resource-2'}) + self.assertNotEqual(r.groupby, {'user_id': 'user-1', + 'resource_id': 'resource-3'}) + self.assertNotEqual(r.groupby, {'user_id': 'user-2', + 'resource_id': 'resource-3'}) + self.assertNotEqual(r.groupby, {'user_id': 'user-3', + 'resource_id': 'resource-1'}) + self.assertNotEqual(r.groupby, {'user_id': 'user-3', + 'resource_id': 'resource-2'}) + + def test_group_by_multiple_metadata(self): + pass + + def test_group_by_multiple_regular_metadata(self): + pass + + def test_group_by_with_query_filter(self): + f = storage.SampleFilter( + meter='instance', + project='project-1', + ) + results = list(self.conn.get_meter_statistics( + f, + groupby=['resource_id'])) + self.assertEqual(len(results), 3) + groupby_list = [r.groupby for r in results] + groupby_keys_set = set(x for sub_dict in groupby_list + for x in sub_dict.keys()) + groupby_vals_set = set(x for sub_dict in groupby_list + for x in sub_dict.values()) + self.assertEqual(groupby_keys_set, set(['resource_id'])) + self.assertEqual(groupby_vals_set, set(['resource-1', + 'resource-2', + 'resource-3'])) + + for r in 
results: + if r.groupby == {'resource_id': 'resource-1'}: + self.assertEqual(r.count, 2) + self.assertEqual(r.unit, 's') + self.assertEqual(r.min, 2) + self.assertEqual(r.max, 2) + self.assertEqual(r.sum, 4) + self.assertEqual(r.avg, 2) + elif r.groupby == {'resource_id': 'resource-2'}: + self.assertEqual(r.count, 2) + self.assertEqual(r.unit, 's') + self.assertEqual(r.min, 1) + self.assertEqual(r.max, 1) + self.assertEqual(r.sum, 2) + self.assertEqual(r.avg, 1) + elif r.groupby == {'resource_id': 'resource-3'}: + self.assertEqual(r.count, 1) + self.assertEqual(r.unit, 's') + self.assertEqual(r.min, 4) + self.assertEqual(r.max, 4) + self.assertEqual(r.sum, 4) + self.assertEqual(r.avg, 4) + + def test_group_by_metadata_with_query_filter(self): + pass + + def test_group_by_with_query_filter_multiple(self): + f = storage.SampleFilter( + meter='instance', + user='user-2', + source='source-1', + ) + results = list(self.conn.get_meter_statistics( + f, + groupby=['project_id', 'resource_id'])) + self.assertEqual(len(results), 3) + groupby_list = [r.groupby for r in results] + groupby_keys_set = set(x for sub_dict in groupby_list + for x in sub_dict.keys()) + groupby_vals_set = set(x for sub_dict in groupby_list + for x in sub_dict.values()) + self.assertEqual(groupby_keys_set, set(['project_id', 'resource_id'])) + self.assertEqual(groupby_vals_set, set(['project-1', 'project-2', + 'resource-1', 'resource-2'])) + + for r in results: + if r.groupby == {'project_id': 'project-1', + 'resource_id': 'resource-1'}: + self.assertEqual(r.count, 1) + self.assertEqual(r.unit, 's') + self.assertEqual(r.min, 2) + self.assertEqual(r.max, 2) + self.assertEqual(r.sum, 2) + self.assertEqual(r.avg, 2) + elif r.groupby == {'project_id': 'project-1', + 'resource_id': 'resource-2'}: + self.assertEqual(r.count, 2) + self.assertEqual(r.unit, 's') + self.assertEqual(r.min, 1) + self.assertEqual(r.max, 1) + self.assertEqual(r.sum, 2) + self.assertEqual(r.avg, 1) + elif r.groupby == {'project_id': 
'project-2', + 'resource_id': 'resource-2'}: + self.assertEqual(r.count, 1) + self.assertEqual(r.unit, 's') + self.assertEqual(r.min, 4) + self.assertEqual(r.max, 4) + self.assertEqual(r.sum, 4) + self.assertEqual(r.avg, 4) + + def test_group_by_metadata_with_query_filter_multiple(self): + pass + + def test_group_by_with_period(self): + f = storage.SampleFilter( + meter='instance', + ) + results = list(self.conn.get_meter_statistics(f, + period=7200, + groupby=['project_id'])) + self.assertEqual(len(results), 4) + groupby_list = [r.groupby for r in results] + groupby_keys_set = set(x for sub_dict in groupby_list + for x in sub_dict.keys()) + groupby_vals_set = set(x for sub_dict in groupby_list + for x in sub_dict.values()) + self.assertEqual(groupby_keys_set, set(['project_id'])) + self.assertEqual(groupby_vals_set, set(['project-1', 'project-2'])) + period_start_set = set([r.period_start for r in results]) + period_start_valid = set([datetime.datetime(2013, 8, 1, 10, 11), + datetime.datetime(2013, 8, 1, 14, 11), + datetime.datetime(2013, 8, 1, 16, 11)]) + self.assertEqual(period_start_set, period_start_valid) + + for r in results: + if (r.groupby == {'project_id': 'project-1'} and + r.period_start == datetime.datetime(2013, 8, 1, 10, 11)): + self.assertEqual(r.count, 3) + self.assertEqual(r.unit, 's') + self.assertEqual(r.min, 1) + self.assertEqual(r.max, 4) + self.assertEqual(r.sum, 6) + self.assertEqual(r.avg, 2) + self.assertEqual(r.duration, 4260) + self.assertEqual(r.duration_start, + datetime.datetime(2013, 8, 1, 10, 11)) + self.assertEqual(r.duration_end, + datetime.datetime(2013, 8, 1, 11, 22)) + self.assertEqual(r.period, 7200) + self.assertEqual(r.period_end, + datetime.datetime(2013, 8, 1, 12, 11)) + elif (r.groupby == {'project_id': 'project-1'} and + r.period_start == datetime.datetime(2013, 8, 1, 14, 11)): + self.assertEqual(r.count, 2) + self.assertEqual(r.unit, 's') + self.assertEqual(r.min, 2) + self.assertEqual(r.max, 2) + 
self.assertEqual(r.sum, 4) + self.assertEqual(r.avg, 2) + self.assertEqual(r.duration, 4260) + self.assertEqual(r.duration_start, + datetime.datetime(2013, 8, 1, 14, 59)) + self.assertEqual(r.duration_end, + datetime.datetime(2013, 8, 1, 16, 10)) + self.assertEqual(r.period, 7200) + self.assertEqual(r.period_end, + datetime.datetime(2013, 8, 1, 16, 11)) + elif (r.groupby == {'project_id': 'project-2'} and + r.period_start == datetime.datetime(2013, 8, 1, 14, 11)): + self.assertEqual(r.count, 1) + self.assertEqual(r.unit, 's') + self.assertEqual(r.min, 2) + self.assertEqual(r.max, 2) + self.assertEqual(r.sum, 2) + self.assertEqual(r.avg, 2) + self.assertEqual(r.duration, 0) + self.assertEqual(r.duration_start, + datetime.datetime(2013, 8, 1, 15, 37)) + self.assertEqual(r.duration_end, + datetime.datetime(2013, 8, 1, 15, 37)) + self.assertEqual(r.period, 7200) + self.assertEqual(r.period_end, + datetime.datetime(2013, 8, 1, 16, 11)) + elif (r.groupby == {'project_id': 'project-2'} and + r.period_start == datetime.datetime(2013, 8, 1, 16, 11)): + self.assertEqual(r.count, 1) + self.assertEqual(r.unit, 's') + self.assertEqual(r.min, 4) + self.assertEqual(r.max, 4) + self.assertEqual(r.sum, 4) + self.assertEqual(r.avg, 4) + self.assertEqual(r.duration, 0) + self.assertEqual(r.duration_start, + datetime.datetime(2013, 8, 1, 17, 28)) + self.assertEqual(r.duration_end, + datetime.datetime(2013, 8, 1, 17, 28)) + self.assertEqual(r.period, 7200) + self.assertEqual(r.period_end, + datetime.datetime(2013, 8, 1, 18, 11)) + else: + self.assertNotEqual([r.groupby, r.period_start], + [{'project_id': 'project-1'}, + datetime.datetime(2013, 8, 1, 16, 11)]) + self.assertNotEqual([r.groupby, r.period_start], + [{'project_id': 'project-2'}, + datetime.datetime(2013, 8, 1, 10, 11)]) + + def test_group_by_metadata_with_period(self): + pass + + def test_group_by_with_query_filter_and_period(self): + f = storage.SampleFilter( + meter='instance', + source='source-1', + ) + results = 
list(self.conn.get_meter_statistics(f, + period=7200, + groupby=['project_id'])) + self.assertEqual(len(results), 3) + groupby_list = [r.groupby for r in results] + groupby_keys_set = set(x for sub_dict in groupby_list + for x in sub_dict.keys()) + groupby_vals_set = set(x for sub_dict in groupby_list + for x in sub_dict.values()) + self.assertEqual(groupby_keys_set, set(['project_id'])) + self.assertEqual(groupby_vals_set, set(['project-1', 'project-2'])) + period_start_set = set([r.period_start for r in results]) + period_start_valid = set([datetime.datetime(2013, 8, 1, 10, 11), + datetime.datetime(2013, 8, 1, 14, 11), + datetime.datetime(2013, 8, 1, 16, 11)]) + self.assertEqual(period_start_set, period_start_valid) + + for r in results: + if (r.groupby == {'project_id': 'project-1'} and + r.period_start == datetime.datetime(2013, 8, 1, 10, 11)): + self.assertEqual(r.count, 2) + self.assertEqual(r.unit, 's') + self.assertEqual(r.min, 1) + self.assertEqual(r.max, 1) + self.assertEqual(r.sum, 2) + self.assertEqual(r.avg, 1) + self.assertEqual(r.duration, 1740) + self.assertEqual(r.duration_start, + datetime.datetime(2013, 8, 1, 10, 11)) + self.assertEqual(r.duration_end, + datetime.datetime(2013, 8, 1, 10, 40)) + self.assertEqual(r.period, 7200) + self.assertEqual(r.period_end, + datetime.datetime(2013, 8, 1, 12, 11)) + elif (r.groupby == {'project_id': 'project-1'} and + r.period_start == datetime.datetime(2013, 8, 1, 14, 11)): + self.assertEqual(r.count, 1) + self.assertEqual(r.unit, 's') + self.assertEqual(r.min, 2) + self.assertEqual(r.max, 2) + self.assertEqual(r.sum, 2) + self.assertEqual(r.avg, 2) + self.assertEqual(r.duration, 0) + self.assertEqual(r.duration_start, + datetime.datetime(2013, 8, 1, 14, 59)) + self.assertEqual(r.duration_end, + datetime.datetime(2013, 8, 1, 14, 59)) + self.assertEqual(r.period, 7200) + self.assertEqual(r.period_end, + datetime.datetime(2013, 8, 1, 16, 11)) + elif (r.groupby == {'project_id': 'project-2'} and + r.period_start 
== datetime.datetime(2013, 8, 1, 16, 11)): + self.assertEqual(r.count, 1) + self.assertEqual(r.unit, 's') + self.assertEqual(r.min, 4) + self.assertEqual(r.max, 4) + self.assertEqual(r.sum, 4) + self.assertEqual(r.avg, 4) + self.assertEqual(r.duration, 0) + self.assertEqual(r.duration_start, + datetime.datetime(2013, 8, 1, 17, 28)) + self.assertEqual(r.duration_end, + datetime.datetime(2013, 8, 1, 17, 28)) + self.assertEqual(r.period, 7200) + self.assertEqual(r.period_end, + datetime.datetime(2013, 8, 1, 18, 11)) + else: + self.assertNotEqual([r.groupby, r.period_start], + [{'project_id': 'project-1'}, + datetime.datetime(2013, 8, 1, 16, 11)]) + self.assertNotEqual([r.groupby, r.period_start], + [{'project_id': 'project-2'}, + datetime.datetime(2013, 8, 1, 10, 11)]) + + def test_group_by_metadata_with_query_filter_and_period(self): + pass + + class CounterDataTypeTest(DBTestBase): def prepare_data(self): diff --git a/tests/storage/test_impl_sqlalchemy.py b/tests/storage/test_impl_sqlalchemy.py index ddf6accc3..0b514a56f 100644 --- a/tests/storage/test_impl_sqlalchemy.py +++ b/tests/storage/test_impl_sqlalchemy.py @@ -54,6 +54,13 @@ class StatisticsTest(base.StatisticsTest, SQLAlchemyEngineTestBase): pass +class StatisticsGroupByTest(base.StatisticsGroupByTest, + SQLAlchemyEngineTestBase): + # This is not implemented + def test_group_by_source(self): + pass + + class CounterDataTypeTest(base.CounterDataTypeTest, SQLAlchemyEngineTestBase): pass