Encompass one source's pollsters within a common context

After the per-source separation of static resources and discovery,
samples are published within the context of each individual pollster,
so an arithmetic transformer that combines different meters fails: it
never receives the samples of those meters together.

Samples are now published from a common per-source context, after all
of the source's meters have been collected.
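
For illustration, a transformer of roughly the shape exercised by the new
test below: 'test_sum' is computed from two different meters, so both
meters' samples must reach the transformer through one publish call
(names mirror the test and are otherwise illustrative).

# Illustrative transformer fragment (names mirror the test added below).
transformer_cfg = [
    {'name': 'arithmetic',
     'parameters': {
         'target': {'name': 'test_sum',
                    'expr': '$(test) * 10 + $(testanother)'}}}
]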

Change-Id: I0d186247b4b87fd94317318c180bdfb9c6da70fc
Closes-bug: #1394228
Igor Degtiarov 2014-12-09 15:51:32 +02:00
parent 9af8f521e3
commit 21935cb0c1
2 changed files with 59 additions and 34 deletions


@@ -64,8 +64,8 @@ class Resources(object):
         return static_resources + source_discovery

     @staticmethod
-    def key(source, pollster):
-        return '%s-%s' % (source.name, pollster.name)
+    def key(source_name, pollster):
+        return '%s-%s' % (source_name, pollster.name)


 class PollingTask(object):
@@ -79,7 +79,7 @@ class PollingTask(object):

         # elements of the Cartesian product of sources X pollsters
         # with a common interval
-        self.pollster_matches = set()
+        self.pollster_matches = collections.defaultdict(set)

         # per-sink publisher contexts associated with each source
         self.publishers = {}
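
In effect, pollster_matches changes from a flat set of (source, pollster)
pairs to a mapping from source name to that source's set of pollsters. A
standalone sketch of the regrouping (the string keys and values here are
stand-ins for the real source and pollster objects):

import collections

# Before: one flat set of (source, pollster) pairs.
flat_matches = {('src_a', 'cpu'), ('src_a', 'memory'), ('src_b', 'cpu')}

# After: pollsters grouped per source, so a single publisher context per
# source can cover all of that source's pollsters.
pollster_matches = collections.defaultdict(set)
for source_name, pollster in flat_matches:
    pollster_matches[source_name].add(pollster)

assert pollster_matches['src_a'] == {'cpu', 'memory'}
assert pollster_matches['src_b'] == {'cpu'}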
@@ -95,41 +95,47 @@ class PollingTask(object):
                 self.manager.context)
             self.publishers[pipeline.source.name] = publish_context
         self.publishers[pipeline.source.name].add_pipelines([pipeline])
-        self.pollster_matches.update([(pipeline.source, pollster)])
-        key = Resources.key(pipeline.source, pollster)
+        self.pollster_matches[pipeline.source.name].add(pollster)
+        key = Resources.key(pipeline.source.name, pollster)
         self.resources[key].setup(pipeline)

     def poll_and_publish(self):
         """Polling sample and publish into pipeline."""
         cache = {}
         discovery_cache = {}
-        for source, pollster in self.pollster_matches:
-            LOG.info(_("Polling pollster %(poll)s in the context of %(src)s"),
-                     dict(poll=pollster.name, src=source))
-            pollster_resources = None
-            if pollster.obj.default_discovery:
-                pollster_resources = self.manager.discover(
-                    [pollster.obj.default_discovery], discovery_cache)
-            key = Resources.key(source, pollster)
-            source_resources = list(self.resources[key].get(discovery_cache))
-            polling_resources = (source_resources or pollster_resources)
-            if not polling_resources:
-                LOG.info(_("Skip polling pollster %s, no resources found"),
-                         pollster.name)
-                continue
-            with self.publishers[source.name] as publisher:
-                try:
-                    samples = list(pollster.obj.get_samples(
-                        manager=self.manager,
-                        cache=cache,
-                        resources=polling_resources
-                    ))
-                    publisher(samples)
-                except Exception as err:
-                    LOG.warning(_(
-                        'Continue after error from %(name)s: %(error)s')
-                        % ({'name': pollster.name, 'error': err}),
-                        exc_info=True)
+        for source_name in self.pollster_matches:
+            with self.publishers[source_name] as publisher:
+                for pollster in self.pollster_matches[source_name]:
+                    LOG.info(_("Polling pollster %(poll)s in the context of "
+                               "%(src)s"),
+                             dict(poll=pollster.name, src=source_name))
+                    pollster_resources = None
+                    if pollster.obj.default_discovery:
+                        pollster_resources = self.manager.discover(
+                            [pollster.obj.default_discovery], discovery_cache)
+                    key = Resources.key(source_name, pollster)
+                    source_resources = list(
+                        self.resources[key].get(discovery_cache))
+                    polling_resources = (source_resources or
+                                         pollster_resources)
+                    if not polling_resources:
+                        LOG.info(_(
+                            "Skip polling pollster %s, no resources found"),
+                            pollster.name)
+                        continue
+
+                    try:
+                        samples = list(pollster.obj.get_samples(
+                            manager=self.manager,
+                            cache=cache,
+                            resources=polling_resources
+                        ))
+                        publisher(samples)
+                    except Exception as err:
+                        LOG.warning(_(
+                            'Continue after error from %(name)s: %(error)s')
+                            % ({'name': pollster.name, 'error': err}),
+                            exc_info=True)


 class AgentManager(os_service.Service):
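
The restructured poll_and_publish opens each source's publisher context
once and keeps it open while all of that source's pollsters run, so every
meter's samples pass through the same context. A minimal standalone sketch
of that flow, with stand-in publisher and poll callables:

class RecordingPublisher(object):
    """Stand-in for the per-source publish context used above."""

    def __init__(self):
        self.samples = []

    def __enter__(self):
        # Publishing appends to one shared list for the whole source.
        return self.samples.extend

    def __exit__(self, *exc):
        return False


def poll_and_publish(pollster_matches, publishers, poll):
    # One publisher context per source; all of the source's pollsters
    # are polled (and their samples published) inside it.
    for source_name, pollsters in pollster_matches.items():
        with publishers[source_name] as publish:
            for pollster in pollsters:
                publish(poll(pollster))


publishers = {'src_a': RecordingPublisher()}
poll_and_publish({'src_a': ['test', 'testanother']}, publishers,
                 poll=lambda name: ['%s-sample' % name])
assert publishers['src_a'].samples == ['test-sample', 'testanother-sample']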


@@ -361,7 +361,7 @@ class BaseAgentManagerTestCase(base.BaseTestCase):
         self.setup_pipeline()
         polling_tasks = self.mgr.setup_polling_tasks()
         self.assertEqual(1, len(polling_tasks))
-        pollsters = polling_tasks.get(60).pollster_matches
+        pollsters = polling_tasks.get(60).pollster_matches['test_pipeline']
         self.assertEqual(2, len(pollsters))
         per_task_resources = polling_tasks[60].resources
         self.assertEqual(2, len(per_task_resources))
@@ -679,10 +679,29 @@ class BaseAgentManagerTestCase(base.BaseTestCase):
         self.pipeline_cfg[0]['resources'] = []
         self.setup_pipeline()
         polling_task = self.mgr.setup_polling_tasks().values()[0]
-        pollster = list(polling_task.pollster_matches)[0][1]
+        pollster = list(polling_task.pollster_matches['test_pipeline'])[0]
         LOG = mock.MagicMock()
         with mock.patch('ceilometer.agent.LOG', LOG):
             polling_task.poll_and_publish()
             if not self.mgr.discover():
                 LOG.info.assert_called_with('Skip polling pollster %s, no '
                                             'resources found', pollster.name)
+
+    def test_arithmetic_transformer(self):
+        self.pipeline_cfg[0]['counters'] = ['test', 'testanother']
+        self.pipeline_cfg[0]['transformers'] = [
+            {'name': 'arithmetic',
+             'parameters': {
+                 'target': {'name': 'test_sum',
+                            'unit': default_test_data.unit,
+                            'type': default_test_data.type,
+                            'expr': '$(test) * 10 + $(testanother)'
+                            }
+             }}
+        ]
+        self.setup_pipeline()
+        self.mgr.setup_polling_tasks()[60].poll_and_publish()
+        samples = self.mgr.pipeline_manager.pipelines[0].publishers[0].samples
+        self.assertEqual(1, len(samples))
+        self.assertEqual('test_sum', samples[0].name)
+        self.assertEqual(11, samples[0].volume)
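
As a quick sanity check on the expected volume (assuming, as the final
assertion implies, that both test meters report the default sample volume
of 1):

# Assumed: default_test_data has volume 1 for both 'test' and 'testanother'.
volumes = {'test': 1, 'testanother': 1}
assert volumes['test'] * 10 + volumes['testanother'] == 11  # matches the test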