enable cachetools for gnocchi alarms

- set ttl to an hour since capabilities rarely change (if ever?)
- set cachetools to 1.1.6 since it has some ttl expiration improvements and it's a year old so why not.
- i give up trying to disable cache

Change-Id: I1b83ffd32cfd631fc340b068e71f61789388ee73
This commit is contained in:
parent 87ec850a29
commit bebe5204fc
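
The hunks below put a one-entry, one-hour TTL cache in front of the Gnocchi capabilities lookup. As a quick illustration of the TTL semantics the commit message relies on, here is a minimal sketch assuming cachetools' TTLCache and its optional timer argument; the fake clock and the cached key/value are illustrative, not taken from the commit:

    # Minimal sketch of TTLCache expiry semantics (illustrative values only).
    import cachetools

    clock = [0]  # fake clock so the example is deterministic
    cache = cachetools.TTLCache(maxsize=1, ttl=3600, timer=lambda: clock[0])

    cache['aggregation_methods'] = ['count', 'mean']
    assert 'aggregation_methods' in cache        # served from the cache while fresh

    clock[0] += 3601                             # a little over an hour "passes"
    assert 'aggregation_methods' not in cache    # expired: the next lookup refetches
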
@@ -13,6 +13,9 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
+import threading
+
+import cachetools
 from gnocchiclient import client
 from gnocchiclient import exceptions
 from oslo_serialization import jsonutils
@@ -47,6 +50,9 @@ class AlarmGnocchiThresholdRule(base.AlarmRule):
     granularity = wsme.wsattr(wtypes.IntegerType(minimum=1), default=60)
     "The time range in seconds over which query"
 
+    cache = cachetools.TTLCache(maxsize=1, ttl=3600)
+    lock = threading.RLock()
+
     @classmethod
     def validate_alarm(cls, alarm):
         alarm_rule = getattr(alarm, "%s_rule" % alarm.type)
@@ -56,10 +62,8 @@ class AlarmGnocchiThresholdRule(base.AlarmRule):
                 'aggregation_method should be in %s not %s' % (
                     cls._get_aggregation_methods(), aggregation_method))
 
-    # NOTE(sileht): once cachetools is in the requirements
-    # enable it
-    # @cachetools.ttl_cache(maxsize=1, ttl=600)
     @staticmethod
+    @cachetools.cached(cache, lock=lock)
     def _get_aggregation_methods():
         conf = pecan.request.cfg
         gnocchi_client = client.Client(
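
Taken together, the three hunks above add a class-level TTLCache and RLock, then hang cachetools.cached off the static capabilities lookup. A minimal, standalone sketch of the same pattern follows; the class name, the call counter, and the returned list are illustrative stand-ins for the real Gnocchi capabilities call:

    import threading

    import cachetools


    class CapabilityRule(object):
        # Shared class-level state: one cached entry, refreshed at most hourly.
        cache = cachetools.TTLCache(maxsize=1, ttl=3600)
        lock = threading.RLock()
        backend_calls = 0  # only here to show how often the "expensive" call runs

        @staticmethod
        @cachetools.cached(cache, lock=lock)
        def _get_aggregation_methods():
            CapabilityRule.backend_calls += 1
            return ['count', 'mean', 'max']  # stand-in for the Gnocchi lookup


    CapabilityRule._get_aggregation_methods()
    CapabilityRule._get_aggregation_methods()
    assert CapabilityRule.backend_calls == 1  # second call answered from the cache

Decorator order matters here: cachetools.cached sits under staticmethod so it wraps the plain function before staticmethod does, which is how the hunk above stacks them.
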
@@ -17,7 +17,6 @@
 import datetime
 import os
 
-from gnocchiclient import exceptions
 import mock
 import oslo_messaging.conffixture
 from oslo_serialization import jsonutils
@@ -2920,6 +2919,43 @@ class TestAlarmsRuleGnocchi(TestAlarmsBase):
                                 for r in data
                                 if 'gnocchi_resources_threshold_rule' in r))
 
+    def test_post_gnocchi_metrics_alarm_cached(self):
+        # NOTE(gordc): cache is a decorator and therefore, gets mocked across
+        # entire scenario. ideally we should test both scenario but tough.
+        # assume cache will return aggregation_method == ['count'] always.
+        json = {
+            'enabled': False,
+            'name': 'name_post',
+            'state': 'ok',
+            'type': 'gnocchi_aggregation_by_metrics_threshold',
+            'severity': 'critical',
+            'ok_actions': ['http://something/ok'],
+            'alarm_actions': ['http://something/alarm'],
+            'insufficient_data_actions': ['http://something/no'],
+            'repeat_actions': True,
+            'gnocchi_aggregation_by_metrics_threshold_rule': {
+                'metrics': ['b3d9d8ab-05e8-439f-89ad-5e978dd2a5eb',
+                            '009d4faf-c275-46f0-8f2d-670b15bac2b0'],
+                'comparison_operator': 'le',
+                'aggregation_method': 'count',
+                'threshold': 50,
+                'evaluation_periods': 3,
+                'granularity': 180,
+            }
+        }
+
+        with mock.patch('aodh.api.controllers.v2.alarm_rules.'
+                        'gnocchi.client') as clientlib:
+            c = clientlib.Client.return_value
+            c.capabilities.list.return_value = {
+                'aggregation_methods': ['count']}
+            self.post_json('/alarms', params=json, headers=self.auth_headers)
+
+        with mock.patch('aodh.api.controllers.v2.alarm_rules.'
+                        'gnocchi.client') as clientlib:
+            self.post_json('/alarms', params=json, headers=self.auth_headers)
+            self.assertFalse(clientlib.called)
+
     def test_post_gnocchi_resources_alarm(self):
         json = {
             'enabled': False,
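
Because the cache now lives on the rule class rather than inside the mocked client, it survives across the two mock.patch blocks in the test above, which is exactly what the final assertFalse(clientlib.called) check depends on. If another test ever needs a fresh capabilities lookup, one option (an assumption, not something this commit adds) is to clear the shared cache explicitly, e.g. in setUp:

    # Hypothetical test-isolation helper, not part of this change; the module
    # path comes from the mock.patch target and the attribute from the hunk above.
    from aodh.api.controllers.v2.alarm_rules import gnocchi as gnocchi_rules

    gnocchi_rules.AlarmGnocchiThresholdRule.cache.clear()
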
@@ -2943,38 +2979,15 @@ class TestAlarmsRuleGnocchi(TestAlarmsBase):
             }
         }
 
-        with mock.patch('aodh.api.controllers.v2.alarm_rules.'
-                        'gnocchi.client') as clientlib:
-            c = clientlib.Client.return_value
-            c.capabilities.list.side_effect = Exception("boom!")
-            resp = self.post_json('/alarms', params=json,
-                                  headers=self.auth_headers,
-                                  expect_errors=True)
-            self.assertEqual(503, resp.status_code, resp.body)
-
-        with mock.patch('aodh.api.controllers.v2.alarm_rules.'
-                        'gnocchi.client') as clientlib:
-            c = clientlib.Client.return_value
-            c.capabilities.list.side_effect = (
-                exceptions.ClientException(500, "my_custom_error"))
-            resp = self.post_json('/alarms', params=json,
-                                  headers=self.auth_headers,
-                                  expect_errors=True)
-            self.assertEqual(500, resp.status_code, resp.body)
-            self.assertIn('my_custom_error',
-                          resp.json['error_message']['faultstring'])
-
         with mock.patch('aodh.api.controllers.v2.alarm_rules.'
                         'gnocchi.client') as clientlib:
             c = clientlib.Client.return_value
             c.capabilities.list.return_value = {
                 'aggregation_methods': ['count']}
             self.post_json('/alarms', params=json, headers=self.auth_headers)
-            expected = [mock.call.capabilities.list(),
-                        mock.call.resource.get(
-                            "instance",
-                            "209ef69c-c10c-4efb-90ff-46f4b2d90d2e")]
-            self.assertEqual(expected, c.mock_calls)
+            expected = mock.call.resource.get(
+                "instance", "209ef69c-c10c-4efb-90ff-46f4b2d90d2e")
+            self.assertIn(expected, c.mock_calls)
 
         alarms = list(self.alarm_conn.get_alarms(enabled=False))
         self.assertEqual(1, len(alarms))
@@ -0,0 +1,8 @@
+---
+other:
+  - |
+    Gnocchi aggregation capabilities are now cached to minimise redundant
+    calls to Gnocchi when validating aggregation methods. The cache is kept
+    in memory for an hour. If additional aggregation methods are added to
+    Gnocchi, they will not be propagated to Aodh's API service for up to an
+    hour, unless the service is restarted.
@@ -31,4 +31,5 @@ stevedore>=1.5.0 # Apache-2.0
 tooz>=1.28.0 # Apache-2.0
 WebOb>=1.2.3
 WSME>=0.8
+cachetools>=1.1.6
 cotyledon