Merge "alarm: Use new gnocchi aggregation API"

This commit is contained in:
Jenkins 2015-03-26 17:13:36 +00:00 committed by Gerrit Code Review
commit 06c4740636
6 changed files with 302 additions and 116 deletions

View File

@ -77,28 +77,35 @@ class GnocchiThresholdEvaluator(evaluator.Evaluator):
def _statistics(self, alarm, start, end):
"""Retrieve statistics over the current window."""
if alarm.type == 'gnocchi_metrics_threshold':
url = ("%s/v1/metric_aggregation/?"
"aggregation=%s&start=%s&end=%s&%s") % (
self.gnocchi_url,
alarm.rule['aggregation_method'],
start, end,
"&".join("metric=%s" % m
for m in alarm.rule['metrics']))
method = 'get'
req = {
'url': self.gnocchi_url + "/v1",
'headers': self._get_headers(),
'params': {
'aggregation': alarm.rule['aggregation_method'],
'start': start,
'end': end,
}
}
if alarm.type == 'gnocchi_aggregation_by_resources_threshold':
method = 'post'
req['url'] += "/aggregation/resource/%s/metric/%s" % (
alarm.rule['resource_type'], alarm.rule['metric'])
req['data'] = alarm.rule['query']
elif alarm.type == 'gnocchi_aggregation_by_metrics_threshold':
req['url'] += "/aggregation/metric"
req['params']['metric[]'] = alarm.rule['metrics']
elif alarm.type == 'gnocchi_resources_threshold':
url = ("%s/v1/resource/%s/%s/metric/%s/measures?"
"aggregation=%s&start=%s&end=%s") % (
self.gnocchi_url,
alarm.rule['resource_type'],
alarm.rule['resource_constraint'],
alarm.rule['metric'],
alarm.rule['aggregation_method'],
start, end)
req['url'] += "/resource/%s/%s/metric/%s/measures" % (
alarm.rule['resource_type'],
alarm.rule['resource_id'], alarm.rule['metric'])
LOG.debug(_('stats query %s') % url)
LOG.debug(_('stats query %s') % req['url'])
try:
r = requests.get(url, headers=self._get_headers())
r = getattr(requests, method)(**req)
except Exception:
LOG.exception(_('alarm stats retrieval failed'))
return []
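For context, a minimal standalone sketch of the three request shapes the rewritten _statistics() now issues against the gnocchi v1 API. The endpoint paths and example values are taken from this change and its tests; the base URL and token are the test fixtures and are used here purely for illustration.

import requests

GNOCCHI_URL = "http://localhost:8041"            # assumed gnocchi endpoint
HEADERS = {"Content-Type": "application/json",
           "X-Auth-Token": "fake_token"}         # assumed auth token
PARAMS = {"aggregation": "mean",
          "start": "2015-01-26T12:51:00",
          "end": "2015-01-26T12:57:00"}

# gnocchi_resources_threshold: measures of one metric of one resource
requests.get(GNOCCHI_URL + "/v1/resource/instance/my_instance"
                           "/metric/cpu_util/measures",
             headers=HEADERS, params=PARAMS)

# gnocchi_aggregation_by_metrics_threshold: aggregate across metric ids
metric_params = dict(PARAMS)
metric_params["metric[]"] = ["0bb1604d-1193-4c0a-b4b8-74b170e35e83",
                             "9ddc209f-42f8-41e1-b8f1-8804f59c4053"]
requests.get(GNOCCHI_URL + "/v1/aggregation/metric",
             headers=HEADERS, params=metric_params)

# gnocchi_aggregation_by_resources_threshold: aggregate over a resource query
requests.post(GNOCCHI_URL + "/v1/aggregation/resource/instance"
                            "/metric/cpu_util",
              headers=HEADERS, params=PARAMS,
              data='{"=": {"server_group": "my_autoscaling_group"}}')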

View File

@ -16,7 +16,6 @@
from oslo_config import cfg
from oslo_serialization import jsonutils
import requests
import uuid
import wsme
from wsme import types as wtypes
@ -80,12 +79,12 @@ class AlarmGnocchiThresholdRule(base.AlarmRule):
return jsonutils.loads(r.text).get('aggregation_methods', [])
class AlarmGnocchiMetricOfResourcesThresholdRule(AlarmGnocchiThresholdRule):
class MetricOfResourceRule(AlarmGnocchiThresholdRule):
metric = wsme.wsattr(wtypes.text, mandatory=True)
"The name of the metric"
resource_constraint = wsme.wsattr(wtypes.text, mandatory=True)
"The id of a resource or a expression to select multiple resources"
resource_id = wsme.wsattr(wtypes.text, mandatory=True)
"The id of a resource"
resource_type = wsme.wsattr(wtypes.text, mandatory=True)
"The resource type"
@ -95,46 +94,95 @@ class AlarmGnocchiMetricOfResourcesThresholdRule(AlarmGnocchiThresholdRule):
'threshold', 'aggregation_method',
'evaluation_periods',
'metric',
'resource_constraint',
'resource_id',
'resource_type'])
return rule
@classmethod
def validate_alarm(cls, alarm):
super(AlarmGnocchiMetricOfResourcesThresholdRule,
super(MetricOfResourceRule,
cls).validate_alarm(alarm)
rule = alarm.gnocchi_resources_threshold_rule
ks_client = keystone_client.get_client()
gnocchi_url = cfg.CONF.alarms.gnocchi_url
headers = {'Content-Type': "application/json",
'X-Auth-Token': ks_client.auth_token}
try:
uuid.UUID(rule.resource_constraint)
except Exception:
auth_project = v2_utils.get_auth_project(alarm.project_id)
if auth_project:
# NOTE(sileht): when we have more complex query allowed
# this should be enhanced to ensure the constraint are still
# scoped to auth_project
rule.resource_constraint += (
u'\u2227project_id=%s' % auth_project)
else:
ks_client = keystone_client.get_client()
gnocchi_url = cfg.CONF.alarms.gnocchi_url
headers = {'Content-Type': "application/json",
'X-Auth-Token': ks_client.auth_token}
try:
r = requests.get("%s/v1/resource/%s/%s" % (
gnocchi_url, rule.resource_type,
rule.resource_constraint),
headers=headers)
except requests.ConnectionError as e:
raise GnocchiUnavailable(e)
if r.status_code == 404:
raise base.EntityNotFound('gnocchi resource',
rule.resource_constraint)
elif r.status_code // 200 != 1:
raise base.ClientSideError(r.body, status_code=r.status_code)
r = requests.get("%s/v1/resource/%s/%s" % (
gnocchi_url, rule.resource_type,
rule.resource_id),
headers=headers)
except requests.ConnectionError as e:
raise GnocchiUnavailable(e)
if r.status_code == 404:
raise base.EntityNotFound('gnocchi resource',
rule.resource_id)
elif r.status_code // 200 != 1:
raise base.ClientSideError(r.content, status_code=r.status_code)
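Roughly, the validation above boils down to asking gnocchi whether the referenced resource exists; a small sketch, where the host and the resource id are illustrative assumptions:

import requests

r = requests.get("http://localhost:8041/v1/resource/instance/"
                 "6841c175-d7c4-4bc2-bc7a-1c7832271b8f",
                 headers={"Content-Type": "application/json",
                          "X-Auth-Token": "<keystone token>"})
if r.status_code == 404:
    print("unknown gnocchi resource")      # the API raises EntityNotFound
elif r.status_code // 200 != 1:
    print("gnocchi error:", r.content)     # the API raises ClientSideError
else:
    print("resource exists, rule accepted")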
class AlarmGnocchiMetricsThresholdRule(AlarmGnocchiThresholdRule):
class AggregationMetricByResourcesLookupRule(AlarmGnocchiThresholdRule):
metric = wsme.wsattr(wtypes.text, mandatory=True)
"The name of the metric"
query = wsme.wsattr(wtypes.text, mandatory=True)
"The query to filter the metric"
resource_type = wsme.wsattr(wtypes.text, mandatory=True)
"The resource type"
def as_dict(self):
rule = self.as_dict_from_keys(['granularity', 'comparison_operator',
'threshold', 'aggregation_method',
'evaluation_periods',
'metric',
'query',
'resource_type'])
return rule
@classmethod
def validate_alarm(cls, alarm):
super(AggregationMetricByResourcesLookupRule,
cls).validate_alarm(alarm)
rule = alarm.gnocchi_aggregation_by_resources_threshold_rule
# check the query string is a valid json
try:
query = jsonutils.loads(rule.query)
except ValueError:
raise wsme.exc.InvalidInput('rule/query', rule.query)
# Scope the alarm to the project id if needed
auth_project = v2_utils.get_auth_project(alarm.project_id)
if auth_project:
rule.query = jsonutils.dumps({
"and": [{"=": {"created_by_project_id": auth_project}},
query]})
# Delegate the query validation to gnocchi
ks_client = keystone_client.get_client()
request = {
'url': "%s/v1/aggregation/resource/%s/metric/%s/measures" % (
cfg.CONF.alarms.gnocchi_url,
rule.resource_type,
rule.metric),
'headers': {'Content-Type': "application/json",
'X-Auth-Token': ks_client.auth_token},
'params': {'aggregation': rule.aggregation_method},
'data': rule.query,
}
try:
r = requests.post(**request)
except requests.ConnectionError as e:
raise GnocchiUnavailable(e)
if r.status_code // 200 != 1:
raise base.ClientSideError(r.content, status_code=r.status_code)
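To make the project scoping above concrete, a short sketch of the rewrite applied to the rule's query before it is delegated to gnocchi for validation (stdlib json stands in for oslo's jsonutils; the project id is an illustrative placeholder):

import json

user_query = {"=": {"server_group": "my_autoscaling_group"}}
auth_project = "<project-id>"        # resolved from the request context

# the stored rule query becomes the user's query AND-ed with the project scope
scoped = {"and": [{"=": {"created_by_project_id": auth_project}},
                  user_query]}
print(json.dumps(scoped))
# -> roughly: {"and": [{"=": {"created_by_project_id": "<project-id>"}},
#                      {"=": {"server_group": "my_autoscaling_group"}}]}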
class AggregationMetricsByIdLookupRule(AlarmGnocchiThresholdRule):
metrics = wsme.wsattr([wtypes.text], mandatory=True)
"A list of metric Ids"

View File

@ -83,11 +83,11 @@ class TestGnocchiThresholdEvaluate(base.TestEvaluatorBase):
granularity=60,
metric='cpu_util',
resource_type='instance',
resource_constraint='my_instance')
resource_id='my_instance')
),
models.Alarm(name='group_running_idle',
description='group_running_idle',
type='gnocchi_metrics_threshold',
type='gnocchi_aggregation_by_metrics_threshold',
enabled=True,
user_id='foobar',
project_id='snafu',
@ -109,6 +109,33 @@ class TestGnocchiThresholdEvaluate(base.TestEvaluatorBase):
metrics=['0bb1604d-1193-4c0a-b4b8-74b170e35e83',
'9ddc209f-42f8-41e1-b8f1-8804f59c4053']),
),
models.Alarm(name='instance_not_running',
description='instance_running_hot',
type='gnocchi_aggregation_by_resources_threshold',
enabled=True,
user_id='foobar',
project_id='snafu',
alarm_id=str(uuid.uuid4()),
state='insufficient data',
state_timestamp=constants.MIN_DATETIME,
timestamp=constants.MIN_DATETIME,
insufficient_data_actions=[],
ok_actions=[],
alarm_actions=[],
repeat_actions=False,
time_constraints=[],
rule=dict(
comparison_operator='gt',
threshold=80.0,
evaluation_periods=6,
aggregation_method='mean',
granularity=50,
metric='cpu_util',
resource_type='instance',
query='{"=": {"server_group": '
'"my_autoscaling_group"}}')
),
]
@staticmethod
@ -132,10 +159,13 @@ class TestGnocchiThresholdEvaluate(base.TestEvaluatorBase):
for v in moves.xrange(5)])
maxs = self._get_stats(300, [self.alarms[1].rule['threshold'] + v
for v in moves.xrange(1, 4)])
avgs2 = self._get_stats(50, [self.alarms[2].rule['threshold'] - v
for v in moves.xrange(6)])
self.requests.get.side_effect = [Exception('boom'),
FakeResponse(500, "error"),
means,
maxs]
self.requests.post.side_effect = [FakeResponse(500, "error"), avgs2]
self._evaluate_all_alarms()
self._assert_all_alarms('insufficient data')
self._evaluate_all_alarms()
@ -144,6 +174,7 @@ class TestGnocchiThresholdEvaluate(base.TestEvaluatorBase):
def test_simple_insufficient(self):
self._set_all_alarms('ok')
self.requests.get.return_value = FakeResponse(200, [])
self.requests.post.return_value = FakeResponse(200, [])
self._evaluate_all_alarms()
self._assert_all_alarms('insufficient data')
expected = [mock.call(alarm.alarm_id, state='insufficient data')
@ -169,8 +200,11 @@ class TestGnocchiThresholdEvaluate(base.TestEvaluatorBase):
for v in moves.xrange(1, 6)])
maxs = self._get_stats(300, [self.alarms[1].rule['threshold'] - v
for v in moves.xrange(4)])
avgs2 = self._get_stats(50, [self.alarms[2].rule['threshold'] + v
for v in moves.xrange(1, 7)])
self.requests.get.side_effect = [avgs, maxs]
self.requests.post.side_effect = [avgs2]
self._evaluate_all_alarms()
expected_headers = {'X-Auth-Token': 'fake_token',
@ -178,19 +212,33 @@ class TestGnocchiThresholdEvaluate(base.TestEvaluatorBase):
start_alarm1 = "2015-01-26T12:51:00"
start_alarm2 = "2015-01-26T12:32:00"
start_alarm3 = "2015-01-26T12:51:10"
end = "2015-01-26T12:57:00"
self.assertEqual([
mock.call('http://localhost:8041/v1/resource/instance/my_instance/'
'metric/cpu_util/measures?aggregation=mean'
'&start=' + start_alarm1 + '&end=' + end,
mock.call(url='http://localhost:8041/v1/resource/instance/'
'my_instance/metric/cpu_util/measures',
params={'aggregation': 'mean',
'start': start_alarm1, 'end': end},
headers=expected_headers),
mock.call('http://localhost:8041/v1/metric_aggregation/?'
'aggregation=max&start=' + start_alarm2 + '&end=' + end +
'&metric=0bb1604d-1193-4c0a-b4b8-74b170e35e83'
'&metric=9ddc209f-42f8-41e1-b8f1-8804f59c4053',
mock.call(url='http://localhost:8041/v1/aggregation/metric',
params={'aggregation': 'max',
'start': start_alarm2, 'end': end,
'metric[]': [
'0bb1604d-1193-4c0a-b4b8-74b170e35e83',
'9ddc209f-42f8-41e1-b8f1-8804f59c4053']},
headers=expected_headers)],
self.requests.get.mock_calls)
self.assertEqual([
mock.call(url='http://localhost:8041/v1/aggregation/resource/'
'instance/metric/cpu_util',
params={'aggregation': 'mean',
'start': start_alarm3, 'end': end},
data='{"=": {"server_group": "my_autoscaling_group"}}',
headers=expected_headers),
],
self.requests.post.mock_calls)
self._assert_all_alarms('alarm')
expected = [mock.call(alarm.alarm_id, state='alarm')
@ -200,9 +248,13 @@ class TestGnocchiThresholdEvaluate(base.TestEvaluatorBase):
reasons = ['Transition to alarm due to 5 samples outside'
' threshold, most recent: %s' % avgs.values[-1],
'Transition to alarm due to 4 samples outside'
' threshold, most recent: %s' % maxs.values[-1]]
' threshold, most recent: %s' % maxs.values[-1],
'Transition to alarm due to 6 samples outside'
' threshold, most recent: %s' % avgs2.values[-1],
]
reason_datas = [self._reason_data('outside', 5, avgs.values[-1]),
self._reason_data('outside', 4, maxs.values[-1])]
self._reason_data('outside', 4, maxs.values[-1]),
self._reason_data('outside', 6, avgs2.values[-1])]
expected = [mock.call(alarm, 'ok', reason, reason_data)
for alarm, reason, reason_data
in zip(self.alarms, reasons, reason_datas)]
@ -214,6 +266,9 @@ class TestGnocchiThresholdEvaluate(base.TestEvaluatorBase):
for v in moves.xrange(5)])
maxs = self._get_stats(300, [self.alarms[1].rule['threshold'] + v
for v in moves.xrange(1, 5)])
avgs2 = self._get_stats(50, [self.alarms[2].rule['threshold'] - v
for v in moves.xrange(6)])
self.requests.post.side_effect = [avgs2]
self.requests.get.side_effect = [avgs, maxs]
self._evaluate_all_alarms()
self._assert_all_alarms('ok')
@ -224,9 +279,12 @@ class TestGnocchiThresholdEvaluate(base.TestEvaluatorBase):
reasons = ['Transition to ok due to 5 samples inside'
' threshold, most recent: %s' % avgs.values[-1],
'Transition to ok due to 4 samples inside'
' threshold, most recent: %s' % maxs.values[-1]]
' threshold, most recent: %s' % maxs.values[-1],
'Transition to ok due to 6 samples inside'
' threshold, most recent: %s' % avgs2.values[-1]]
reason_datas = [self._reason_data('inside', 5, avgs.values[-1]),
self._reason_data('inside', 4, maxs.values[-1])]
self._reason_data('inside', 4, maxs.values[-1]),
self._reason_data('inside', 6, avgs2.values[-1])]
expected = [mock.call(alarm, 'alarm', reason, reason_data)
for alarm, reason, reason_data
in zip(self.alarms, reasons, reason_datas)]
@ -238,6 +296,9 @@ class TestGnocchiThresholdEvaluate(base.TestEvaluatorBase):
for v in moves.xrange(5)])
maxs = self._get_stats(300, [self.alarms[1].rule['threshold'] - v
for v in moves.xrange(-1, 3)])
avgs2 = self._get_stats(50, [self.alarms[2].rule['threshold'] + v
for v in moves.xrange(6)])
self.requests.post.side_effect = [avgs2]
self.requests.get.side_effect = [avgs, maxs]
self._evaluate_all_alarms()
self._assert_all_alarms('ok')
@ -253,6 +314,9 @@ class TestGnocchiThresholdEvaluate(base.TestEvaluatorBase):
for v in moves.xrange(5)])
maxs = self._get_stats(300, [self.alarms[1].rule['threshold'] - v
for v in moves.xrange(-1, 3)])
avgs2 = self._get_stats(50, [self.alarms[2].rule['threshold'] + v
for v in moves.xrange(6)])
self.requests.post.side_effect = [avgs2]
self.requests.get.side_effect = [avgs, maxs]
self._evaluate_all_alarms()
self._assert_all_alarms('ok')
@ -270,6 +334,9 @@ class TestGnocchiThresholdEvaluate(base.TestEvaluatorBase):
for v in moves.xrange(1, 6)])
maxs = self._get_stats(300, [self.alarms[1].rule['threshold'] - v
for v in moves.xrange(4)])
avgs2 = self._get_stats(50, [self.alarms[2].rule['threshold'] + v
for v in moves.xrange(6)])
self.requests.post.side_effect = [avgs2]
self.requests.get.side_effect = [avgs, maxs]
self._evaluate_all_alarms()
self._assert_all_alarms('alarm')
@ -289,6 +356,9 @@ class TestGnocchiThresholdEvaluate(base.TestEvaluatorBase):
for v in moves.xrange(1, 6)])
maxs = self._get_stats(300, [self.alarms[1].rule['threshold'] - v
for v in moves.xrange(4)])
avgs2 = self._get_stats(50, [self.alarms[2].rule['threshold'] + v
for v in moves.xrange(1, 7)])
self.requests.post.side_effect = [avgs2]
self.requests.get.side_effect = [avgs, maxs]
self._evaluate_all_alarms()
self._assert_all_alarms('alarm')
@ -299,9 +369,12 @@ class TestGnocchiThresholdEvaluate(base.TestEvaluatorBase):
reasons = ['Transition to alarm due to 5 samples outside'
' threshold, most recent: %s' % avgs.values[-1],
'Transition to alarm due to 4 samples outside'
' threshold, most recent: %s' % maxs.values[-1]]
' threshold, most recent: %s' % maxs.values[-1],
'Transition to alarm due to 6 samples outside'
' threshold, most recent: %s' % avgs2.values[-1]]
reason_datas = [self._reason_data('outside', 5, avgs.values[-1]),
self._reason_data('outside', 4, maxs.values[-1])]
self._reason_data('outside', 4, maxs.values[-1]),
self._reason_data('outside', 6, avgs2.values[-1])]
expected = [mock.call(alarm, 'ok', reason, reason_data)
for alarm, reason, reason_data
in zip(self.alarms, reasons, reason_datas)]
@ -313,6 +386,9 @@ class TestGnocchiThresholdEvaluate(base.TestEvaluatorBase):
for v in moves.xrange(1, 6)])
maxs = self._get_stats(300, [self.alarms[1].rule['threshold'] - v
for v in moves.xrange(4)])
avgs2 = self._get_stats(50, [self.alarms[2].rule['threshold'] + v
for v in moves.xrange(1, 7)])
self.requests.post.side_effect = [avgs2]
self.requests.get.side_effect = [avgs, maxs]
self._evaluate_all_alarms()
self._assert_all_alarms('alarm')
@ -323,9 +399,12 @@ class TestGnocchiThresholdEvaluate(base.TestEvaluatorBase):
reasons = ['Transition to alarm due to 5 samples outside'
' threshold, most recent: %s' % avgs.values[-1],
'Transition to alarm due to 4 samples outside'
' threshold, most recent: %s' % maxs.values[-1]]
' threshold, most recent: %s' % maxs.values[-1],
'Transition to alarm due to 6 samples outside'
' threshold, most recent: %s' % avgs2.values[-1]]
reason_datas = [self._reason_data('outside', 5, avgs.values[-1]),
self._reason_data('outside', 4, maxs.values[-1])]
self._reason_data('outside', 4, maxs.values[-1]),
self._reason_data('outside', 6, avgs2.values[-1])]
expected = [mock.call(alarm, 'insufficient data',
reason, reason_data)
for alarm, reason, reason_data
@ -345,6 +424,7 @@ class TestGnocchiThresholdEvaluate(base.TestEvaluatorBase):
'timezone': 'Europe/Ljubljana'}
]
self.alarms[1].time_constraints = self.alarms[0].time_constraints
self.alarms[2].time_constraints = self.alarms[0].time_constraints
dt = datetime.datetime(2014, 1, 1, 15, 0, 0,
tzinfo=pytz.timezone('Europe/Ljubljana'))
mock_utcnow.return_value = dt.astimezone(pytz.UTC)

View File

@ -172,12 +172,12 @@ class TestAlarms(v2.FunctionalTest,
evaluation_periods=1,
metric='meter.test',
resource_type='instance',
resource_constraint=(
resource_id=(
'6841c175-d7c4-4bc2-bc7a-1c7832271b8f'),
)
),
models.Alarm(name='name6',
type='gnocchi_metrics_threshold',
type='gnocchi_aggregation_by_metrics_threshold',
enabled=True,
alarm_id='f',
description='f',
@ -202,6 +202,33 @@ class TestAlarms(v2.FunctionalTest,
'a1fb80f4-c242-4f57-87c6-68f47521059e']
),
),
models.Alarm(name='name7',
type='gnocchi_aggregation_by_resources_threshold',
enabled=True,
alarm_id='g',
description='f',
state='insufficient data',
severity='critical',
state_timestamp=constants.MIN_DATETIME,
timestamp=constants.MIN_DATETIME,
ok_actions=[],
insufficient_data_actions=[],
alarm_actions=[],
repeat_actions=True,
user_id=self.auth_headers['X-User-Id'],
project_id=self.auth_headers['X-Project-Id'],
time_constraints=[],
rule=dict(comparison_operator='gt',
threshold=2.0,
aggregation_method='mean',
granularity=60,
evaluation_periods=1,
metric='meter.test',
resource_type='instance',
query='{"=": {"server_group": '
'"my_autoscaling_group"}}')
),
]:
self.alarm_conn.update_alarm(alarm)
@ -225,9 +252,9 @@ class TestAlarms(v2.FunctionalTest,
def test_list_alarms(self):
data = self.get_json('/alarms')
self.assertEqual(6, len(data))
self.assertEqual(7, len(data))
self.assertEqual(set(['name1', 'name2', 'name3', 'name4', 'name5',
'name6']),
'name6', 'name7']),
set(r['name'] for r in data))
self.assertEqual(set(['meter.test', 'meter.mine']),
set(r['threshold_rule']['meter_name']
@ -403,7 +430,7 @@ class TestAlarms(v2.FunctionalTest,
q=[{'field': field,
'op': 'eq',
'value': project}])
self.assertEqual(6, len(alarms))
self.assertEqual(7, len(alarms))
_test('project')
_test('project_id')
@ -468,7 +495,7 @@ class TestAlarms(v2.FunctionalTest,
% field.split('/', 1)[-1],
resp.json['error_message']['faultstring'])
alarms = list(self.alarm_conn.get_alarms())
self.assertEqual(6, len(alarms))
self.assertEqual(7, len(alarms))
def test_post_invalid_alarm_time_constraint_start(self):
json = {
@ -489,7 +516,7 @@ class TestAlarms(v2.FunctionalTest,
self.post_json('/alarms', params=json, expect_errors=True, status=400,
headers=self.auth_headers)
alarms = list(self.alarm_conn.get_alarms())
self.assertEqual(6, len(alarms))
self.assertEqual(7, len(alarms))
def test_post_duplicate_time_constraint_name(self):
json = {
@ -518,7 +545,7 @@ class TestAlarms(v2.FunctionalTest,
"Time constraint names must be unique for a given alarm.",
resp.json['error_message']['faultstring'])
alarms = list(self.alarm_conn.get_alarms())
self.assertEqual(6, len(alarms))
self.assertEqual(7, len(alarms))
def test_post_alarm_null_time_constraint(self):
json = {
@ -552,7 +579,7 @@ class TestAlarms(v2.FunctionalTest,
self.post_json('/alarms', params=json, expect_errors=True, status=400,
headers=self.auth_headers)
alarms = list(self.alarm_conn.get_alarms())
self.assertEqual(6, len(alarms))
self.assertEqual(7, len(alarms))
def test_post_invalid_alarm_time_constraint_timezone(self):
json = {
@ -574,7 +601,7 @@ class TestAlarms(v2.FunctionalTest,
self.post_json('/alarms', params=json, expect_errors=True, status=400,
headers=self.auth_headers)
alarms = list(self.alarm_conn.get_alarms())
self.assertEqual(6, len(alarms))
self.assertEqual(7, len(alarms))
def test_post_invalid_alarm_period(self):
json = {
@ -592,7 +619,7 @@ class TestAlarms(v2.FunctionalTest,
self.post_json('/alarms', params=json, expect_errors=True, status=400,
headers=self.auth_headers)
alarms = list(self.alarm_conn.get_alarms())
self.assertEqual(6, len(alarms))
self.assertEqual(7, len(alarms))
def test_post_null_threshold_rule(self):
json = {
@ -625,7 +652,7 @@ class TestAlarms(v2.FunctionalTest,
self.assertIn(expected_err_msg,
resp.json['error_message']['faultstring'])
alarms = list(self.alarm_conn.get_alarms())
self.assertEqual(6, len(alarms))
self.assertEqual(7, len(alarms))
def test_post_invalid_alarm_input_state(self):
json = {
@ -645,7 +672,7 @@ class TestAlarms(v2.FunctionalTest,
self.assertIn(expected_err_msg,
resp.json['error_message']['faultstring'])
alarms = list(self.alarm_conn.get_alarms())
self.assertEqual(6, len(alarms))
self.assertEqual(7, len(alarms))
def test_post_invalid_alarm_input_severity(self):
json = {
@ -666,7 +693,7 @@ class TestAlarms(v2.FunctionalTest,
self.assertIn(expected_err_msg,
resp.json['error_message']['faultstring'])
alarms = list(self.alarm_conn.get_alarms())
self.assertEqual(6, len(alarms))
self.assertEqual(7, len(alarms))
def test_post_invalid_alarm_input_comparison_operator(self):
json = {
@ -687,7 +714,7 @@ class TestAlarms(v2.FunctionalTest,
self.assertIn(expected_err_msg,
resp.json['error_message']['faultstring'])
alarms = list(self.alarm_conn.get_alarms())
self.assertEqual(6, len(alarms))
self.assertEqual(7, len(alarms))
def test_post_invalid_alarm_input_type(self):
json = {
@ -708,7 +735,7 @@ class TestAlarms(v2.FunctionalTest,
self.assertIn(expected_err_msg,
resp.json['error_message']['faultstring'])
alarms = list(self.alarm_conn.get_alarms())
self.assertEqual(6, len(alarms))
self.assertEqual(7, len(alarms))
def test_post_invalid_alarm_input_enabled_str(self):
json = {
@ -732,7 +759,7 @@ class TestAlarms(v2.FunctionalTest,
self.assertEqual(expected_err_msg,
resp.json['error_message']['faultstring'])
alarms = list(self.alarm_conn.get_alarms())
self.assertEqual(6, len(alarms))
self.assertEqual(7, len(alarms))
def test_post_invalid_alarm_input_enabled_int(self):
json = {
@ -756,7 +783,7 @@ class TestAlarms(v2.FunctionalTest,
self.assertEqual(expected_err_msg,
resp.json['error_message']['faultstring'])
alarms = list(self.alarm_conn.get_alarms())
self.assertEqual(6, len(alarms))
self.assertEqual(7, len(alarms))
def test_post_invalid_combination_alarm_input_operator(self):
json = {
@ -782,7 +809,7 @@ class TestAlarms(v2.FunctionalTest,
self.assertIn(expected_err_msg,
resp.json['error_message']['faultstring'])
alarms = list(self.alarm_conn.get_alarms())
self.assertEqual(6, len(alarms))
self.assertEqual(7, len(alarms))
def test_post_invalid_alarm_query(self):
json = {
@ -801,7 +828,7 @@ class TestAlarms(v2.FunctionalTest,
self.post_json('/alarms', params=json, expect_errors=True, status=400,
headers=self.auth_headers)
alarms = list(self.alarm_conn.get_alarms())
self.assertEqual(6, len(alarms))
self.assertEqual(7, len(alarms))
def test_post_invalid_alarm_query_field_type(self):
json = {
@ -825,7 +852,7 @@ class TestAlarms(v2.FunctionalTest,
fault_string = resp_string['error_message']['faultstring']
self.assertTrue(fault_string.startswith(expected_error_message))
alarms = list(self.alarm_conn.get_alarms())
self.assertEqual(6, len(alarms))
self.assertEqual(7, len(alarms))
def test_post_invalid_alarm_query_non_field(self):
json = {
@ -845,7 +872,7 @@ class TestAlarms(v2.FunctionalTest,
fault_string = resp.json['error_message']['faultstring']
self.assertEqual(expected_error_message, fault_string)
alarms = list(self.alarm_conn.get_alarms())
self.assertEqual(6, len(alarms))
self.assertEqual(7, len(alarms))
def test_post_invalid_alarm_query_non_value(self):
json = {
@ -865,7 +892,7 @@ class TestAlarms(v2.FunctionalTest,
fault_string = resp.json['error_message']['faultstring']
self.assertEqual(expected_error_message, fault_string)
alarms = list(self.alarm_conn.get_alarms())
self.assertEqual(6, len(alarms))
self.assertEqual(7, len(alarms))
def test_post_invalid_alarm_have_multiple_rules(self):
json = {
@ -886,7 +913,7 @@ class TestAlarms(v2.FunctionalTest,
resp = self.post_json('/alarms', params=json, expect_errors=True,
status=400, headers=self.auth_headers)
alarms = list(self.alarm_conn.get_alarms())
self.assertEqual(6, len(alarms))
self.assertEqual(7, len(alarms))
self.assertEqual('threshold_rule and combination_rule cannot '
'be set at the same time',
resp.json['error_message']['faultstring'])
@ -910,7 +937,7 @@ class TestAlarms(v2.FunctionalTest,
resp = self.post_json('/alarms', params=json, expect_errors=True,
status=400, headers=self.auth_headers)
alarms = list(self.alarm_conn.get_alarms())
self.assertEqual(6, len(alarms))
self.assertEqual(7, len(alarms))
self.assertEqual(
'Unknown argument: "timestamp": '
'not valid for this resource',
@ -949,7 +976,7 @@ class TestAlarms(v2.FunctionalTest,
resp = self.post_json('/alarms', params=json, status=400,
headers=self.auth_headers)
alarms = list(self.alarm_conn.get_alarms())
self.assertEqual(6, len(alarms))
self.assertEqual(7, len(alarms))
self.assertEqual(error_message,
resp.json['error_message']['faultstring'])
@ -1017,7 +1044,7 @@ class TestAlarms(v2.FunctionalTest,
self.post_json('/alarms', params=json, status=201,
headers=self.auth_headers)
alarms = list(self.alarm_conn.get_alarms())
self.assertEqual(7, len(alarms))
self.assertEqual(8, len(alarms))
for alarm in alarms:
if alarm.name == 'added_alarm_defaults':
for key in to_check:
@ -1970,18 +1997,18 @@ class TestAlarms(v2.FunctionalTest,
def test_delete_alarm(self):
data = self.get_json('/alarms')
self.assertEqual(6, len(data))
self.assertEqual(7, len(data))
resp = self.delete('/alarms/%s' % data[0]['alarm_id'],
headers=self.auth_headers,
status=204)
self.assertEqual('', resp.body)
alarms = list(self.alarm_conn.get_alarms())
self.assertEqual(5, len(alarms))
self.assertEqual(6, len(alarms))
def test_get_state_alarm(self):
data = self.get_json('/alarms')
self.assertEqual(6, len(data))
self.assertEqual(7, len(data))
resp = self.get_json('/alarms/%s/state' % data[0]['alarm_id'],
headers=self.auth_headers)
@ -1989,7 +2016,7 @@ class TestAlarms(v2.FunctionalTest,
def test_set_state_alarm(self):
data = self.get_json('/alarms')
self.assertEqual(6, len(data))
self.assertEqual(7, len(data))
resp = self.put_json('/alarms/%s/state' % data[0]['alarm_id'],
headers=self.auth_headers,
@ -2001,7 +2028,7 @@ class TestAlarms(v2.FunctionalTest,
def test_set_invalid_state_alarm(self):
data = self.get_json('/alarms')
self.assertEqual(6, len(data))
self.assertEqual(7, len(data))
self.put_json('/alarms/%s/state' % data[0]['alarm_id'],
headers=self.auth_headers,
@ -2421,7 +2448,7 @@ class TestAlarms(v2.FunctionalTest,
'evaluation_periods': 3,
'granularity': 180,
'resource_type': 'instance',
'resource_constraint': '209ef69c-c10c-4efb-90ff-46f4b2d90d2e',
'resource_id': '209ef69c-c10c-4efb-90ff-46f4b2d90d2e',
}
}
@ -2469,13 +2496,13 @@ class TestAlarms(v2.FunctionalTest,
'enabled': False,
'name': 'name_post',
'state': 'ok',
'type': 'gnocchi_metrics_threshold',
'type': 'gnocchi_aggregation_by_metrics_threshold',
'severity': 'critical',
'ok_actions': ['http://something/ok'],
'alarm_actions': ['http://something/alarm'],
'insufficient_data_actions': ['http://something/no'],
'repeat_actions': True,
'gnocchi_metrics_threshold_rule': {
'gnocchi_aggregation_by_metrics_threshold_rule': {
'metrics': ['b3d9d8ab-05e8-439f-89ad-5e978dd2a5eb',
'009d4faf-c275-46f0-8f2d-670b15bac2b0'],
'comparison_operator': 'le',
@ -2497,18 +2524,18 @@ class TestAlarms(v2.FunctionalTest,
self._verify_alarm(json, alarms[0])
@mock.patch('ceilometer.keystone_client.get_client')
def test_post_gnocchi_resources_alarm_project_constraint(self, __):
def test_post_gnocchi_aggregation_alarm_project_constraint(self, __):
json = {
'enabled': False,
'name': 'name_post',
'name': 'project_constraint',
'state': 'ok',
'type': 'gnocchi_resources_threshold',
'type': 'gnocchi_aggregation_by_resources_threshold',
'severity': 'critical',
'ok_actions': ['http://something/ok'],
'alarm_actions': ['http://something/alarm'],
'insufficient_data_actions': ['http://something/no'],
'repeat_actions': True,
'gnocchi_resources_threshold_rule': {
'gnocchi_aggregation_by_resources_threshold_rule': {
'metric': 'ameter',
'comparison_operator': 'le',
'aggregation_method': 'count',
@ -2516,7 +2543,7 @@ class TestAlarms(v2.FunctionalTest,
'evaluation_periods': 3,
'granularity': 180,
'resource_type': 'instance',
'resource_constraint': u'server_group=as',
'query': '{"=": {"server_group": "my_autoscaling_group"}}',
}
}
@ -2524,15 +2551,34 @@ class TestAlarms(v2.FunctionalTest,
text=jsonutils.dumps(
{'aggregation_methods': ['count']}))
resource_result = mock.Mock(status_code=200, text="blob")
query_check_result = mock.Mock(status_code=200, text="blob")
expected_query = ('{"and": [{"=": {"created_by_project_id": "%s"}}, '
'{"=": {"server_group": "my_autoscaling_group"}}]}' %
self.auth_headers['X-Project-Id'])
with mock.patch('requests.get',
side_effect=[cap_result, resource_result]):
self.post_json('/alarms', params=json, headers=self.auth_headers)
with mock.patch('requests.post',
side_effect=[query_check_result]) as fake_post:
self.post_json('/alarms', params=json,
headers=self.auth_headers)
self.assertEqual([mock.call(
url=('http://localhost:8041/v1/aggregation/'
'resource/instance/metric/ameter/measures'),
headers={'Content-Type': 'application/json',
'X-Auth-Token': mock.ANY},
params={'aggregation': 'count'},
data=expected_query)],
fake_post.mock_calls),
alarms = list(self.alarm_conn.get_alarms(enabled=False))
self.assertEqual(1, len(alarms))
json['gnocchi_resources_threshold_rule']['resource_constraint'] += (
u'\u2227project_id=%s' % self.auth_headers['X-Project-Id'])
json['gnocchi_aggregation_by_resources_threshold_rule']['query'] = (
expected_query)
self._verify_alarm(json, alarms[0])

View File

@ -100,10 +100,13 @@ Alarms
.. autotype:: ceilometer.api.controllers.v2.alarm_rules.combination.AlarmCombinationRule
:members:
.. autotype:: ceilometer.api.controllers.v2.alarm_rules.gnocchi.AlarmGnocchiMetricOfResourcesThresholdRule
.. autotype:: ceilometer.api.controllers.v2.alarm_rules.gnocchi.MetricOfResourceRule
:members:
.. autotype:: ceilometer.api.controllers.v2.alarm_rules.gnocchi.AlarmGnocchiMetricsThresholdRule
.. autotype:: ceilometer.api.controllers.v2.alarm_rules.gnocchi.AggregationMetricByResourcesLookupRule
:members:
.. autotype:: ceilometer.api.controllers.v2.alarm_rules.gnocchi.AggregationMetricsByIdLookupRule
:members:
.. autotype:: ceilometer.api.controllers.v2.alarms.AlarmTimeConstraint

View File

@ -298,14 +298,16 @@ ceilometer.event.publisher =
ceilometer.alarm.rule =
threshold = ceilometer.api.controllers.v2.alarm_rules.threshold:AlarmThresholdRule
combination = ceilometer.api.controllers.v2.alarm_rules.combination:AlarmCombinationRule
gnocchi_resources_threshold = ceilometer.api.controllers.v2.alarm_rules.gnocchi:AlarmGnocchiMetricOfResourcesThresholdRule
gnocchi_metrics_threshold = ceilometer.api.controllers.v2.alarm_rules.gnocchi:AlarmGnocchiMetricsThresholdRule
gnocchi_resources_threshold = ceilometer.api.controllers.v2.alarm_rules.gnocchi:MetricOfResourceRule
gnocchi_aggregation_by_metrics_threshold = ceilometer.api.controllers.v2.alarm_rules.gnocchi:AggregationMetricsByIdLookupRule
gnocchi_aggregation_by_resources_threshold = ceilometer.api.controllers.v2.alarm_rules.gnocchi:AggregationMetricByResourcesLookupRule
ceilometer.alarm.evaluator =
threshold = ceilometer.alarm.evaluator.threshold:ThresholdEvaluator
combination = ceilometer.alarm.evaluator.combination:CombinationEvaluator
gnocchi_resources_threshold = ceilometer.alarm.evaluator.gnocchi:GnocchiThresholdEvaluator
gnocchi_metrics_threshold = ceilometer.alarm.evaluator.gnocchi:GnocchiThresholdEvaluator
gnocchi_aggregation_by_metrics_threshold = ceilometer.alarm.evaluator.gnocchi:GnocchiThresholdEvaluator
gnocchi_aggregation_by_resources_threshold = ceilometer.alarm.evaluator.gnocchi:GnocchiThresholdEvaluator
ceilometer.alarm.evaluator_service =
default = ceilometer.alarm.service:AlarmEvaluationService
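For reference, a quick way to confirm the renamed plugin entry points once the package is installed; this is a sketch using setuptools' pkg_resources, while ceilometer itself loads them through its own extension machinery.

import pkg_resources

for group in ("ceilometer.alarm.rule", "ceilometer.alarm.evaluator"):
    names = sorted(ep.name for ep in pkg_resources.iter_entry_points(group))
    print(group, "->", names)
# both lists should now contain gnocchi_aggregation_by_metrics_threshold and
# gnocchi_aggregation_by_resources_threshold alongside
# gnocchi_resources_threshold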