Move usage from deprecated Ceilometer API to Gnocchi API
This removes usage of the deprecated Ceilometer API:

1. Change the trigger type from "OS::Ceilometer::Alarm" to
   "OS::Aodh::GnocchiAggregationByResourcesAlarm"
2. Add "resource_type", fixed to the value "instance"
3. Rename some parameters (meter_name -> metric,
   statistic -> aggregation_method, period -> granularity)
4. Change the method used to compare against the threshold from
   "average" to "mean"

Change-Id: I66f8e4c76a918494df26b4af277453e24421b917
This commit is contained in:
parent af19e0531f
commit 9e3a6157f0
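To summarize the mapping before the diffs, here is a minimal sketch of a translated alarm resource in HOT form, with the old Ceilometer-era property names noted in comments. The resource name cpu_alarm and the concrete values are illustrative only and are not taken from this change; a template actually produced by the translator also wires in alarm actions and receivers, which are omitted here.

heat_template_version: 2013-05-23
resources:
  cpu_alarm:                                            # illustrative name
    type: OS::Aodh::GnocchiAggregationByResourcesAlarm  # was OS::Aodh::Alarm
    properties:
      metric: cpu_util                # was meter_name: cpu_util
      aggregation_method: mean        # was statistic: avg
      granularity: 60                 # was period: 60
      resource_type: instance         # new property, defaulting to "instance"
      evaluation_periods: 1
      threshold: 50
      comparison_operator: gt
outputs: {}

The ALARM_STATISTIC tables in the translator modules below are widened accordingly, so TOSCA statistic names now map onto Gnocchi aggregation methods (mean, median, sum, max, min, last, std, first, count) instead of Ceilometer statistics.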
@@ -67,13 +67,13 @@ requestsexceptions==1.2.0
 simplejson==3.5.1
 six==1.10.0
 snowballstemmer==1.2.1
-Sphinx==1.6.5
+Sphinx==1.6.2
 sphinxcontrib-websupport==1.0.1
 stevedore==1.20.0
 testrepository==0.0.18
 testscenarios==0.4
 testtools==2.2.0
-tosca-parser==0.8.1
+tosca-parser==1.0.0
 traceback2==1.4.0
 unittest2==1.1.0
 warlock==1.2.0
@@ -7,7 +7,7 @@ cliff!=2.9.0,>=2.8.0 # Apache-2.0
 PyYAML>=3.12 # MIT
 python-dateutil>=2.5.3 # BSD
 six>=1.10.0 # MIT
-tosca-parser>=0.8.1 # Apache-2.0
+tosca-parser>=1.0.0 # Apache-2.0
 keystoneauth1>=3.4.0 # Apache-2.0
 python-novaclient>=9.1.0 # Apache-2.0
 python-heatclient>=1.10.0 # Apache-2.0
@@ -71,9 +71,11 @@ class AutoscalingTest(TestCase):
           description: trigger
           condition:
             constraint: utilization greater_than 50%
-            period: 60
+            granularity: 60
             evaluations: 1
-            method: average
+            aggregation_method: mean
+            resource_type: instance
             comparison_operator: gt
       properties:
         min_instances: 2
         max_instances: 10
@@ -26,7 +26,9 @@ ALARM_METER_NAME = {'utilization': 'cpu_util'}
 ALARM_COMPARISON_OPERATOR = {'greater_than': 'gt', 'gerater_equal': 'ge',
                              'less_than': 'lt', 'less_equal': 'le',
                              'equal': 'eq', 'not_equal': 'ne'}
-ALARM_STATISTIC = {'average': 'avg'}
+ALARM_STATISTIC = {'mean': 'mean', 'median': 'median', 'summary': 'sum',
+                   'maximum': 'max', 'minimum': 'min', 'last': 'last',
+                   'std': 'std', 'first': 'first', 'count': 'count'}
 
 
 class ToscaClusterAutoscaling(HotResource):
@@ -62,6 +64,7 @@ class ToscaClusterAutoscaling(HotResource):
 
     def handle_expansion(self):
         hot_resources = []
+        hot_type = 'OS::Aodh::GnocchiAggregationByResourcesAlarm'
         trigger_receivers = defaultdict(list)
         for node in self.policy.targets:
             for trigger in self.policy.entity_tpl['triggers']:
@@ -95,12 +98,15 @@ class ToscaClusterAutoscaling(HotResource):
             threshold = threshold.strip("%")
             alarm_prop = {}
             alarm_prop["description"] = self.policy.entity_tpl['description']
-            alarm_prop["meter_name"] = self.policy.\
-                entity_tpl['triggers'][trigger]['event_type']['metrics']
-            alarm_prop["statistic"] = ALARM_STATISTIC[sample['method']]
-            alarm_prop["period"] = sample["period"]
+            alarm_prop["metric"] = self.policy.\
+                entity_tpl['triggers'][trigger]['event_type']['metric']
+            alarm_prop["aggregation_method"] = \
+                ALARM_STATISTIC[sample['aggregation_method']]
+            alarm_prop["granularity"] = sample["granularity"]
             alarm_prop["evaluation_periods"] = sample["evaluations"]
             alarm_prop["threshold"] = threshold
+            alarm_prop["resource_type"] = sample.get("resource_type",
+                                                     "instance")
             alarm_prop["comparison_operator"] = \
                 ALARM_COMPARISON_OPERATOR[comparison_operator]
             alarm_prop["repeat_actions"] = "True"
@@ -111,7 +117,7 @@ class ToscaClusterAutoscaling(HotResource):
                                      'channel',
                                      'alarm_url']})
             ceilometer_resources = HotResource(self.nodetemplate,
-                                               type='OS::Aodh::Alarm',
+                                               type=hot_type,
                                                name=trigger + '_alarm',
                                                properties=alarm_prop)
             hot_resources.append(ceilometer_resources)
@@ -20,8 +20,9 @@ TARGET_CLASS_NAME = 'ToscaMonitoring'
 
 log = logging.getLogger('heat-translator')
 
-ALARM_STATISTIC = {'average': 'avg', 'summary': 'sum',
-                   'maximum': 'max', 'minimum': 'min'}
+ALARM_STATISTIC = {'mean': 'mean', 'median': 'median', 'summary': 'sum',
+                   'maximum': 'max', 'minimum': 'min', 'last': 'last',
+                   'std': 'std', 'first': 'first', 'count': 'count'}
 
 
 class ToscaMonitoring(HotResource):
@@ -30,7 +31,7 @@ class ToscaMonitoring(HotResource):
     toscatype = 'tosca.policies.Monitoring'
 
     def __init__(self, policy, csar_dir=None):
-        hot_type = "OS::Aodh::Alarm"
+        hot_type = "OS::Aodh::GnocchiAggregationByResourcesAlarm"
         super(ToscaMonitoring, self).__init__(policy,
                                               type=hot_type,
                                               csar_dir=csar_dir)
@@ -39,6 +40,7 @@ class ToscaMonitoring(HotResource):
 
     def handle_expansion(self):
         '''handle monitoring resources in case of multiple triggers'''
+        hot_type = 'OS::Aodh::GnocchiAggregationByResourcesAlarm'
         extra_resources = list()
         extra_triggers = self.policy.entity_tpl["triggers"]
         for trigger_name, trigger_dict in extra_triggers.items():
@@ -46,7 +48,7 @@ class ToscaMonitoring(HotResource):
                 self.filter.append(trigger_name)
                 prop = self._get_monitoring_prop(trigger_dict)
                 mon_resources = HotResource(self.nodetemplate,
-                                            type='OS::Aodh::Alarm',
+                                            type=hot_type,
                                             name=trigger_name,
                                             properties=prop)
                 extra_resources.append(mon_resources)
@@ -66,14 +68,16 @@ class ToscaMonitoring(HotResource):
         sample = trigger.get('condition')
         prop = dict()
         prop["description"] = sample.get('constraint')
-        prop["meter_name"] = trigger.get('meter_name')
-        if sample.get('method') not in ALARM_STATISTIC:
+        prop["metric"] = trigger.get('metric')
+        if sample.get('aggregation_method') not in ALARM_STATISTIC:
             msg = _('method should be one of given statistics')
             log.error(msg)
             raise InvalidPropertyValueError(what=msg)
-        prop["statistic"] = ALARM_STATISTIC[sample["method"]]
-        prop["period"] = sample.get("period")
+        prop["aggregation_method"] = \
+            ALARM_STATISTIC[sample["aggregation_method"]]
+        prop["granularity"] = sample.get("granularity")
         prop["threshold"] = sample.get("threshold")
+        prop["resource_type"] = sample.get("resource_type", "instance")
         prop["comparison_operator"] = sample.get('comparison_operator')
         prop['evaluation_periods'] = sample.get('evaluations')
         return prop
@@ -21,9 +21,11 @@ TARGET_CLASS_NAME = 'ToscaAutoscaling'
 HEAT_TEMPLATE_BASE = """
 heat_template_version: 2013-05-23
 """
-ALARM_STATISTIC = {'average': 'avg'}
+ALARM_STATISTIC = {'mean': 'mean', 'median': 'median', 'summary': 'sum',
+                   'maximum': 'max', 'minimum': 'min', 'last': 'last',
+                   'std': 'std', 'first': 'first', 'count': 'count'}
 SCALING_RESOURCES = ["OS::Heat::ScalingPolicy", "OS::Heat::AutoScalingGroup",
-                     "OS::Aodh::Alarm"]
+                     "OS::Aodh::GnocchiAggregationByResourcesAlarm"]
 
 
 class ToscaAutoscaling(HotResource):
@@ -40,20 +42,23 @@ class ToscaAutoscaling(HotResource):
 
     def handle_expansion(self):
         if self.policy.entity_tpl.get('triggers'):
+            hot_type = 'OS::Aodh::GnocchiAggregationByResourcesAlarm'
             sample = self.policy.\
                 entity_tpl["triggers"]["resize_compute"]["condition"]
             prop = {}
             prop["description"] = self.policy.entity_tpl.get('description')
-            prop["meter_name"] = "cpu_util"
+            prop["metric"] = "cpu_util"
             if sample:
-                prop["statistic"] = ALARM_STATISTIC[sample["method"]]
-                prop["period"] = sample["period"]
+                prop["aggregation_method"] = \
+                    ALARM_STATISTIC[sample["aggregation_method"]]
+                prop["granularity"] = sample["granularity"]
                 prop["threshold"] = sample["evaluations"]
+                prop["resource_type"] = sample.get("resource_type", "instance")
             prop["comparison_operator"] = "gt"
             alarm_name = self.name.replace('_scale_in', '').\
                 replace('_scale_out', '')
             ceilometer_resources = HotResource(self.nodetemplate,
-                                               type='OS::Aodh::Alarm',
+                                               type=hot_type,
                                                name=alarm_name + '_alarm',
                                                properties=prop)
             hot_resources = [ceilometer_resources]
@@ -30,9 +30,11 @@ topology_template:
           description: trigger
           condition:
             constraint: utilization greater_than 50%
-            period: 60
+            granularity: 60
             evaluations: 1
-            method: average
+            aggregation_method: mean
+            resource_type: instance
             comparison_operator: gt
       properties:
         min_instances: 2
         max_instances: 10
@@ -44,13 +44,15 @@ topology_template:
           description: trigger
           event_type:
             type: tosca.events.resource.cpu.utilization
-            metrics: cpu_util
+            metric: cpu_util
           implementation: Ceilometer
           condition:
             constraint: utilization greater_than 50%
-            period: 60
+            granularity: 60
             evaluations: 1
-            method: average
+            aggregation_method: mean
+            resource_type: instance
             comparison_operator: gt
           action:
             scale_out:
               type: SCALE_OUT
@@ -31,12 +31,13 @@ resources:
       scaling_adjustment: -1
       cooldown: 60
   asg_alarm:
-    type: OS::Aodh::Alarm
+    type: OS::Aodh::GnocchiAggregationByResourcesAlarm
     properties:
-      meter_name: cpu_util
+      metric: cpu_util
       description: Simple node autoscaling
-      period: 60
-      statistic: avg
+      granularity: 60
+      aggregation_method: mean
       threshold: 1
+      resource_type: instance
       comparison_operator: gt
 outputs: {}
@@ -42,9 +42,9 @@ resources:
           get_resource: my_server_1_cluster
         type: webhook
   scale_out_alarm:
-    type: OS::Aodh::Alarm
+    type: OS::Aodh::GnocchiAggregationByResourcesAlarm
     properties:
-      meter_name: cpu_util
+      metric: cpu_util
       alarm_actions:
       - get_attr:
         - my_server_1_scale_out_receiver
@@ -53,8 +53,9 @@ resources:
       description: Cluster node autoscaling
      evaluation_periods: 1
       repeat_actions: True
-      period: 60
-      statistic: avg
+      granularity: 60
+      aggregation_method: mean
       threshold: 50
+      resource_type: instance
       comparison_operator: gt
 outputs: {}
@@ -14,15 +14,16 @@ resources:
       scaling_adjustment: 1
       cooldown: 60
   low_cpu_usage:
-    type: OS::Aodh::Alarm
+    type: OS::Aodh::GnocchiAggregationByResourcesAlarm
     properties:
-      meter_name: cpu_util
+      metric: cpu_util
       description: utilization less_than 20%
       evaluation_periods: 1
-      period: 600
-      statistic: avg
+      granularity: 600
+      aggregation_method: mean
       threshold: 20
-      comparison_operator: gt
+      resource_type: instance
+      comparison_operator: lt
   asg_group:
     type: OS::Heat::AutoScalingGroup
     properties:
@@ -41,13 +42,14 @@ resources:
       scaling_adjustment: -1
       cooldown: 60
   high_cpu_usage:
-    type: OS::Aodh::Alarm
+    type: OS::Aodh::GnocchiAggregationByResourcesAlarm
     properties:
-      meter_name: cpu_util
+      metric: cpu_util
       description: utilization greater_than 60%
       evaluation_periods: 1
-      period: 600
-      statistic: avg
+      granularity: 600
+      aggregation_method: mean
       threshold: 60
+      resource_type: instance
       comparison_operator: gt
 outputs: {}
@@ -39,22 +39,22 @@ topology_template:
       triggers:
         high_cpu_usage:
           description: trigger
-          meter_name: cpu_util
+          metric: cpu_util
           condition:
             constraint: utilization greater_than 60%
             threshold: 60
-            period: 600
+            granularity: 600
             evaluations: 1
-            method: average
+            aggregation_method: mean
             comparison_operator: gt
 
         low_cpu_usage:
           description: trigger
-          meter_name: cpu_util
+          metric: cpu_util
           condition:
             constraint: utilization less_than 20%
             threshold: 20
-            period: 600
+            granularity: 600
             evaluations: 1
-            method: average
-            comparison_operator: gt
+            aggregation_method: mean
+            comparison_operator: lt