From e78235e0f6625dd7982fa50565b472386111bf3b Mon Sep 17 00:00:00 2001
From: "wang.weiya"
Date: Fri, 19 May 2017 16:49:44 +0800
Subject: [PATCH] Fix bug: merging actions by action_spec should be moved
 after action_tracker

We have created the following template:
(alarm_a or alarm_b) and alarm_c cause alarm_d

Event sequence:
1. alarm_a is reported
2. alarm_b is reported
3. alarm_c is reported --> alarm_d is raised
4. alarm_b is removed --> alarm_d is wrongly removed

Change-Id: I3f9795a581eaa4fdd7c0cd7fc859f870b7c503fb
---
 vitrage/evaluator/scenario_evaluator.py       | 38 +++---
 .../evaluator/test_scenario_evaluator.py      | 120 ++++++++++++++++++
 ...complex_and_or_operator_deduced_alarm.yaml | 53 ++++++++
 3 files changed, 193 insertions(+), 18 deletions(-)
 create mode 100644 vitrage/tests/resources/templates/evaluator/complex_and_or_operator_deduced_alarm.yaml

diff --git a/vitrage/evaluator/scenario_evaluator.py b/vitrage/evaluator/scenario_evaluator.py
index d0c0efde0..ac60bd25f 100644
--- a/vitrage/evaluator/scenario_evaluator.py
+++ b/vitrage/evaluator/scenario_evaluator.py
@@ -100,14 +100,14 @@ class ScenarioEvaluator(object):
         actions = self._process_and_get_actions(before,
                                                 before_scenarios,
                                                 ActionMode.UNDO)
-        actions.update(self._process_and_get_actions(current,
+        actions.extend(self._process_and_get_actions(current,
                                                      current_scenarios,
                                                      ActionMode.DO))
         if actions:
-            LOG.debug("Actions to perform: %s", actions.values())
+            LOG.debug("Actions to perform: %s", actions)
 
             filtered_actions = \
-                self._analyze_and_filter_actions(actions.values())
+                self._analyze_and_filter_actions(actions)
             LOG.debug("Actions filtered: %s", filtered_actions)
             for action in filtered_actions:
                 self._action_executor.execute(action.specs, action.mode)
@@ -139,12 +139,12 @@ class ScenarioEvaluator(object):
         return before, current
 
     def _process_and_get_actions(self, element, triggered_scenarios, mode):
-        actions = {}
+        actions = []
         for triggered_scenario in triggered_scenarios:
             LOG.debug("Processing: %s", str(triggered_scenario))
             scenario_element = triggered_scenario[0]
             scenario = triggered_scenario[1]
-            actions.update(self._process_scenario(element,
+            actions.extend(self._process_scenario(element,
                                                   scenario,
                                                   scenario_element,
                                                   mode))
@@ -153,7 +153,7 @@ class ScenarioEvaluator(object):
     def _process_scenario(self, element, scenario, scenario_elements, mode):
         if not isinstance(scenario_elements, list):
             scenario_elements = [scenario_elements]
-        actions = {}
+        actions = []
         for action in scenario.actions:
             for scenario_element in scenario_elements:
                 matches = self._evaluate_subgraphs(scenario.subgraphs,
                                                    element,
                                                    scenario_element,
                                                    action.targets['target'])
 
-                actions.update(self._get_actions_from_matches(matches,
+                actions.extend(self._get_actions_from_matches(matches,
                                                               mode,
                                                               action,
                                                               scenario))
 
@@ -189,7 +189,7 @@ class ScenarioEvaluator(object):
                                   mode,
                                   action_spec,
                                   scenario):
-        actions = {}
+        actions = []
        for is_switch_mode, matches in combined_matches:
            new_mode = mode
            if is_switch_mode:
@@ -197,11 +197,11 @@ class ScenarioEvaluator(object):
                     if mode == ActionMode.DO else ActionMode.DO
 
             for match in matches:
-                spec, action_id = self._get_action_spec(action_spec, match)
+                spec = self._get_action_spec(action_spec, match)
                 items_ids = [match[1].vertex_id for match in match.items()]
                 match_hash = hash(tuple(sorted(items_ids)))
-                actions[action_id] = ActionInfo(spec, new_mode,
-                                                scenario.id, match_hash)
+                actions.append(ActionInfo(spec, new_mode,
+                                          scenario.id, match_hash))
 
         return actions
 
@@ -211,12 +211,9 @@ class ScenarioEvaluator(object):
         real_items = {
             target: match[target_id] for target, target_id in targets.items()
         }
-        revised_spec = ActionSpecs(action_spec.type,
-                                   real_items,
-                                   action_spec.properties)
-        # noinspection PyTypeChecker
-        action_id = ScenarioEvaluator._generate_action_id(revised_spec)
-        return revised_spec, action_id
+        return ActionSpecs(action_spec.type,
+                           real_items,
+                           action_spec.properties)
 
     @staticmethod
     def _generate_action_id(action_spec):
@@ -248,7 +245,12 @@ class ScenarioEvaluator(object):
                     actions_to_perform[key] = undo_action
             elif new_dominant != prev_dominant:
                 actions_to_perform[key] = new_dominant
-        return actions_to_perform.values()
+
+        # filter the same action
+        final_actions = {ScenarioEvaluator._generate_action_id(action.specs):
+                         action for action in actions_to_perform.values()}
+
+        return final_actions.values()
 
     def _find_vertex_subgraph_matching(self,
                                        subgraphs,
diff --git a/vitrage/tests/functional/evaluator/test_scenario_evaluator.py b/vitrage/tests/functional/evaluator/test_scenario_evaluator.py
index f6d093ab2..ae89b4a16 100644
--- a/vitrage/tests/functional/evaluator/test_scenario_evaluator.py
+++ b/vitrage/tests/functional/evaluator/test_scenario_evaluator.py
@@ -1145,6 +1145,126 @@ class TestScenarioEvaluator(TestFunctionalBase):
         alarms = self._get_alarms_on_host(host_v, processor.entity_graph)
         self.assertEqual(0, len(alarms))
 
+    def test_both_and_or_operator_for_tracker(self):
+        """(alarm_a or alarm_b) and alarm_c use case
+
+        We have created the following template:
+        (alarm_a or alarm_b) and alarm_c cause alarm_d
+
+        1. alarm_a is reported
+        2. alarm_b is reported
+        3. alarm_c is reported --> alarm_d is raised
+        4. alarm_b is removed --> alarm_d should not be removed
+        5. alarm_a is removed --> alarm_d should be removed
+
+        """
+
+        event_queue, processor, evaluator = self._init_system()
+        entity_graph = processor.entity_graph
+
+        # constants
+        num_orig_vertices = entity_graph.num_vertices()
+        num_orig_edges = entity_graph.num_edges()
+
+        host_v = self._get_entity_from_graph(NOVA_HOST_DATASOURCE,
+                                             _TARGET_HOST,
+                                             _TARGET_HOST,
+                                             entity_graph)
+        self.assertEqual('AVAILABLE', host_v[VProps.AGGREGATED_STATE],
+                         'host should be AVAILABLE when starting')
+
+        # generate nagios alarm_a to trigger
+        test_vals = {'status': 'WARNING',
+                     'service': 'alarm_a'}
+        test_vals.update(_NAGIOS_TEST_INFO)
+        generator = mock_driver.simple_nagios_alarm_generators(1, 1, test_vals)
+        alarm_a_test = mock_driver.generate_random_events_list(generator)[0]
+
+        host_v = self.get_host_after_event(event_queue, alarm_a_test,
+                                           processor, _TARGET_HOST)
+        alarms = self._get_alarms_on_host(host_v, entity_graph)
+        self.assertEqual(1, len(alarms))
+        self.assertEqual(num_orig_vertices + 1, entity_graph.num_vertices())
+        self.assertEqual(num_orig_edges + 1, entity_graph.num_edges())
+
+        # generate nagios alarm_b to trigger
+        test_vals = {'status': 'WARNING',
+                     'service': 'alarm_b'}
+        test_vals.update(_NAGIOS_TEST_INFO)
+        generator = mock_driver.simple_nagios_alarm_generators(1, 1, test_vals)
+        alarm_b_test = mock_driver.generate_random_events_list(generator)[0]
+
+        host_v = self.get_host_after_event(event_queue, alarm_b_test,
+                                           processor, _TARGET_HOST)
+        alarms = self._get_alarms_on_host(host_v, entity_graph)
+        self.assertEqual(2, len(alarms))
+        self.assertEqual(num_orig_vertices + 2, entity_graph.num_vertices())
+        self.assertEqual(num_orig_edges + 2, entity_graph.num_edges())
+
+        # generate nagios alarm_c to trigger, alarm_d is raised
+        test_vals = {'status': 'WARNING',
+                     'service': 'alarm_c'}
+        test_vals.update(_NAGIOS_TEST_INFO)
+        generator = mock_driver.simple_nagios_alarm_generators(1, 1, test_vals)
+        alarm_c_test = mock_driver.generate_random_events_list(generator)[0]
+
+        host_v = self.get_host_after_event(event_queue, alarm_c_test,
+                                           processor, _TARGET_HOST)
+        alarms = self._get_alarms_on_host(host_v, entity_graph)
+        self.assertEqual(4, len(alarms))
+        self.assertEqual(num_orig_vertices + 4, entity_graph.num_vertices())
+        self.assertEqual(num_orig_edges + 4, entity_graph.num_edges())
+
+        # remove nagios alarm_b, alarm_d should not be removed
+        test_vals = {'status': 'OK',
+                     'service': 'alarm_b'}
+        test_vals.update(_NAGIOS_TEST_INFO)
+        generator = mock_driver.simple_nagios_alarm_generators(1, 1, test_vals)
+        alarm_b_ok = mock_driver.generate_random_events_list(generator)[0]
+
+        host_v = self.get_host_after_event(event_queue, alarm_b_ok,
+                                           processor, _TARGET_HOST)
+        alarms = self._get_alarms_on_host(host_v, entity_graph)
+        self.assertEqual(3, len(alarms))
+
+        query = {VProps.CATEGORY: EntityCategory.ALARM,
+                 VProps.IS_DELETED: True}
+        deleted_alarms = entity_graph.neighbors(host_v.vertex_id,
+                                                vertex_attr_filter=query)
+        self.assertEqual(num_orig_vertices + len(deleted_alarms) + 3,
+                         entity_graph.num_vertices())
+
+        query = {VProps.IS_DELETED: True}
+        deleted_edges = entity_graph.neighbors(host_v.vertex_id,
+                                               edge_attr_filter=query)
+        self.assertEqual(num_orig_edges + len(deleted_edges) + 3,
+                         entity_graph.num_edges())
+
+        # remove nagios alarm_a, alarm_d should be removed
+        test_vals = {'status': 'OK',
+                     'service': 'alarm_a'}
+        test_vals.update(_NAGIOS_TEST_INFO)
+        generator = mock_driver.simple_nagios_alarm_generators(1, 1, test_vals)
+        alarm_a_ok = mock_driver.generate_random_events_list(generator)[0]
+
+        host_v = self.get_host_after_event(event_queue, alarm_a_ok,
+                                           processor, _TARGET_HOST)
+        alarms = self._get_alarms_on_host(host_v, entity_graph)
+        self.assertEqual(1, len(alarms))
+
+        query = {VProps.CATEGORY: EntityCategory.ALARM,
+                 VProps.IS_DELETED: True}
+        deleted_alarms = entity_graph.neighbors(host_v.vertex_id,
+                                                vertex_attr_filter=query)
+        self.assertEqual(num_orig_vertices + len(deleted_alarms) + 1,
+                         entity_graph.num_vertices())
+
+        query = {VProps.IS_DELETED: True}
+        deleted_edges = entity_graph.neighbors(host_v.vertex_id,
+                                               edge_attr_filter=query)
+        self.assertEqual(num_orig_edges + len(deleted_edges) + 1,
+                         entity_graph.num_edges())
+
     def get_host_after_event(self, event_queue, nagios_event,
                              processor, target_host):
         processor.process_event(nagios_event)
diff --git a/vitrage/tests/resources/templates/evaluator/complex_and_or_operator_deduced_alarm.yaml b/vitrage/tests/resources/templates/evaluator/complex_and_or_operator_deduced_alarm.yaml
new file mode 100644
index 000000000..d2cd64607
--- /dev/null
+++ b/vitrage/tests/resources/templates/evaluator/complex_and_or_operator_deduced_alarm.yaml
@@ -0,0 +1,53 @@
+metadata:
+ name: complex_and_or_operator_deduced_alarm
+definitions:
+ entities:
+  - entity:
+     category: ALARM
+     type: nagios
+     name: alarm_a
+     severity: WARNING
+     template_id: alarm_a
+  - entity:
+     category: ALARM
+     type: nagios
+     name: alarm_b
+     severity: WARNING
+     template_id: alarm_b
+  - entity:
+     category: ALARM
+     type: nagios
+     name: alarm_c
+     severity: WARNING
+     template_id: alarm_c
+  - entity:
+     category: RESOURCE
+     type: nova.host
+     template_id: host
+ relationships:
+  - relationship:
+     source: alarm_a
+     relationship_type: on
+     target: host
+     template_id : alarm_a_on_host
+  - relationship:
+     source: alarm_b
+     relationship_type: on
+     target: host
+     template_id : alarm_b_on_host
+  - relationship:
+     source: alarm_c
+     relationship_type: on
+     target: host
+     template_id : alarm_c_on_host
+scenarios:
+ - scenario:
+    condition: (alarm_a_on_host or alarm_b_on_host) and alarm_c_on_host
+    actions:
+     - action:
+        action_type: raise_alarm
+        properties:
+          alarm_name: alarm_d
+          severity: WARNING
+        action_target:
+          target: host
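
Illustrative note (not part of the patch): the sketch below is a minimal model of the failure mode described in the commit message. ActionInfo here is a bare namedtuple, and Tracker, action_id and the 'do'/'undo' strings are hypothetical stand-ins rather than Vitrage's real ScenarioEvaluator, ActionInfo or action tracker. It shows why merging ActionInfo objects into a dict keyed by the action id before the tracker runs loses one of the two matches of "(alarm_a or alarm_b) and alarm_c", so that removing alarm_b wrongly undoes alarm_d.

# Illustrative sketch only -- simplified stand-ins, not Vitrage code.
from collections import namedtuple

ActionInfo = namedtuple('ActionInfo', ['spec', 'mode', 'match_hash'])


def action_id(spec):
    # Both matches produce the same spec ("raise alarm_d on host"),
    # so they share a single action id.
    return spec


class Tracker(object):
    """Toy tracker: counts which matches currently support each action."""

    def __init__(self):
        self.supporting = {}  # action id -> set of supporting match hashes

    def apply(self, info):
        matches = self.supporting.setdefault(action_id(info.spec), set())
        if info.mode == 'do':
            matches.add(info.match_hash)
        else:
            matches.discard(info.match_hash)
        # Keep the action as long as at least one match still supports it.
        return 'do' if matches else 'undo'


# Step 3: alarm_c is reported; two matches satisfy the condition.
do_a_c = ActionInfo('raise alarm_d', 'do', hash(('alarm_a', 'alarm_c')))
do_b_c = ActionInfo('raise alarm_d', 'do', hash(('alarm_b', 'alarm_c')))
# Step 4: alarm_b is removed; only the (alarm_b, alarm_c) match is undone.
undo_b_c = ActionInfo('raise alarm_d', 'undo', hash(('alarm_b', 'alarm_c')))

# Old flow: merge by action id first -> the (alarm_a, alarm_c) match is lost.
old_tracker = Tracker()
for info in {action_id(i.spec): i for i in (do_a_c, do_b_c)}.values():
    old_tracker.apply(info)
print(old_tracker.apply(undo_b_c))  # 'undo' -> alarm_d is wrongly removed

# Fixed flow: every ActionInfo reaches the tracker; identical actions are
# de-duplicated by action id only afterwards, just before execution.
new_tracker = Tracker()
for info in (do_a_c, do_b_c):
    new_tracker.apply(info)
print(new_tracker.apply(undo_b_c))  # 'do' -> alarm_d stays raised

This is the motivation for the change above: the intermediate collection becomes a list so that every match reaches _analyze_and_filter_actions, and de-duplication by _generate_action_id is applied only to the final actions_to_perform values, right before execution.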