fix conf

Change-Id: I6b14932a9c603d0f415578b66fec4ff7707f4d43
parent 186a0dad33
commit eb5b5f268d
@@ -22,9 +22,11 @@ TEMPEST_CONFIG=$BASE/new/tempest/etc/tempest.conf
 iniset $TEMPEST_CONFIG service_available vitrage true
 
 if [ "$1" = "mock" ]; then
+    iniset $TEMPEST_CONFIG root_cause_analysis_service zabbix_alarms_per_host 8
     iniset $TEMPEST_CONFIG root_cause_analysis_service instances_per_host 50
     iniset $TEMPEST_CONFIG root_cause_analysis_service snapshots_interval 60
 else
+    iniset $TEMPEST_CONFIG root_cause_analysis_service zabbix_alarms_per_host 2
     iniset $TEMPEST_CONFIG root_cause_analysis_service instances_per_host 2
     iniset $TEMPEST_CONFIG root_cause_analysis_service snapshots_interval 120
 fi
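
For context: devstack's iniset writes "option = value" under the named section of the
target file, so with the mock branch taken the job ends up with roughly this fragment
in tempest.conf (a sketch; other gate steps may set further keys in the same section):

    [root_cause_analysis_service]
    zabbix_alarms_per_host = 8
    instances_per_host = 50
    snapshots_interval = 60
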
@@ -53,4 +55,4 @@ sudo -E stestr init
 echo "Listing existing Tempest tests"
 sudo -E stestr list vitrage_tempest_plugin | grep -E "$TESTS" | sort | tee /tmp/vitrage_tempest_tests.list
 echo "Testing $1: $TESTS..."
-sudo -E stestr run --serial --subunit --load-list=/tmp/vitrage_tempest_tests.list | subunit-trace --fails
+sudo -E stestr run --serial --subunit --load-list=/tmp/vitrage_tempest_tests.list | subunit-trace --fails --no-failure-debug
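
As subunit-trace documents its options, --no-failure-debug disables the real-time
printing of each failure's captured debug output as tests fail; since --fails is
already passed, failure details are still printed once after the stream is processed,
which keeps the gate console log shorter without losing the tracebacks.
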
@@ -35,6 +35,7 @@ RcaServiceGroup = [
                default=120,
                min=10,
                help='Time to wait between subsequent datasource snapshots'),
+    cfg.IntOpt('zabbix_alarms_per_host', default=2),
     cfg.StrOpt('aodh_version', default='2', help='Aodh version'),
     cfg.StrOpt('ceilometer_version', default='2', help='Ceilometer version'),
     cfg.StrOpt('nova_version', default='2.11', help='Nova version'),
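
The new option gives the plugin a configurable per-host Zabbix alarm count in the
root_cause_analysis_service group, replacing the mock-datasource value the test code
read before (see the last hunk). A minimal, self-contained sketch of how such an
option behaves with oslo.config; group and option names are taken from the diff, and
the standalone ConfigOpts instance is for illustration only:

    from oslo_config import cfg

    opts = [
        cfg.IntOpt('zabbix_alarms_per_host', default=2),
    ]

    conf = cfg.ConfigOpts()
    conf.register_group(cfg.OptGroup('root_cause_analysis_service'))
    conf.register_opts(opts, group='root_cause_analysis_service')
    conf([])  # parse an empty command line; tempest.conf would normally supply values

    # Returns 2 unless the config file (written by the iniset calls above) overrides it.
    print(conf.root_cause_analysis_service.zabbix_alarms_per_host)
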
@@ -38,9 +38,17 @@ MAX_FAIL_OVER_TIME = 20
 
 class TestLongProcessing(TestActionsBase):
 
+    def tearDown(self):
+        super(TestLongProcessing, self).tearDown()
+        if v_utils.get_first_template(name=TEMPLATE_NAME):
+            v_utils.delete_template(name=TEMPLATE_NAME)
+            time.sleep(SLEEP)
+
     @classmethod
     def setUpClass(cls):
         super(TestLongProcessing, cls).setUpClass()
+        logger = logging.getLogger('vitrageclient.v1.client').logger
+        logger.setLevel(logging.INFO)
         if v_utils.get_first_template(name=TEMPLATE_NAME):
             v_utils.delete_template(name=TEMPLATE_NAME)
             time.sleep(SLEEP)
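
Moving the template cleanup into tearDown is what lets the next hunk drop the
per-test try/finally blocks: unittest calls tearDown after every test method,
whether it passed or failed. A minimal illustration of that guarantee (the Demo
class is hypothetical, not from the plugin):

    import unittest

    class Demo(unittest.TestCase):
        def setUp(self):
            self.templates = ['template']  # stand-in for the Vitrage template

        def tearDown(self):
            self.templates.clear()         # runs even when the test fails

        def test_template_present(self):
            self.assertEqual(['template'], self.templates)

    if __name__ == '__main__':
        unittest.main()
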
@@ -88,79 +96,66 @@ class TestLongProcessing(TestActionsBase):
                 'CRITICAL doctor events expected')
         finally:
             self._remove_doctor_events()
-            if v_utils.get_first_template(name=TEMPLATE_NAME):
-                v_utils.delete_template(name=TEMPLATE_NAME)
-                time.sleep(SLEEP)
 
     @utils.tempest_logger
     def test_db_init(self):
-        try:
-            v_utils.add_template(TEMPLATE_NAME)
-            time.sleep(SLEEP)
+        v_utils.add_template(TEMPLATE_NAME)
+        time.sleep(SLEEP)
 
-            # 1. check template works well
-            self._check_template_instance_3rd_degree_scenarios()
+        # 1. check template works well
+        self._check_template_instance_3rd_degree_scenarios()
 
-            # 2. check fast fail-over - start from database
-            topo1 = TempestClients.vitrage().topology.get(all_tenants=True)
-            v_utils.restart_graph()
-            time.sleep(MAX_FAIL_OVER_TIME)
-            for i in range(5):
-                self._check_template_instance_3rd_degree_scenarios()
-                topo2 = TempestClients.vitrage().topology.get(all_tenants=True)
-                self.assert_graph_equal(
-                    topo1, topo2, 'comparing graph items iteration ' + str(i))
-                time.sleep(CONF.root_cause_analysis_service.snapshots_interval)
+        # 2. check fast fail-over - start from database
+        topo1 = TempestClients.vitrage().topology.get(all_tenants=True)
+        v_utils.restart_graph()
+        time.sleep(MAX_FAIL_OVER_TIME)
+        for i in range(5):
+            self._check_template_instance_3rd_degree_scenarios()
+            topo2 = TempestClients.vitrage().topology.get(all_tenants=True)
+            self.assert_graph_equal(
+                topo1, topo2, 'comparing graph items iteration ' + str(i))
+            time.sleep(CONF.root_cause_analysis_service.snapshots_interval)
 
-            v_utils.delete_template(name=TEMPLATE_NAME)
-            time.sleep(SLEEP)
-            self._check_template_instance_3rd_degree_scenarios_deleted()
-
-        finally:
-            if v_utils.get_first_template(name=TEMPLATE_NAME):
-                v_utils.delete_template(name=TEMPLATE_NAME)
-                time.sleep(SLEEP)
+        v_utils.delete_template(name=TEMPLATE_NAME)
+        time.sleep(SLEEP)
+        self._check_template_instance_3rd_degree_scenarios_deleted()
 
     def _check_template_instance_3rd_degree_scenarios(self):
 
-        try:
-            alarm_count = TempestClients.vitrage().alarm.count(
-                all_tenants=True)
-            self.assertEqual(
-                CONF.root_cause_analysis_service.instances_per_host,
-                alarm_count['SEVERE'],
-                'Each instance should have one SEVERE deduced alarm')
-            self.assertEqual(
-                CONF.root_cause_analysis_service.instances_per_host,
-                alarm_count['CRITICAL'],
-                'Each instance should have one CRITICAL deduced alarm')
+        alarm_count = TempestClients.vitrage().alarm.count(
+            all_tenants=True)
+        self.assertEqual(
+            CONF.root_cause_analysis_service.instances_per_host,
+            alarm_count['SEVERE'],
+            'Each instance should have one SEVERE deduced alarm')
+        self.assertEqual(
+            CONF.root_cause_analysis_service.instances_per_host,
+            alarm_count['CRITICAL'],
+            'Each instance should have one CRITICAL deduced alarm')
 
-            expected_rca = [{VertexProperties.VITRAGE_TYPE: 'zabbix'}] * self.\
-                conf.mock_graph_datasource.zabbix_alarms_per_host
-            expected_rca.extend([{'name': DEDUCED_1}, {'name': DEDUCED_2}])
+        expected_rca = [{VertexProperties.VITRAGE_TYPE: 'zabbix'}] * CONF.\
+            root_cause_analysis_service.zabbix_alarms_per_host
+        expected_rca.extend([{'name': DEDUCED_1}, {'name': DEDUCED_2}])
 
-            def check_rca(alarm):
-                rca = TempestClients.vitrage().rca.get(alarm['vitrage_id'],
-                                                       all_tenants=True)
-                try:
-                    self._check_rca(rca, expected_rca, alarm)
-                    return True
-                except Exception:
-                    LOG.exception('check_rca failed')
-                    return False
+        def check_rca(alarm):
+            rca = TempestClients.vitrage().rca.get(alarm['vitrage_id'],
+                                                   all_tenants=True)
+            try:
+                self._check_rca(rca, expected_rca, alarm)
+                return True
+            except Exception:
+                LOG.exception('check_rca failed')
+                return False
 
-            # 10 threads calling rca api
-            alarms = TempestClients.vitrage().alarm.list(all_tenants=True,
-                                                         vitrage_id='all')
-            deduced_alarms = g_utils.all_matches(
-                alarms, vitrage_type='vitrage', name=DEDUCED_2)
-            workers = futures.ThreadPoolExecutor(max_workers=10)
-            workers_result = [r for r in workers.map(check_rca,
-                                                     deduced_alarms)]
-            self.assertTrue(all(workers_result))
-
-        finally:
-            v_utils.delete_template(name=TEMPLATE_NAME)
+        # 10 threads calling rca api
+        alarms = TempestClients.vitrage().alarm.list(all_tenants=True,
+                                                     vitrage_id='all')
+        deduced_alarms = g_utils.all_matches(
+            alarms, vitrage_type='vitrage', name=DEDUCED_2)
+        workers = futures.ThreadPoolExecutor(max_workers=10)
+        workers_result = [r for r in workers.map(check_rca,
+                                                 deduced_alarms)]
+        self.assertTrue(all(workers_result))
 
     def _check_template_instance_3rd_degree_scenarios_deleted(self):
         alarm_count = TempestClients.vitrage().alarm.count(
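
A detail worth noting in the logic kept by this hunk: the RCA checks are fanned out
over ten threads with futures.ThreadPoolExecutor, and the test passes only if every
worker returns True. A small self-contained sketch of that pattern (the even-number
check is a stand-in for check_rca):

    from concurrent import futures

    def check(item):
        # Stand-in for check_rca: returns True when the item passes the check.
        return item % 2 == 0

    with futures.ThreadPoolExecutor(max_workers=10) as workers:
        # map() runs check() concurrently and yields results in input order.
        results = list(workers.map(check, [2, 4, 6, 8]))

    assert all(results)  # mirrors self.assertTrue(all(workers_result))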