Add system tests for LMA Infrastructure Alerting

This change also adds Selenium helpers for testing the Nagios UI.

Change-Id: Ibf5c5b5ccbd4ee996cde1dbf01d79ca3dd0a6bae
Vladimir Ushakov, 2016-05-16 16:16:34 +03:00; committed by Simon Pasquier
parent 40e9aeae69
commit d109429c45
7 changed files with 324 additions and 10 deletions


@@ -1,4 +1,5 @@
git+git://github.com/openstack/fuel-devops.git@2.9.20
requests
+selenium
six
tox


@@ -35,7 +35,7 @@ class PluginApi(object):
        self.helpers = helpers.PluginHelper(self.env)
        self.checkers = checkers
        self.remote_ops = remote_ops
-       self.ui_tester = ui_tester.UITester()
+       self.ui_tester = ui_tester

    def __getattr__(self, item):
        return getattr(self.test, item)


@@ -212,3 +212,18 @@ class PluginHelper(object):
                                             plugin_version))
        asserts.assert_equal(exit_code, exec_res['exit_code'],
                             msg.format(plugin_name, exit_code))
+
+    def get_fuel_node_name(self, changed_node):
+        for node in self.fuel_web.client.list_cluster_nodes(self.cluster_id):
+            if node["name"] == changed_node:
+                return node["hostname"]
+        return None
+
+    def fuel_createmirror(self, option="", exit_code=0):
+        logger.info("Executing 'fuel-createmirror' command.")
+        with self.env.d_env.get_admin_remote() as remote:
+            exec_res = remote.execute(
+                "fuel-createmirror {0}".format(option))
+            asserts.assert_equal(exit_code, exec_res['exit_code'],
+                                 'fuel-createmirror failed:'
+                                 ' {0}'.format(exec_res['stderr']))


@@ -11,8 +11,40 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

+from proboscis import asserts
+from selenium import webdriver
+from selenium.webdriver.common.by import By
+from selenium.webdriver.support import expected_conditions as EC
+from selenium.webdriver.support.ui import WebDriverWait

-class UITester(object):
-    # NOTE(rpromyshlennikov): to prepare to move UI test
-    pass
+
+def get_driver(ip, anchor, title):
+    driver = webdriver.Firefox()
+    driver.get(ip)
+    WebDriverWait(driver, 120).until(
+        EC.presence_of_element_located((By.XPATH, anchor)))
+    asserts.assert_equal(True, title in driver.title,
+                         "Title {0} was not found in {1}!".format(
+                             title, driver.title))
+    return driver
+
+
+def get_table(driver, xpath, frame=None):
+    if frame:
+        driver.switch_to.default_content()
+        driver.switch_to.frame(driver.find_element_by_name(frame))
+    return driver.find_element_by_xpath(xpath)
+
+
+def get_table_row(table, row_id):
+    return table.find_element_by_xpath("tr[{0}]".format(row_id))
+
+
+def get_table_size(table):
+    return len(table.find_elements_by_xpath("tr[position() > 0]"))
+
+
+def get_table_cell(table, row_id, column_id):
+    row = get_table_row(table, row_id)
+    return row.find_element_by_xpath("td[{0}]".format(column_id))
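Taken together, these helpers are thin wrappers around a single Selenium session. A minimal usage sketch follows; the URL, XPath expressions and page title are illustrative assumptions, not values taken from this change:

    # Open a framed page, wait for the anchor element and verify the title.
    driver = get_driver("http://user:secret@192.168.0.2:8001",
                        "//frame[2]", "Nagios Core")
    try:
        # Read the first column of every data row (row 1 is the header row).
        table = get_table(driver, "/html/body/div[2]/table/tbody",
                          frame="main")
        for row in range(2, get_table_size(table) + 1):
            print(get_table_cell(table, row, 1).text.rstrip())
    finally:
        driver.close()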


@@ -13,6 +13,11 @@
# under the License.

from fuelweb_test import logger
+from proboscis import asserts
+from selenium.webdriver.common.by import By
+from selenium.webdriver.support import expected_conditions as EC
+from selenium.webdriver.support.ui import WebDriverWait

from stacklight_tests import base_test
from stacklight_tests.lma_infrastructure_alerting import (
@@ -34,10 +39,59 @@ class InfraAlertingPluginApi(base_test.PluginApi):
        return self.helpers.get_plugin_vip(self.settings.vip_name)

    def check_plugin_online(self):
-       lma_alerting_vip = self.get_plugin_vip()
        logger.info("Check that the Nagios server is running")
-       self.checkers.check_http_get_response(
-           "http://{0}:{1}@{2}:8001".format(
-               self.settings.nagios_user, self.settings.nagios_password,
-               lma_alerting_vip))
+       self.checkers.check_http_get_response(self.get_nagios_url())
+
+    def get_nagios_url(self):
+        return "http://{0}:{1}@{2}:8001".format(self.settings.nagios_user,
+                                                self.settings.nagios_password,
+                                                self.get_plugin_vip())
+
+    def get_primary_lma_node(self, exclude=None):
+        nailgun_nodes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
+            self.helpers.cluster_id, self.settings.role_name)
+        lma_nodes = self.fuel_web.get_devops_nodes_by_nailgun_nodes(
+            nailgun_nodes)
+        if exclude:
+            for node in lma_nodes:
+                if node.name != exclude:
+                    lma_node = node
+                    break
+        else:
+            lma_node = lma_nodes[0]
+        return self.fuel_web.get_pacemaker_resource_location(
+            lma_node.name, "vip__infrastructure_alerting_mgmt_vip")[0]
+
+    def open_nagios_page(self, link_text, anchor):
+        driver = self.ui_tester.get_driver(self.get_nagios_url(),
+                                           "//frame[2]", "Nagios Core")
+        driver.switch_to.default_content()
+        driver.switch_to.frame(driver.find_element_by_name("side"))
+        link = driver.find_element_by_link_text(link_text)
+        link.click()
+        driver.switch_to.default_content()
+        driver.switch_to.frame(driver.find_element_by_name("main"))
+        WebDriverWait(driver, 120).until(
+            EC.presence_of_element_located((By.XPATH, anchor)))
+        return driver
+
+    def check_node_in_nagios(self, changed_node, state):
+        driver = self.open_nagios_page(
+            'Hosts', "//table[@class='headertable']")
+        try:
+            asserts.assert_equal(state, self.node_is_present(
+                driver, changed_node),
+                "Failed to find node '{0}' in Nagios!".format(changed_node))
+        finally:
+            driver.close()
+
+    def node_is_present(self, driver, name):
+        table = self.ui_tester.get_table(driver,
+                                         "/html/body/div[2]/table/tbody")
+        for ind in xrange(2, self.ui_tester.get_table_size(table) + 1):
+            node_name = self.ui_tester.get_table_cell(
+                table, ind, 1).text.rstrip()
+            if name == node_name:
+                return True
+        return False
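A note on the frame switching above: the classic Nagios Core web UI is an HTML frameset with a menu frame named "side" and a content frame named "main", so the driver must return to the top document (switch_to.default_content) before it can enter the other frame. At the test level, the new methods combine roughly as in this hedged sketch (the node name is illustrative; the real tests below derive it with get_fuel_node_name):

    # Sketch only: assumes a test class deriving from InfraAlertingPluginApi.
    self.check_plugin_online()                  # Nagios answers over HTTP
    self.check_node_in_nagios('node-3', True)   # node is listed under 'Hosts'
    primary = self.get_primary_lma_node()       # devops node holding the VIP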


@@ -0,0 +1,210 @@
# coding=utf-8
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from proboscis import asserts
from proboscis import test

from fuelweb_test.helpers.decorators import log_snapshot_after_test

from stacklight_tests.lma_infrastructure_alerting import api


@test(groups=["plugins"])
class TestLMAInfraAlertingPluginSystem(api.InfraAlertingPluginApi):
    """Class for system testing the LMA Infrastructure Alerting plugin."""

    @test(depends_on_groups=['deploy_ha_lma_infrastructure_alerting'],
          groups=["add_remove_controller_lma_infrastructure_alerting",
                  "system", "lma_infrastructure_alerting", "scaling"])
    @log_snapshot_after_test
    def add_remove_controller_lma_infrastructure_alerting(self):
        """Add/remove controller nodes in an existing environment.

        Scenario:
            1. Remove 1 node with the controller role.
            2. Re-deploy the cluster.
            3. Check the plugin services using the CLI.
            4. Check in the Nagios UI that the removed node is no
               longer monitored.
            5. Run the health checks (OSTF).
            6. Add 1 new node with the controller role.
            7. Re-deploy the cluster.
            8. Check the plugin services using the CLI.
            9. Check in the Nagios UI that the new node is monitored.
            10. Run the health checks (OSTF).

        Duration 60m
        """
        self.env.revert_snapshot("deploy_ha_lma_infrastructure_alerting")

        target_node = self.helpers.get_fuel_node_name('slave-02_controller')
        self.helpers.remove_node_from_cluster({'slave-02': ['controller']})
        self.helpers.run_ostf(should_fail=1)
        self.check_plugin_online()
        self.check_node_in_nagios(target_node, False)

        self.helpers.add_node_to_cluster({'slave-02': ['controller']})
        self.helpers.run_ostf(should_fail=1)
        self.check_plugin_online()
        target_node = self.helpers.get_fuel_node_name('slave-02_controller')
        self.check_node_in_nagios(target_node, True)

    @test(depends_on_groups=['deploy_ha_lma_infrastructure_alerting'],
          groups=["add_remove_compute_lma_infrastructure_alerting", "system",
                  "lma_infrastructure_alerting", "scaling"])
    @log_snapshot_after_test
    def add_remove_compute_lma_infrastructure_alerting(self):
        """Add/remove compute nodes in an existing environment.

        Scenario:
            1. Remove 1 node with the compute role.
            2. Re-deploy the cluster.
            3. Check the plugin services using the CLI.
            4. Check in the Nagios UI that the removed node is no
               longer monitored.
            5. Run the health checks (OSTF).
            6. Add 1 new node with the compute role.
            7. Re-deploy the cluster.
            8. Check the plugin services using the CLI.
            9. Check in the Nagios UI that the new node is monitored.
            10. Run the health checks (OSTF).

        Duration 60m
        """
        self.env.revert_snapshot("deploy_ha_lma_infrastructure_alerting")

        target_node = self.helpers.get_fuel_node_name(
            'slave-04_compute_cinder')
        self.helpers.remove_node_from_cluster(
            {'slave-04': ['compute', 'cinder']}, False, True)
        self.helpers.run_ostf(should_fail=1)
        self.check_plugin_online()
        self.check_node_in_nagios(target_node, False)

        self.helpers.add_node_to_cluster({'slave-04': ['compute', 'cinder']})
        self.helpers.run_ostf(should_fail=1)
        self.check_plugin_online()
        target_node = self.helpers.get_fuel_node_name(
            'slave-04_compute_cinder')
        self.check_node_in_nagios(target_node, True)

    @test(depends_on_groups=['deploy_ha_lma_infrastructure_alerting'],
          groups=["add_remove_infrastructure_alerting_node", "system",
                  "lma_infrastructure_alerting", "scaling"])
    @log_snapshot_after_test
    def add_remove_infrastructure_alerting_node(self):
        """Add/remove infrastructure alerting nodes in an existing environment.

        Scenario:
            1. Remove 1 node with the infrastructure_alerting role.
            2. Re-deploy the cluster.
            3. Check the plugin services using the CLI.
            4. Check that the Nagios UI works correctly.
            5. Run the health checks (OSTF).
            6. Add 1 new node with the infrastructure_alerting role.
            7. Re-deploy the cluster.
            8. Check the plugin services using the CLI.
            9. Check that the Nagios UI works correctly.
            10. Run the health checks (OSTF).

        Duration 60m
        """
        self.env.revert_snapshot("deploy_ha_lma_infrastructure_alerting")

        target_node = self.helpers.get_fuel_node_name(
            'slave-05_{0}'.format(self.settings.role_name[0]))
        self.helpers.remove_node_from_cluster(
            {'slave-05': self.settings.role_name})
        self.helpers.run_ostf(should_fail=1)
        self.check_plugin_online()
        self.check_node_in_nagios(target_node, False)

        self.helpers.add_node_to_cluster(
            {'slave-05': self.settings.role_name})
        self.helpers.run_ostf(should_fail=1)
        self.check_plugin_online()
        target_node = self.helpers.get_fuel_node_name(
            'slave-05_{0}'.format(self.settings.role_name[0]))
        self.check_node_in_nagios(target_node, True)

    @test(depends_on_groups=['deploy_ha_lma_infrastructure_alerting'],
          groups=["shutdown_infrastructure_alerting_node", "system",
                  "lma_infrastructure_alerting", "shutdown"])
    @log_snapshot_after_test
    def shutdown_infrastructure_alerting_node(self):
        """Shut down the infrastructure alerting node.

        Scenario:
            1. Connect to any infrastructure_alerting node and run
               the 'crm status' command.
            2. Shut down the node where the
               vip__infrastructure_alerting_mgmt_vip resource was started.
            3. Check that vip__infrastructure_alerting_mgmt_vip is started
               on another infrastructure_alerting node.
            4. Check that the Nagios UI works correctly.
            5. Check that no data is lost after the shutdown.
            6. Run OSTF.

        Duration 60m
        """
        self.env.revert_snapshot("deploy_ha_lma_infrastructure_alerting")

        target_node = self.get_primary_lma_node()
        self.fuel_web.warm_shutdown_nodes([target_node])
        new_node = self.get_primary_lma_node(target_node.name)
        asserts.assert_not_equal(target_node, new_node)

        self.check_plugin_online()
        self.helpers.run_ostf()

    @test(depends_on_groups=['prepare_slaves_3'],
          groups=["lma_infrastructure_alerting_createmirror_deploy_plugin",
                  "system", "lma_infrastructure_alerting", "createmirror"])
    @log_snapshot_after_test
    def lma_infrastructure_alerting_createmirror_deploy_plugin(self):
        """Run fuel-createmirror and deploy the environment.

        Scenario:
            1. Copy the plugins to the Fuel Master node and
               install the plugins.
            2. Run the following command on the master node:
               fuel-createmirror
            3. Create an environment with the plugins enabled in the
               Fuel Web UI and deploy it.
            4. Run OSTF.

        Duration 60m
        """
        self.env.revert_snapshot("ready_with_3_slaves")

        self.prepare_plugin()
        self.helpers.fuel_createmirror()
        self.create_cluster()
        self.activate_plugin()
        self.helpers.deploy_cluster(
            {
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': self.settings.role_name
            }
        )

        self.check_plugin_online()
        self.helpers.run_ostf()


@@ -50,6 +50,8 @@ def import_tests():
    from stacklight_tests.lma_infrastructure_alerting import (  # noqa
        test_smoke_bvt)  # noqa
+   from stacklight_tests.lma_infrastructure_alerting import (  # noqa
+       test_system)  # noqa


def run_tests():
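With the module registered, an individual test group can presumably be selected through proboscis's standard group filter when invoking the repository's runner; the script name below mirrors the run_tests() entry point and is an assumption:

    python run_tests.py --group=lma_infrastructure_alerting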