Add failover tests for plugins' toolchain
Change-Id: Ie81b0e0ee6d8a88af15128ffe56c0024d4fc3055
parent 4d806b3aee
commit bcba5b798d
@@ -110,3 +110,13 @@ class PluginApi(object):
         msg = "Plugin has not become online after a waiting period"
         devops_helpers.wait(
             check_availability, timeout=timeout, timeout_msg=msg)
+
+    def check_plugin_failover(self):
+        """Check that failover for the plugin works.
+        """
+        vip_name = self.helpers.full_vip_name(self.settings.vip_name)
+        target_node = self.helpers.get_node_with_vip(
+            self.settings.role_name, vip_name)
+        self.helpers.power_off_node(target_node)
+        self.helpers.wait_for_vip_migration(
+            target_node, self.settings.role_name, vip_name)
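The new check_plugin_failover() relies on a wait_for_vip_migration() helper that is not shown in this diff. As a rough sketch of the polling pattern it implies, built only from the devops_helpers.wait() and get_node_with_vip() calls visible above (the signature, timeout and import path are assumptions, not the actual fuel-qa code):

# Hypothetical sketch of a wait_for_vip_migration helper; names, defaults
# and the import path are assumptions, not taken from this commit.
from devops.helpers import helpers as devops_helpers


def wait_for_vip_migration(helpers, old_node, role_name, vip_name,
                           timeout=5 * 60):
    """Wait until the VIP is hosted by a node other than old_node."""
    def vip_moved():
        # Same helper the diff uses to locate the current VIP holder.
        current = helpers.get_node_with_vip(role_name, vip_name)
        return current is not None and current.name != old_node.name

    msg = "VIP {0} has not migrated off node {1}".format(
        vip_name, old_node.name)
    devops_helpers.wait(vip_moved, timeout=timeout, timeout_msg=msg)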
@@ -146,8 +146,7 @@ class TestNodesElasticsearchPlugin(api.ElasticsearchPluginApi):
         self.helpers.run_ostf()

     @test(depends_on_groups=["deploy_ha_elasticsearch_kibana"],
-          groups=["check_failover_elasticsearch_kibana" "failover",
-                  "elasticsearch_kibana", "system", "destructive",
+          groups=["failover", "elasticsearch_kibana", "system", "destructive",
                   "shutdown_elasticsearch_kibana_node"])
     @log_snapshot_after_test
     def shutdown_elasticsearch_kibana_node(self):
@@ -165,15 +164,7 @@ class TestNodesElasticsearchPlugin(api.ElasticsearchPluginApi):
         """
         self.env.revert_snapshot("deploy_ha_elasticsearch_kibana")

-        vip_name = self.helpers.full_vip_name(self.settings.vip_name)
-
-        target_node = self.helpers.get_node_with_vip(
-            self.settings.role_name, vip_name)
-
-        self.helpers.power_off_node(target_node)
-
-        self.helpers.wait_for_vip_migration(
-            target_node, self.settings.role_name, vip_name)
+        self.check_plugin_failover()

         self.check_plugin_online()

@@ -286,6 +286,9 @@ class PluginHelper(object):

     def power_off_node(self, node):
         """Power off a node.
+
+        :param node: Devops node.
+        :type node: devops node instance
         """
         msg = 'Node {0} has not become offline after hard shutdown'.format(
             node.name)
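Only the first lines of power_off_node() fall inside this hunk. For orientation, a plausible completion of the helper, as it might appear inside PluginHelper, is sketched below; the node.destroy() call and the get_nailgun_node_by_devops_node() lookup are assumptions about the fuel-devops/fuel-qa APIs and are not part of this commit:

# Illustrative completion only -- not the code from this commit.
from devops.helpers import helpers as devops_helpers  # assumed import


def power_off_node(self, node):
    """Power off a node.

    :param node: Devops node.
    :type node: devops node instance
    """
    msg = 'Node {0} has not become offline after hard shutdown'.format(
        node.name)
    node.destroy()  # hard power-off of the underlying VM (assumed API)
    devops_helpers.wait(
        lambda: not self.fuel_web.get_nailgun_node_by_devops_node(
            node)['online'],
        timeout=60 * 5, timeout_msg=msg)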
@@ -156,8 +156,7 @@ class TestNodesInfluxdbPlugin(api.InfluxdbPluginApi):
         self.helpers.run_ostf()

     @test(depends_on_groups=["deploy_ha_influxdb_grafana"],
-          groups=["check_failover_influxdb_grafana" "failover",
-                  "influxdb_grafana", "system", "destructive",
+          groups=["failover", "influxdb_grafana", "system", "destructive",
                   "shutdown_influxdb_grafana_node"])
     @log_snapshot_after_test
     def shutdown_influxdb_grafana_node(self):
@@ -175,20 +174,10 @@ class TestNodesInfluxdbPlugin(api.InfluxdbPluginApi):
         """
         self.env.revert_snapshot("deploy_ha_influxdb_grafana")

-        vip_name = self.helpers.full_vip_name(self.settings.vip_name)
-
-        target_node = self.helpers.get_node_with_vip(
-            self.settings.role_name, vip_name)
-
-        self.helpers.power_off_node(target_node)
-
-        self.helpers.wait_for_vip_migration(
-            target_node, self.settings.role_name, vip_name)
+        self.check_plugin_failover()

         self.check_plugin_online()

-        # TODO(rpromyshlennikov): check no data lost
-
         self.helpers.run_ostf()

     @test(depends_on_groups=["prepare_slaves_3"],
@@ -52,7 +52,9 @@ class LMACollectorPluginApi(base_test.PluginApi):
         # Starting with 0.10, there are one collector for logs and one for
         # metrics
         processes_count["hekad"] = 2
-        for node in self.helpers.get_all_ready_nodes():
+        online_nodes = [node for node in self.helpers.get_all_ready_nodes()
+                        if node["online"]]
+        for node in online_nodes:
             pids[node["name"]] = {}
             with self.env.d_env.get_ssh_to_remote(node["ip"]) as remote:
                 for process, count in processes_count.items():
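The switch to online_nodes matters because the new failover tests leave one node powered off, and opening an SSH connection to it would make the process check fail. A minimal, standalone illustration of the filter (the node dictionaries simply mirror the "name", "ip" and "online" keys used above; the values are made up):

def filter_online(nodes):
    """Keep only the nodes that Nailgun still reports as online."""
    return [node for node in nodes if node["online"]]


nodes = [
    {"name": "node-1", "ip": "10.109.0.3", "online": True},
    {"name": "node-2", "ip": "10.109.0.4", "online": False},  # powered off
]
assert [n["name"] for n in filter_online(nodes)] == ["node-1"]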
@@ -141,7 +141,8 @@ class TestLMAInfraAlertingPluginSystem(api.InfraAlertingPluginApi):

     @test(depends_on_groups=["deploy_ha_lma_infrastructure_alerting"],
           groups=["shutdown_infrastructure_alerting_node", "system",
-                  "lma_infrastructure_alerting", "shutdown"])
+                  "lma_infrastructure_alerting", "destructive",
+                  "failover"])
     @log_snapshot_after_test
     def shutdown_infrastructure_alerting_node(self):
         """Verify that failover for LMA Infrastructure Alerting cluster works.
@@ -158,12 +159,7 @@ class TestLMAInfraAlertingPluginSystem(api.InfraAlertingPluginApi):
         Duration 30m
         """
         self.env.revert_snapshot("deploy_ha_lma_infrastructure_alerting")
-        vip_name = self.helpers.full_vip_name(self.settings.vip_name)
-        target_node = self.helpers.get_node_with_vip(
-            self.settings.role_name, vip_name)
-        self.helpers.power_off_node(target_node)
-        self.helpers.wait_for_vip_migration(
-            target_node, self.settings.role_name, vip_name)
+        self.check_plugin_failover()
         self.check_plugin_online()
         self.helpers.run_ostf()

@@ -241,3 +241,80 @@ class TestNodesToolchain(api.ToolchainApi):
         self.check_plugins_online()

         self.helpers.run_ostf()
+
+    @test(depends_on_groups=["deploy_ha_toolchain"],
+          groups=["shutdown_infrastructure_alerting_node_in_toolchain",
+                  "failover", "toolchain", "system", "destructive"])
+    @log_snapshot_after_test
+    def shutdown_infrastructure_alerting_node_in_toolchain(self):
+        """Verify that failover for the LMA Infrastructure Alerting cluster
+        in the plugins toolchain works.
+
+        Scenario:
+            1. Shut down the node where vip_infrastructure_alerting_mgmt_vip
+               was started.
+            2. Check that vip_infrastructure_alerting was started
+               on another plugin node.
+            3. Check that the plugins toolchain is working.
+            4. Check that no data is lost after the shutdown.
+            5. Run OSTF.
+
+        Duration 30m
+        """
+        self.env.revert_snapshot("deploy_ha_toolchain")
+
+        self.LMA_INFRASTRUCTURE_ALERTING.check_plugin_failover()
+
+        self.check_plugins_online()
+
+        self.helpers.run_ostf()
+
+    @test(depends_on_groups=["deploy_ha_toolchain"],
+          groups=["shutdown_influxdb_grafana_node_in_toolchain",
+                  "failover", "toolchain", "system", "destructive"])
+    @log_snapshot_after_test
+    def shutdown_influxdb_grafana_node_in_toolchain(self):
+        """Verify that failover for the InfluxDB cluster
+        in the plugins toolchain works.
+
+        Scenario:
+            1. Shut down the node where vip_influxdb was started.
+            2. Check that vip_influxdb was started on another plugin node.
+            3. Check that the plugins toolchain is working.
+            4. Check that no data is lost after the shutdown.
+            5. Run OSTF.
+
+        Duration 30m
+        """
+        self.env.revert_snapshot("deploy_ha_toolchain")
+
+        self.INFLUXDB_GRAFANA.check_plugin_failover()
+
+        self.check_plugins_online()
+
+        self.helpers.run_ostf()
+
+    @test(depends_on_groups=["deploy_ha_toolchain"],
+          groups=["shutdown_elasticsearch_kibana_node_in_toolchain",
+                  "failover", "toolchain", "system", "destructive"])
+    @log_snapshot_after_test
+    def shutdown_elasticsearch_kibana_node_in_toolchain(self):
+        """Verify that failover for the Elasticsearch cluster
+        in the plugins toolchain works.
+
+        Scenario:
+            1. Shut down the node where es_vip_mgmt was started.
+            2. Check that es_vip_mgmt was started on another plugin node.
+            3. Check that the plugins toolchain is working.
+            4. Check that no data is lost after the shutdown.
+            5. Run OSTF.
+
+        Duration 30m
+        """
+        self.env.revert_snapshot("deploy_ha_toolchain")
+
+        self.ELASTICSEARCH_KIBANA.check_plugin_failover()
+
+        self.check_plugins_online()
+
+        self.helpers.run_ostf()
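The three toolchain failover tests above share the same shape: revert the HA snapshot, trigger one plugin's failover, then re-verify the whole toolchain. A generic driver, shown purely as an illustration of that pattern (it is not part of this commit), could read:

def run_failover_scenario(test, plugin_api):
    """Revert the HA snapshot, kill the plugin's VIP node, re-verify."""
    test.env.revert_snapshot("deploy_ha_toolchain")
    plugin_api.check_plugin_failover()  # power off the VIP node, wait for VIP
    test.check_plugins_online()         # every plugin must still respond
    test.helpers.run_ostf()             # and the cloud must still pass OSTF

Calling run_failover_scenario(self, self.INFLUXDB_GRAFANA) would reproduce shutdown_influxdb_grafana_node_in_toolchain step by step.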