Add test that checks work after maintenance update

Change-Id: I9dcbc4bc5d4888ab23ef8d4385147bad140a55ae

commit 647a66c920 (parent 75f5301e26)
@@ -43,8 +43,8 @@ def check_process_count(remote, process, count):
     :param remote: SSH connection to the node.
     :type remote: SSHClient
-    :param service_name: the process name to match.
-    :type service_name: str
+    :param process: the process name to match.
+    :type process: str
     :param count: the number of processes to match.
     :type count: int
     :returns: list of PIDs.
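
For reference, a minimal usage sketch of the renamed argument. The process name, the environment handle and the module location of check_process_count are assumptions (the file path of this hunk is not shown in the page); only the keyword names come from the new signature.

    from fuelweb_test import logger

    from stacklight_tests.helpers import remote_ops


    def example_process_check(env):
        # Illustrative check: expect exactly three "hekad" processes on the
        # master node and log the PIDs the helper returns.
        with env.d_env.get_admin_remote() as remote:
            pids = remote_ops.check_process_count(remote, process="hekad",
                                                  count=3)
            logger.info("hekad PIDs: {}".format(pids))
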
@@ -21,6 +21,9 @@ from devops.helpers import helpers
 from fuelweb_test import logger
 from proboscis import asserts

+from stacklight_tests.helpers import remote_ops
+from stacklight_tests import settings
+

 PLUGIN_PACKAGE_RE = re.compile(r'([^/]+)-(\d+\.\d+)-(\d+\.\d+\.\d+)')
@@ -175,16 +178,16 @@ class PluginHelper(object):
         """Assign roles to nodes and deploy the cluster.

         :param nodes_roles: nodes to roles mapping.
-        :type name: dict
+        :type nodes_roles: dict
         :param verify_network: whether or not network verification should be
             run before the deployment (default: False).
-        :type settings: boolean
+        :type verify_network: boolean
         :param update_interfaces: whether or not interfaces should be updated
             before the deployment (default: True).
-        :type settings: boolean
+        :type update_interfaces: boolean
         :param check_services: whether or not OSTF tests should run after the
             deployment (default: True).
-        :type settings: boolean
+        :type check_services: boolean
         :returns: None
         """
         self.fuel_web.update_nodes(self.cluster_id, nodes_roles,
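
A minimal call matching the corrected docstring. The helper instance, role layout and node labels are illustrative and not taken from this change; only the parameter names reflect the docstring above.

    def deploy_example(helper):
        # "helper" is an existing PluginHelper instance.
        helper.deploy_cluster(
            nodes_roles={
                'slave-01': ['controller'],
                'slave-02': ['compute'],
                'slave-03': ['compute'],
            },
            verify_network=True,       # run network verification first
            update_interfaces=True,    # update node interfaces before deploy
            check_services=True)       # run OSTF after the deployment
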
@@ -441,3 +444,81 @@ class PluginHelper(object):
         result = self.nailgun_client.put_deployment_tasks_for_cluster(
             self.cluster_id, data=task_ids, node_id=node_ids)
         self.fuel_web.assert_task_success(result, timeout=timeout)
+
+    def apply_maintenance_update(self):
+        """Apply maintenance updates on the whole cluster."""
+        logger.info("Applying maintenance updates on master node")
+        self.env.admin_install_updates()
+
+        logger.info("Applying maintenance updates on slaves")
+        slaves_mu_script_url = (
+            "https://github.com/Mirantis/tools-sustaining/"
+            "raw/master/scripts/mos_apply_mu.py")
+
+        path_to_mu_script = "/tmp/mos_apply_mu.py"
+
+        with self.env.d_env.get_admin_remote() as remote:
+            remote.check_call("wget {uri} -O {path}".format(
+                uri=slaves_mu_script_url,
+                path=path_to_mu_script)
+            )
+
+            remote.check_call(
+                "python {path} "
+                "--env-id={identifier} "
+                "--user={username} "
+                "--pass={password} "
+                "--tenant={tenant_name} --update".format(
+                    path=path_to_mu_script,
+                    identifier=self.cluster_id,
+                    **settings.KEYSTONE_CREDS
+                )
+            )
+
+        controllers = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
+            self.cluster_id, roles=['controller', ])
+
+        computes = self.fuel_web.get_nailgun_cluster_nodes_by_roles(
+            self.cluster_id, roles=['compute', ])
+
+        logger.info("Restarting all OpenStack services")
+
+        logger.info("Restarting services on controllers")
+        ha_services = (
+            "p_heat-engine",
+            "p_neutron-plugin-openvswitch-agent",
+            "p_neutron-dhcp-agent",
+            "p_neutron-metadata-agent",
+            "p_neutron-l3-agent")
+        non_ha_services = (
+            "heat-api-cloudwatch",
+            "heat-api-cfn",
+            "heat-api",
+            "cinder-api",
+            "cinder-scheduler",
+            "nova-objectstore",
+            "nova-cert",
+            "nova-api",
+            "nova-consoleauth",
+            "nova-conductor",
+            "nova-scheduler",
+            "nova-novncproxy",
+            "neutron-server",
+        )
+        for controller in controllers:
+            with self.fuel_web.get_ssh_for_nailgun_node(
+                    controller) as remote:
+                for service in ha_services:
+                    remote_ops.manage_pacemaker_service(remote, service)
+                for service in non_ha_services:
+                    remote_ops.manage_initctl_service(remote, service)
+
+        logger.info("Restarting services on computes")
+        compute_services = (
+            "neutron-plugin-openvswitch-agent",
+            "nova-compute",
+        )
+        for compute in computes:
+            with self.fuel_web.get_ssh_for_nailgun_node(compute) as remote:
+                for service in compute_services:
+                    remote_ops.manage_initctl_service(remote, service)
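
The slave-update command assembled above expands as shown in this standalone sketch. The credential values are placeholders; settings.KEYSTONE_CREDS is assumed to carry the username, password and tenant_name keys that the format string requires.

    # Placeholder credentials standing in for settings.KEYSTONE_CREDS.
    keystone_creds = {"username": "admin",
                      "password": "admin",
                      "tenant_name": "admin"}

    cmd = ("python {path} "
           "--env-id={identifier} "
           "--user={username} "
           "--pass={password} "
           "--tenant={tenant_name} --update".format(
               path="/tmp/mos_apply_mu.py",
               identifier=1,
               **keystone_creds))
    print(cmd)
    # python /tmp/mos_apply_mu.py --env-id=1 --user=admin --pass=admin
    # --tenant=admin --update
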
@@ -80,3 +80,31 @@ def get_pids_of_process(remote, name):
     if result['exit_code'] != 0:
         return []
     return result['stdout'][0].strip().split()
+
+
+def manage_pacemaker_service(remote, name, operation="restart"):
+    """Operate HA service on remote node.
+
+    :param remote: SSH connection to the node.
+    :type remote: SSHClient
+    :param name: service name.
+    :type name: str
+    :param operation: type of operation, usually start, stop or restart.
+    :type operation: str
+    """
+    remote.check_call("crm resource {operation} {service}".format(
+        operation=operation, service=name))
+
+
+def manage_initctl_service(remote, name, operation="restart"):
+    """Operate service on remote node.
+
+    :param remote: SSH connection to the node.
+    :type remote: SSHClient
+    :param name: service name.
+    :type name: str
+    :param operation: type of operation, usually start, stop or restart.
+    :type operation: str
+    """
+    remote.check_call("initctl {operation} {service}".format(
+        operation=operation, service=name))
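
An illustrative call pattern for the two new helpers. The node handle is assumed to be obtained elsewhere (for example via fuel_web.get_ssh_for_nailgun_node); the service names come from the lists used in apply_maintenance_update above.

    from stacklight_tests.helpers import remote_ops


    def restart_examples(remote):
        # "remote" is an open SSHClient connection to a controller node.
        # Runs "crm resource restart p_heat-engine" (Pacemaker-managed).
        remote_ops.manage_pacemaker_service(remote, "p_heat-engine")
        # Runs "initctl stop nova-api", then "initctl start nova-api".
        remote_ops.manage_initctl_service(remote, "nova-api", operation="stop")
        remote_ops.manage_initctl_service(remote, "nova-api", operation="start")
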
@@ -242,6 +242,58 @@ class TestNodesToolchain(api.ToolchainApi):

         self.helpers.run_ostf()

+    @test(depends_on_groups=["deploy_toolchain"],
+          groups=["check_toolchain_after_maintenance_update",
+                  "system", "toolchain", "maintenance_update"])
+    @log_snapshot_after_test
+    def check_toolchain_after_maintenance_update(self):
+        """Check work after applying maintenance update.
+
+        Scenario:
+            1. Revert the snapshot with 3 deployed nodes
+            2. Get PIDs of the services which were launched
+               on controller/compute/storage/etc nodes by the plugin and store them
+            3. Apply the maintenance update
+            4. Get PIDs of the services which were launched
+               on controller/compute/storage/etc nodes by the plugin
+               and verify that they were not changed since the last check
+            5. Run OSTF
+
+        Duration 240m
+        """
+        self.env.revert_snapshot("deploy_toolchain")
+
+        ready_nodes_before = self.helpers.get_all_ready_nodes()
+
+        ready_nodes_hostnames_before = {node["hostname"]
+                                        for node in ready_nodes_before}
+
+        pids_before = self.get_pids_of_services()
+
+        self.helpers.apply_maintenance_update()
+
+        ready_nodes_hostnames_after = {node["hostname"] for node
+                                       in self.helpers.get_all_ready_nodes()}
+
+        asserts.assert_equal(
+            ready_nodes_hostnames_before, ready_nodes_hostnames_after,
+            "List of ready nodes is not equal, "
+            "before maintenance update: {}, "
+            "after maintenance update: {}.".format(
+                ready_nodes_hostnames_before, ready_nodes_hostnames_after)
+        )
+
+        pids_after = self.get_pids_of_services()
+        asserts.assert_equal(
+            pids_after, pids_before,
+            "PIDs of services are not equal, "
+            "before maintenance update: {}, "
+            "after maintenance update: {}.".format(pids_before, pids_after))
+
+        self.check_plugins_online()
+
+        self.helpers.run_ostf()
+
     @test(depends_on_groups=["deploy_ha_toolchain"],
           groups=["shutdown_infrastructure_alerting_node_in_toolchain",
                   "failover", "toolchain", "system", "destructive"])
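
The PID comparison above catches unexpected restarts: a restarted service gets a new PID, so the before/after snapshots differ. A standalone sketch of the idea; the return shape of get_pids_of_services and the process names are assumptions, not taken from this change.

    from proboscis import asserts

    # Assumed shape: {node_name: {process_name: [pids]}}.
    pids_before = {"node-1": {"hekad": ["1234"], "collectd": ["2345"]}}
    pids_after = {"node-1": {"hekad": ["1234"], "collectd": ["2345"]}}

    # Passes only if no monitored process was restarted (same PIDs everywhere).
    asserts.assert_equal(
        pids_after, pids_before,
        "PIDs of services are not equal, "
        "before maintenance update: {}, "
        "after maintenance update: {}.".format(pids_before, pids_after))
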