Implement node discovery
Since some tests from the plugin require a list of nodes to be available, a discover_nodes() function was implemented using the OpenStack API. This ensures that node discovery works properly on environments deployed with any installer. The multicast tests were also adjusted to prevent the tempest node from rebooting itself on a single-node environment.

Change-Id: I80dd0ba855a63952d12214a4e7e9fd9c334e312a
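For context, here is a minimal sketch (not part of the change itself) of how a test class derived from BaseTempestWhiteboxTestCase is expected to consume the discovered node list. The class and test names are invented; the skip guard and the service-restart pattern mirror the multicast test changes in the diff below.

class ExampleWhiteboxTest(BaseTempestWhiteboxTestCase):
    """Hypothetical consumer of the new discover_nodes() helper."""

    @classmethod
    def resource_setup(cls):
        super(ExampleWhiteboxTest, cls).resource_setup()
        # Build cls.nodes once per class via the OpenStack API
        # (network agents + hypervisors), regardless of the installer used.
        cls.discover_nodes()

    def test_restart_service_on_compute_nodes(self):
        # On a single-node environment the tempest node would end up
        # restarting its own services, so skip instead.
        if len(self.nodes) == 1:
            raise self.skipException(
                "This test is not supported on a single-node environment")
        for node in self.nodes:
            # Each node dict exposes an ssh client plus role flags:
            # is_compute, is_networker and is_controller.
            if node['is_compute'] and not node['is_controller']:
                node['client'].exec_command(
                    "sudo systemctl restart ovs-vswitchd.service")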
playbooks/preparations.yaml (new file, 16 lines)
@@ -0,0 +1,16 @@
- hosts: all
  roles:
    - ensure-pip

  tasks:
    - name: crudini
      pip:
        name: crudini
        state: present
      become: yes
    - name: Setup tempest SSH key
      include_role:
        name: copy-build-sshkey
      vars:
        ansible_become: yes
        copy_sshkey_target_user: 'tempest'
@@ -77,5 +77,11 @@ WhiteboxNeutronPluginOptions = [
    cfg.IntOpt('sriov_vfs_per_pf',
               default=5,
               help='Number of available VF (Virtual Function) ports per'
-                   'PF interface on the environment under test')
+                   'PF interface on the environment under test'),
    cfg.StrOpt('overcloud_ssh_user',
               default='zuul',
               help='Common user to access openstack nodes via ssh.'),
    cfg.StrOpt('overcloud_key_file',
               default='/home/tempest/.ssh/id_rsa',
               help='ssh private key file path for overcloud nodes access.')
]
@@ -18,7 +18,11 @@ import re
import time

import netaddr
from netifaces import AF_INET
from netifaces import ifaddresses
from netifaces import interfaces
from neutron_lib import constants
from neutron_tempest_plugin.common import shell
from neutron_tempest_plugin.common import ssh
from neutron_tempest_plugin.common import utils as common_utils
from neutron_tempest_plugin.scenario import base
@@ -80,6 +84,84 @@ class BaseTempestWhiteboxTestCase(base.BaseTempestTestCase):
                    subnet['ip_version'] == constants.IP_VERSION_4):
                return subnet['gateway_ip']

    @staticmethod
    def get_node_client(host):
        return ssh.Client(
            host=host, username=WB_CONF.overcloud_ssh_user,
            key_filename=WB_CONF.overcloud_key_file)

    def get_local_ssh_client(self, network):
        return ssh.Client(
            host=self._get_local_ip_from_network(
                self.get_subnet_cidr(network, 4)),
            username=shell.execute_local_command('whoami').stdout.rstrip(),
            key_filename=WB_CONF.overcloud_key_file)

    def get_subnet_cidr(self, network, ip_version):
        for subnet_id in network['subnets']:
            subnet = self.os_admin.network_client.show_subnet(
                subnet_id)['subnet']
            if subnet['ip_version'] == ip_version:
                return subnet['cidr']
        return None

    @staticmethod
    def _get_local_ip_from_network(network):
        host_ip_addresses = [ifaddresses(iface)[AF_INET][0]['addr']
                             for iface in interfaces()
                             if AF_INET in ifaddresses(iface)]
        for ip_address in host_ip_addresses:
            if netaddr.IPAddress(ip_address) in netaddr.IPNetwork(network):
                return ip_address
        return None

    @classmethod
    def append_node(cls, host, is_compute=False, is_networker=False):

        hostname = host.split('.')[0]
        for node in cls.nodes:
            if node['name'] == hostname:
                if not node['is_networker']:
                    node['is_networker'] = is_networker
                if not node['is_compute']:
                    node['is_compute'] = is_compute
                return

        if WB_CONF.openstack_type == 'tripleo':
            host = hostname + ".ctlplane"
        node = {'name': hostname,
                'client': cls.get_node_client(host),
                'is_networker': is_networker,
                'is_controller': False,
                'is_compute': is_compute}
        # Here we are checking if there are controller-specific
        # processes running on the node
        output = node['client'].exec_command(
            r"ps ax | grep 'rabbit\|galera' | grep -v grep || true")
        if output.strip() != "":
            node['is_controller'] = True
        cls.nodes.append(node)

    @classmethod
    def discover_nodes(cls):
        agents = cls.os_admin.network.AgentsClient().list_agents()['agents']
        if cls.has_ovn_support:
            l3_agent_hosts = [
                agent['host'] for agent in agents
                if agent['agent_type'] == 'OVN Controller Gateway agent']
        else:
            l3_agent_hosts = [
                agent['host'] for agent in agents
                if agent['binary'] == 'neutron-l3-agent']
        compute_hosts = [
            host['hypervisor_hostname'] for host
            in cls.os_admin.hv_client.list_hypervisors()['hypervisors']]
        cls.nodes = []
        for host in compute_hosts:
            cls.append_node(host, is_compute=True)
        for host in l3_agent_hosts:
            cls.append_node(host, is_networker=True)

    def _create_server_for_topology(
            self, network_id=None, port_type=None,
            different_host=None, port_qos_policy_id=None):
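To make the structure built by discover_nodes() and append_node() concrete, here is a hypothetical example of what cls.nodes could look like on a two-node environment; the hostnames are invented and the client values stand in for the ssh.Client objects that get_node_client() returns.

# Hypothetical contents of cls.nodes after discover_nodes() on a two-node
# cloud: "controller-0" hosts rabbitmq/galera plus the gateway agent, while
# "compute-1" only runs nova-compute.  append_node() strips the domain from
# each hostname and merges role flags rather than adding a host twice.
nodes = [
    {'name': 'compute-1',
     'client': '<ssh.Client for compute-1.ctlplane>',  # from get_node_client()
     'is_compute': True,
     'is_networker': False,
     'is_controller': False},
    {'name': 'controller-0',
     'client': '<ssh.Client for controller-0.ctlplane>',
     'is_compute': False,
     'is_networker': True,
     'is_controller': True},
]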
@@ -232,6 +314,7 @@ class TrafficFlowTest(BaseTempestWhiteboxTestCase):
            raise cls.skipException("IPv4 gateway is not configured "
                                    "for public network or public_network_id "
                                    "is not configured.")
        cls.discover_nodes()

    def _start_captures(self, interface, filters):
        for node in self.nodes:
@@ -365,7 +365,7 @@ class BaseMulticastTest(object):

    def restart_openvswitch_on_compute_nodes(self):
        for node in self.nodes:
-           if node['type'] == 'compute':
+           if node['is_compute'] is True and node['is_controller'] is False:
                node['client'].exec_command(
                    "sudo systemctl restart ovs-vswitchd.service")
        time.sleep(20)
@@ -668,6 +668,9 @@ class MulticastTestIPv4Common(MulticastTestIPv4):
            raise self.skipException(
                "Nodes info not available. Test won't be able to restart "
                "openvswitch service on a node.")
        if len(self.nodes) == 1:
            raise self.skipException(
                "This test is not supported on a single-node environment")
        mcast_groups = [next(self.multicast_group_iter)
                        for _ in range(self.mcast_groups_count)]
        sender, receivers, unregistered, _ = self._prepare_igmp_snooping_test(
@@ -694,7 +697,7 @@ class MulticastTestIPv4Sriov(MulticastTestIPv4):
        available_sriov_ports = (
            WB_CONF.sriov_pfs_per_host *
            len([node for node in self.nodes
-                if node['type'] == 'compute']))
+                if node['is_compute'] is True]))
        if (available_sriov_ports < required_sriov_ports):
            self.skipTest(
                'Not enough SR-IOV ports ({}), while required {}'.format(
@@ -795,9 +798,13 @@ class MulticastTestIPv4OvnBase(MulticastTestIPv4, base.BaseTempestTestCaseOvn):

    def restart_ovn_controller_on_compute_nodes(self):
        for node in self.nodes:
-           if node['type'] == 'compute':
-               node['client'].exec_command(
-                   "sudo podman restart ovn_controller")
+           if node['is_compute'] is True and node['is_controller'] is False:
+               service_prefix = (
+                   "tripleo_" if WB_CONF.openstack_type == "tripleo"
+                   else "")
+               cmd = ("sudo systemctl restart {}"
+                      "ovn-controller.service".format(service_prefix))
+               node['client'].exec_command(cmd)
        time.sleep(10)
@@ -244,6 +244,7 @@
- job:
    name: whitebox-neutron-tempest-plugin-ovn
    parent: whitebox-neutron-tempest-plugin-base-nested-switch
    pre-run: playbooks/preparations.yaml
    timeout: 10800
    # TODO(mblue): Remove line when changing ovn job to be HA job as well.
    nodeset: whitebox-neutron-single-node-centos-9-stream
@@ -398,7 +399,9 @@
- job:
    name: whitebox-neutron-tempest-plugin-openvswitch
    parent: whitebox-neutron-tempest-plugin-base-nested-switch
-   pre-run: playbooks/multinode-devstack-ovs-tempest.yaml
+   pre-run:
+     - playbooks/preparations.yaml
+     - playbooks/multinode-devstack-ovs-tempest.yaml
    timeout: 10000
    vars:
      configure_swap_size: 2048