
Patch [1] introduced a unified way to restart services. As it turned out, globbing does not work as expected when a service is inactive (systemd glob patterns only match units that are currently loaded, which inactive services usually are not). This patch makes reset_node_service() work properly with inactive services as well. Additionally, _get_vm_id_by_name() in test_vrrp.py was adjusted to work properly when the VM hostname contains a domain name.

[1] https://review.opendev.org/c/x/whitebox-neutron-tempest-plugin/+/914116

Change-Id: I0c4222165b89ac5673bc1d6d92899761b921b366
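For illustration: the hostname reported by the guest may be fully qualified (for example something like vm1.novalocal), while the server name recorded at creation time is the short name, so _get_vm_id_by_name() checks whether the server name is contained in the reported hostname rather than comparing for equality. Below is a minimal standalone sketch of that matching logic; it is not part of the patch, the sample names, IDs, and domain suffix are made up, and the data structure simply mirrors what _create_server() returns.

# Minimal sketch (not part of the patch) of the substring-based matching
# in _get_vm_id_by_name(); names, IDs and the domain suffix are made up.
sample_vms = [
    {'server': {'name': 'vm1', 'id': 'id-vm1'}},
    {'server': {'name': 'vm2', 'id': 'id-vm2'}},
]


def get_vm_id_by_name(name, vms):
    # A substring check tolerates a domain suffix in the reported hostname.
    for vm in vms:
        if vm['server']['name'] in name:
            return vm['server']['id']
    return None


assert get_vm_id_by_name('vm1.novalocal', sample_vms) == 'id-vm1'  # FQDN
assert get_vm_id_by_name('vm2', sample_vms) == 'id-vm2'  # short hostname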
# Copyright 2024 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from neutron_lib import constants
from neutron_tempest_plugin.common import ssh
from neutron_tempest_plugin import config
from neutron_tempest_plugin import exceptions
from oslo_log import log
from tempest.common import compute
from tempest.common import utils
from tempest.common import waiters
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators

from whitebox_neutron_tempest_plugin.tests.scenario import base

CONF = config.CONF
LOG = log.getLogger(__name__)

keepalived_config_template = """vrrp_instance VIP_1 {
    state MASTER
    interface %s
    virtual_router_id 51
    priority 150
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass secretpass
    }
    virtual_ipaddress {
        %s
    }
}
"""


def get_keepalived_config(interface, vip_ip):
    return keepalived_config_template % (interface, vip_ip)

class VrrpTest(base.BaseTempestTestCaseAdvanced):
    credentials = ['primary', 'admin']

    @classmethod
    @utils.requires_ext(extension="router", service="network")
    def resource_setup(cls):
        super(VrrpTest, cls).resource_setup()
        # Create keypair with admin privileges
        cls.keypair = cls.create_keypair()
        # Create security group with admin privileges
        cls.secgroup = cls.create_security_group(
            name=data_utils.rand_name('secgroup'))
        # Execute funcs to achieve ssh, ICMP and VRRP capabilities
        cls.create_loginable_secgroup_rule(secgroup_id=cls.secgroup['id'])
        cls.create_pingable_secgroup_rule(secgroup_id=cls.secgroup['id'])
        cls.create_security_group_rule(security_group_id=cls.secgroup['id'],
                                       protocol=constants.PROTO_NAME_VRRP,
                                       direction=constants.INGRESS_DIRECTION)

    def _create_server(self, port, name=None, scheduler_hints=None):
        if not name:
            name = data_utils.rand_name('vm')
        params = {
            'flavor_ref': self.flavor_ref,
            'image_ref': self.image_ref,
            'key_name': self.keypair['name'],
            'name': name
        }
        if (scheduler_hints and CONF.compute.min_compute_nodes > 1 and
                compute.is_scheduler_filter_enabled("DifferentHostFilter")):
            params['scheduler_hints'] = scheduler_hints
        vm = self.create_server(networks=[{'port': port['id']}], **params)
        vm['server']['name'] = name
        return vm

    def _get_vm_host(self, server_id):
        server_details = self.os_admin.servers_client.show_server(server_id)
        return server_details['server']['OS-EXT-SRV-ATTR:host']

    def _check_keepalived_on_server(self, ssh_client, server_id):
        try:
            ssh_client.execute_script('PATH=$PATH:/usr/sbin which keepalived')
        except exceptions.SSHScriptFailed:
            raise self.skipException(
                "keepalived is not available on server %s" % (server_id))

    def _prepare_server(self, ssh_client, interface, vip_ip):
        config_text = get_keepalived_config(interface, vip_ip)
        config_file = 'keepalived.conf'
        ssh_client.execute_script(
            'echo "{0}" > /tmp/{1};'
            'sudo mv -Z /tmp/{1} /etc/keepalived/'.format(
                config_text, config_file))
        ssh_client.exec_command("sudo systemctl restart keepalived")
        # restart and make sure keepalived is active
        self.reset_node_service('keepalived', ssh_client)

    @staticmethod
    def _get_vm_id_by_name(name, vms):
        # The name obtained from the guest may be a FQDN (hostname plus
        # domain name), so check for containment rather than equality.
        for vm in vms:
            if vm['server']['name'] in name:
                return vm['server']['id']
        return None

    def _get_client(self, ip_address, proxy_client=None):
        return ssh.Client(ip_address,
                          self.username,
                          pkey=self.keypair['private_key'],
                          proxy_client=proxy_client)

    @decorators.idempotent_id('f88ca220-eea2-48d2-9cac-3f382908cb37')
    def test_vrrp_vip_failover(self):
        """This test verifies traffic flow during VRRP VIP failover

        The aim of the test is to validate that in case the master VM
        becomes unavailable, traffic to the VIP is directed to the
        second VM.

        Recommended topology:
        Controller node plus at least 2 compute nodes.

        Scenario:
        - Create a port for the VRRP VIP and ports for VMs with an
          allowed address pair configured to the VIP IP address
        - Attach a FIP to each one of these ports, including the VIP. We will
          differentiate between the private VIP and the public (FIP) VIP
        - Create two VMs on different compute nodes
        - Set up VRRP between the VMs using keepalived
        - Create a proxy VM with a normal port and a FIP. This VM is neither
          part of the VRRP VIP configuration, nor is keepalived installed
          on it
        - Test traffic to the public VIP (login via ssh)
        - Test traffic to the private VIP through the proxy VM
        - Kill the active VM
        - Test traffic to the public VIP. Traffic should now flow to the
          second VM
        - Test traffic to the private VIP through the proxy VM. Traffic should
          now flow to the second VM
        """
        network = self.create_network()
        subnet = self.create_subnet(network, cidr="192.168.100.0/24")
        router = self.create_router_by_client()
        self.create_router_interface(router['id'], subnet['id'])
        ports = {'vip': {}, 'vm1': {}, 'vm2': {}}

        ports['vip']['port'] = self.create_port(
            network=network)
        vip_ip = ports['vip']['port']['fixed_ips'][0]['ip_address']

        vm_names = ['vm1', 'vm2']
        for vm in vm_names:
            ports[vm]['port'] = self.create_port(
                network=network, security_groups=[self.secgroup['id']],
                allowed_address_pairs=[{"ip_address": vip_ip}])

        for key in ports.keys():
            ports[key]['fip'] = self.create_floatingip(
                port=ports[key]['port'])

        vms = []
        vm1 = self._create_server(port=ports['vm1']['port'], name='vm1')
        vm2 = self._create_server(
            port=ports['vm2']['port'], name='vm2',
            scheduler_hints={'different_host': vm1['server']['id']})
        vms = [vm1, vm2]

        if (self._get_vm_host(vm1['server']['id']) ==
                self._get_vm_host(vm2['server']['id']) and
                CONF.compute.min_compute_nodes > 1):
            raise self.skipException(
                "VMs are running on the same host. "
                "Make sure you have DifferentHostFilter enabled in nova.conf "
                "in order to cover multi-node scenario properly.")

        for vm in vm_names:
            ports[vm]['client'] = ssh.Client(
                ports[vm]['fip']['floating_ip_address'],
                self.username,
                pkey=self.keypair['private_key'])
            interface = ports[vm]['client'].exec_command(
                "PATH=$PATH:/usr/sbin ip route get default 8.8.8.8 | "
                "head -1 | cut -d ' ' -f 5").rstrip()
            self._check_keepalived_on_server(ports[vm]['client'], vm)
            self._prepare_server(ports[vm]['client'], interface, vip_ip)

        # create proxy vm
        port_vm_proxy = self.create_port(
            network=network, security_groups=[self.secgroup['id']])
        self._create_server(port=port_vm_proxy, name='vm_proxy')
        fip_vm_proxy = self.create_floatingip(port=port_vm_proxy)
        proxy_client = ssh.Client(fip_vm_proxy['floating_ip_address'],
                                  self.username,
                                  pkey=self.keypair['private_key'])

        # verify public VIP connectivity
        ports['vip']['client'] = self._get_client(
            ports['vip']['fip']['floating_ip_address'])
        master_host = ports['vip']['client'].exec_command(
            'hostname').rstrip()
        LOG.debug('(obtained from public VIP) master_host = ' + master_host)
        # verify private VIP connectivity
        private_vip_client = self._get_client(
            vip_ip, proxy_client=proxy_client)
        master_host_private = private_vip_client.exec_command(
            'hostname').rstrip()
        LOG.debug('(obtained from private VIP) master_host = ' +
                  master_host_private)
        self.assertEqual(master_host, master_host_private)

        LOG.debug('Stopping master host')
        master_host_id = self._get_vm_id_by_name(master_host, vms)
        self.os_primary.servers_client.stop_server(master_host_id)
        waiters.wait_for_server_status(self.os_primary.servers_client,
                                       master_host_id, 'SHUTOFF')

        # verify public VIP connectivity
        ports['vip']['client'] = self._get_client(
            ports['vip']['fip']['floating_ip_address'])
        new_master_host = ports['vip']['client'].exec_command(
            'hostname').rstrip()
        LOG.debug('(obtained from public VIP) new_master_host = ' +
                  new_master_host)
        self.assertNotEqual(master_host, new_master_host)
        # verify private VIP connectivity
        private_vip_client = self._get_client(
            vip_ip, proxy_client=proxy_client)
        new_master_host_private = private_vip_client.exec_command(
            'hostname').rstrip()
        LOG.debug('(obtained from private VIP) new_master_host = ' +
                  new_master_host_private)
        self.assertEqual(new_master_host, new_master_host_private)