Merge "Multiple external networks for dynamic workloads"
This commit is contained in:
commit
434d4f5109
@@ -591,6 +591,10 @@ workloads:
       shift_on_stack_workload: poddensity
       shift_on_stack_kubeconfig_paths:
         - /home/stack/.kube/config
+      # num_external_networks are the number of the external networks to be
+      # created as part of rally context for dynamic workloads. These external
+      # networks will be used in a round robin fashion by the iterations.
+      num_external_networks: 16
       # workloads can be 'all', a single workload(Eg. : create_delete_servers),
       # or a comma separated string(Eg. : create_delete_servers,migrate_servers).
       # Currently supported workloads : create_delete_servers, migrate_servers
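The round-robin behaviour described in the new config comment reduces to a modulo over Rally's 1-based iteration counter. A minimal sketch (the helper name is illustrative; the "external_networks" list and "iteration" counter are the context keys used later in this diff):

    # Sketch only: pick the external network for a given Rally iteration.
    def pick_external_network(external_networks, iteration, num_external_networks):
        # Iteration 1 maps to index 0; the index wraps every
        # num_external_networks iterations, giving round-robin reuse.
        index = (iteration - 1) % num_external_networks
        return external_networks[index]["id"]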
@@ -349,6 +349,13 @@ class NeutronUtils(neutron_utils.NeutronScenario):
         """
         return self.admin_clients("neutron").show_subnet(subnet_id)

+    def show_port(self, port_id):
+        """Show information of a given port
+        :param port_id: ID of port to look up
+        :returns: details of the port
+        """
+        return self.admin_clients("neutron").show_port(port_id)
+
     def get_router_from_context(self):
         """Retrieve router that was created as part of Rally context
         :returns: router object that is part of Rally context
@@ -55,12 +55,17 @@ class DynamicWorkload(vm.VMDynamicScenario, trunk.TrunkDynamicScenario,
             num_trunk_vms, num_add_subports, num_add_subports_trunks, num_delete_subports,
             num_delete_subports_trunks, octavia_image, octavia_flavor, user, user_data_file, num_lbs,
             num_pools, num_clients, delete_num_lbs, delete_num_members, num_create_vms, num_delete_vms,
-            provider_phys_net, iface_name, iface_mac, num_vms_provider_net,
+            provider_phys_net, iface_name, iface_mac, num_vms_provider_net, num_external_networks,
            shift_on_stack_job_iterations, shift_on_stack_qps, shift_on_stack_burst,
            shift_on_stack_workload, shift_on_stack_kubeconfig_paths, workloads="all",
            router_create_args=None, network_create_args=None,
            subnet_create_args=None, **kwargs):

+        context_ext_net_id = self.context["external_networks"][((self.context["iteration"]-1)
+                                                                % num_external_networks)]["id"]
+        self.log_info("Using external network {} from context for iteration {}".format(
+            context_ext_net_id, self.context["iteration"]))
+
         workloads_list = workloads.split(",")
         self.trunk_vm_user = "centos"
         self.jumphost_user = "cirros"
@@ -76,14 +81,14 @@ class DynamicWorkload(vm.VMDynamicScenario, trunk.TrunkDynamicScenario,
         router_create_args["name"] = self.generate_random_name()
         router_create_args["tenant_id"] = self.context["tenant"]["id"]
         router_create_args.setdefault(
-            "external_gateway_info", {"network_id": ext_net_id, "enable_snat": True}
+            "external_gateway_info", {"network_id": context_ext_net_id, "enable_snat": True}
         )
         self.router = self._create_router(router_create_args)
         self.log_info("router {} created for this iteration".format(self.router))

         self.keypair = self.context["user"]["keypair"]

-        self.ext_net_name = self.clients("neutron").show_network(ext_net_id)["network"][
+        self.ext_net_name = self.clients("neutron").show_network(context_ext_net_id)["network"][
             "name"]

         try:
@@ -99,9 +104,18 @@ class DynamicWorkload(vm.VMDynamicScenario, trunk.TrunkDynamicScenario,
         if(workloads == "all" or "migrate_servers" in workloads_list or
            "swap_floating_ips_between_servers" in workloads_list or
            "stop_start_servers" in workloads_list):
-            self.boot_servers_with_fip(smallest_image, smallest_flavor, ext_net_id,
-                                       num_vms_to_create_with_fip,
-                                       network_create_args, subnet_create_args, **kwargs)
+            if self.context["iteration"] % 5 != 0:
+                self.boot_servers_with_fip(smallest_image, smallest_flavor, context_ext_net_id,
+                                           num_vms_to_create_with_fip,
+                                           network_create_args, subnet_create_args, **kwargs)
+            else:
+                # Every 5th iteration uses the router from rally context, which uses
+                # the default external network provided in browbeat-config.yaml as
+                # gateway. So we pass this default external network as a parameter
+                # for every 5th iteration.
+                self.boot_servers_with_fip(smallest_image, smallest_flavor, ext_net_id,
+                                           num_vms_to_create_with_fip,
+                                           network_create_args, subnet_create_args, **kwargs)

         if workloads == "all" or "migrate_servers" in workloads_list:
             self.migrate_servers_with_fip(num_vms_to_migrate)
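The branch added above boils down to a small selection rule. A condensed sketch (hypothetical helper, same 1-based iteration counter):

    # Sketch of the gateway choice above: every 5th iteration keeps the default
    # external network from browbeat-config.yaml, because that iteration reuses
    # the router created by the Rally context.
    def choose_boot_network(iteration, context_ext_net_id, default_ext_net_id):
        if iteration % 5 == 0:
            return default_ext_net_id
        return context_ext_net_id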
@@ -113,7 +127,7 @@ class DynamicWorkload(vm.VMDynamicScenario, trunk.TrunkDynamicScenario,
             self.stop_start_servers_with_fip(num_stop_start_vms)

         if workloads == "all" or "pod_fip_simulation" in workloads_list:
-            self.pod_fip_simulation(ext_net_id, trunk_image, trunk_flavor, smallest_image,
+            self.pod_fip_simulation(context_ext_net_id, trunk_image, trunk_flavor, smallest_image,
                                     smallest_flavor, num_initial_subports, num_trunk_vms)

         if workloads == "all" or "add_subports_to_random_trunks" in workloads_list:
@@ -28,6 +28,7 @@
 {% set shift_on_stack_burst = shift_on_stack_burst or 20 %}
 {% set shift_on_stack_workload = shift_on_stack_workload or 'poddensity' %}
 {% set shift_on_stack_kubeconfig_paths = shift_on_stack_kubeconfig_paths or ['/home/stack/.kube/config'] %}
+{% set num_external_networks = num_external_networks or 16 %}
 {% set router_external = router_external or True %}
 {% set sla_max_avg_duration = sla_max_avg_duration or 60 %}
 {% set sla_max_failure = sla_max_failure or 0 %}
@@ -80,6 +81,7 @@ BrowbeatPlugin.dynamic_workload:
       iface_mac: '{{ iface_mac }}'
       num_vms_provider_net: {{ num_vms_provider_net }}
       ext_net_id: '{{ext_net_id}}'
+      num_external_networks: {{ num_external_networks }}
       workloads: '{{workloads}}'
     runner:
       concurrency: {{concurrency}}
@@ -108,6 +110,10 @@ BrowbeatPlugin.dynamic_workload:
         external: {{router_external}}
         external_gateway_info:
           network_id: {{ext_net_id}}
+      create_external_networks:
+        num_external_networks: {{ num_external_networks }}
+        interface_name: '{{ iface_name }}'
+        provider_phys_net: '{{ provider_phys_net }}'
     sla:
       max_avg_duration: {{sla_max_avg_duration}}
       max_seconds_per_iteration: {{sla_max_seconds}}
rally/rally-plugins/dynamic-workloads/rally_context.py (new file, 208 lines)
@@ -0,0 +1,208 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from rally.task import context
+from rally.common import logging
+from rally.common import utils
+from rally import consts
+from rally_openstack import osclients
+from rally_openstack.wrappers import network as network_wrapper
+
+import subprocess
+
+LOG = logging.getLogger(__name__)
+
+
+@context.configure(name="create_external_networks", order=1000)
+class CreateExternalNetworksContext(context.Context):
+    """This plugin creates external networks with specified option."""
+
+    CONFIG_SCHEMA = {
+        "type": "object",
+        "$schema": consts.JSON_SCHEMA,
+        "additionalProperties": False,
+        "properties": {
+            "num_external_networks": {
+                "type": "integer",
+                "minimum": 1
+            },
+            "interface_name": {
+                "type": "string"
+            },
+            "provider_phys_net": {
+                "type": "string"
+            }
+        }
+    }
+
+    def _create_subnet(self, tenant_id, network_id, network_number):
+        """Create subnet for external network
+
+        :param tenant_id: ID of tenant
+        :param network_id: ID of external network
+        :param network_number: int, number for CIDR of subnet
+        :returns: subnet object
+        """
+        subnet_args = {
+            "subnet": {
+                "tenant_id": tenant_id,
+                "network_id": network_id,
+                "name": self.net_wrapper.owner.generate_random_name(),
+                "ip_version": 4,
+                "cidr": "172.31.{}.0/23".format(network_number),
+                "enable_dhcp": True,
+                "dns_nameservers": ["8.8.8.8", "8.8.4.4"]
+            }
+        }
+        return self.net_wrapper.client.create_subnet(subnet_args)["subnet"]
+
+    def setup(self):
+        """This method is called before the task starts."""
+        self.net_wrapper = network_wrapper.wrap(
+            osclients.Clients(self.context["admin"]["credential"]),
+            self,
+            config=self.config,
+        )
+        self.context["external_networks"] = []
+        self.context["external_subnets"] = {}
+        self.num_external_networks = self.config.get("num_external_networks", 16)
+        self.interface_name = self.config.get("interface_name", "ens7f1")
+
+        num_external_networks_created = 0
+
+        while num_external_networks_created < self.num_external_networks:
+            has_error_occured = False
+            for user, tenant_id in utils.iterate_per_tenants(
+                self.context.get("users", [])
+            ):
+                cmd = ["sudo", "ip", "link", "add", "link", self.interface_name, "name",
+                       "{}.{}".format(self.interface_name, num_external_networks_created + 1),
+                       "type", "vlan", "id", str(num_external_networks_created + 1)]
+                proc = subprocess.Popen(cmd)
+                proc.wait()
+                if proc.returncode == 0:
+                    LOG.debug("Creating vlan {} on interface {} was successful".format(
+                        num_external_networks_created + 1, self.interface_name))
+                else:
+                    LOG.exception("Creating vlan {} on interface {} failed".format(
+                        num_external_networks_created + 1, self.interface_name))
+                    has_error_occured = True
+                    break
+
+                cmd = ["sudo", "ip", "link", "set", "dev",
+                       "{}.{}".format(self.interface_name, num_external_networks_created + 1),
+                       "up"]
+                proc = subprocess.Popen(cmd)
+                proc.wait()
+                if proc.returncode == 0:
+                    LOG.debug("Setting vlan {} up on interface {} was successful".format(
+                        num_external_networks_created + 1, self.interface_name))
+                else:
+                    LOG.exception("Setting vlan {} up on interface {} failed".format(
+                        num_external_networks_created + 1, self.interface_name))
+                    has_error_occured = True
+                    break
+
+                cmd = ["sudo", "ip", "a", "a", "172.31.{}.0/23".format(
+                       num_external_networks_created*2 + 1), "dev",
+                       "{}.{}".format(self.interface_name, num_external_networks_created + 1)]
+                proc = subprocess.Popen(cmd)
+                proc.wait()
+                if proc.returncode == 0:
+                    LOG.debug("Adding IP range to interface {} was successful".format(
+                        self.interface_name))
+                else:
+                    LOG.exception("Adding IP range to interface {} failed".format(
+                        self.interface_name))
+                    has_error_occured = True
+                    break
+
+                try:
+                    kwargs = {
+                        "network_create_args": {
+                            "provider:network_type": "vlan",
+                            "provider:physical_network": self.config.get("provider_phys_net",
+                                                                         "datacentre"),
+                            "provider:segmentation_id": num_external_networks_created + 1,
+                            "router:external": True
+                        }
+                    }
+                    self.context["external_networks"].append(
+                        self.net_wrapper.create_network(tenant_id, **kwargs)
+                    )
+                    LOG.debug(
+                        "External network with id '%s' created as part of context"
+                        % self.context["external_networks"][-1]["id"]
+                    )
+                    num_external_networks_created += 1
+                except Exception as e:
+                    msg = "Can't create external network {} as part of context: {}".format(
+                        num_external_networks_created, e
+                    )
+                    LOG.exception(msg)
+                    has_error_occured = True
+                    break
+
+                try:
+                    subnet = self._create_subnet(tenant_id,
+                                                 self.context["external_networks"][-1]["id"],
+                                                 (num_external_networks_created - 1) * 2 + 1)
+                    self.context["external_subnets"][
+                        self.context["external_networks"][-1]["id"]] = subnet
+                    LOG.debug(
+                        "External subnet with id '%s' created as part of context"
+                        % subnet["id"]
+                    )
+                except Exception as e:
+                    msg = "Can't create external subnet {} as part of context: {}".format(
+                        num_external_networks_created, e
+                    )
+                    LOG.exception(msg)
+                    has_error_occured = True
+                    break
+
+            if has_error_occured:
+                break
+
+    def cleanup(self):
+        """This method is called after the task finishes."""
+        for i in range(self.num_external_networks):
+            try:
+                external_net = self.context["external_networks"][i]
+                external_net_id = external_net["id"]
+                external_subnet = self.context["external_subnets"][external_net_id]
+                external_subnet_id = external_subnet["id"]
+                self.net_wrapper._delete_subnet(external_subnet_id)
+                LOG.debug(
+                    "External subnet with id '%s' deleted from context"
+                    % external_subnet_id
+                )
+                self.net_wrapper.delete_network(external_net)
+                LOG.debug(
+                    "External network with id '%s' deleted from context"
+                    % external_net_id
+                )
+            except Exception as e:
+                msg = "Can't delete external network {} from context: {}".format(
+                    external_net_id, e
+                )
+                LOG.warning(msg)
+
+            cmd = ["sudo", "ip", "link", "delete", "{}.{}".format(self.interface_name, i + 1)]
+            proc = subprocess.Popen(cmd)
+            proc.wait()
+            if proc.returncode == 0:
+                LOG.debug("Deleting vlan {}.{} was successful".format(
+                    self.interface_name, i + 1))
+            else:
+                LOG.exception("Deleting vlan {}.{} failed".format(
+                    self.interface_name, i + 1))
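The numbering in this context follows the loop counter: the i-th external network (0-based) gets VLAN id i+1 on the chosen interface and the subnet 172.31.{2*i+1}.0/23, stepping the third octet by two so the /23 ranges stay apart. A small sketch of that arithmetic (helper name is illustrative):

    # Sketch of the VLAN id / CIDR arithmetic used in setup() above.
    def vlan_and_cidr(i):
        """Return (vlan_id, cidr) for the i-th external network, 0-based."""
        vlan_id = i + 1  # also used as provider:segmentation_id
        cidr = "172.31.{}.0/23".format(2 * i + 1)  # third octet steps by two
        return vlan_id, cidr

    # e.g. vlan_and_cidr(0) -> (1, "172.31.1.0/23"); vlan_and_cidr(1) -> (2, "172.31.3.0/23")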
@@ -74,6 +74,10 @@ class TrunkDynamicScenario(
         :param jump_fip: floating ip of jumphost
         """
         trunk = self.clients("neutron").show_trunk(trunk_id)
+        trunk_ext_net_id = self.get_ext_net_id_by_trunk(trunk["trunk"])
+        trunk_ext_net_name = self.clients("neutron").show_network(trunk_ext_net_id)[
+            "network"]["name"]
+
         subport_count = len(trunk["trunk"]["sub_ports"])
         subport_number_for_route = random.randint(1, subport_count)
         subport_for_route = self.clients("neutron").show_port(
@@ -83,7 +87,7 @@ class TrunkDynamicScenario(
         self.add_route_from_vm_to_jumphost(vm_fip, jump_fip, self.trunk_vm_user,
                                            subport_number_for_route,
                                            subnet_for_route["subnet"]["gateway_ip"])
-        subport_fip = self._create_floatingip(self.ext_net_name)["floatingip"]
+        subport_fip = self._create_floatingip(trunk_ext_net_name)["floatingip"]
         msg = "ping subport: {} with fip: {} of trunk: {} with fip: {} from jumphost" \
               " with fip: {}".format(subport_for_route["port"], subport_fip, trunk["trunk"],
                                      vm_fip, jump_fip)
@@ -116,13 +120,35 @@ class TrunkDynamicScenario(
         :param trunk: dict, trunk details
         :returns: floating ip of jumphost
         """
-        if trunk["description"].startswith("jumphost:"):
-            jumphost_fip = trunk["description"][9:]
+        trunk_details = trunk["description"].split("&&")
+        if trunk_details[0].startswith("jumphost:"):
+            jumphost_fip = trunk_details[0][9:]
             return jumphost_fip

-    def create_subnets_and_subports(self, subport_count):
+    def get_ext_net_id_by_trunk(self, trunk):
+        """Get external network id for a given trunk
+        :param trunk: dict, trunk details
+        :returns: external network id
+        """
+        trunk_details = trunk["description"].split("&&")
+        if trunk_details[1].startswith("ext_net_id:"):
+            ext_net_id = trunk_details[1][11:]
+        return ext_net_id
+
+    def get_router_by_trunk(self, trunk):
+        """Get router for a given trunk
+        :param trunk: dict, trunk details
+        :returns: router object
+        """
+        trunk_details = trunk["description"].split("&&")
+        if trunk_details[2].startswith("router:"):
+            router = self.show_router(trunk_details[2][7:])
+        return router
+
+    def create_subnets_and_subports(self, subport_count, router):
         """Create <<subport_count>> subnets and subports
         :param subport_count: int, number of subports to create
+        :param router: router object
         :returns: list of subnets, list of subports
         """
         subnets = []
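The helpers added above assume the trunk description is an "&&"-separated string, written later in this diff as jumphost:<fip>&&ext_net_id:<network id>&&router:<router id>; the slice offsets 9, 11 and 7 are simply the lengths of those prefixes. A standalone check with placeholder values:

    # Placeholder values; only the prefixes and slice offsets mirror the diff.
    description = "jumphost:192.0.2.10&&ext_net_id:net-uuid&&router:router-uuid"
    jumphost, ext_net, router = description.split("&&")
    assert jumphost[len("jumphost:"):] == jumphost[9:] == "192.0.2.10"
    assert ext_net[len("ext_net_id:"):] == ext_net[11:] == "net-uuid"
    assert router[len("router:"):] == router[7:] == "router-uuid"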
@@ -140,7 +166,7 @@ class TrunkDynamicScenario(
                     },
                 )
             )
-            self._add_interface_router(subnet[0]["subnet"], self.router["router"])
+            self._add_interface_router(subnet[0]["subnet"], router["router"])
         return subnets, subports

     def add_subports_to_trunk_and_vm(self, subports, trunk_id, vm_ssh, start_seg_id):
@@ -192,6 +218,7 @@ class TrunkDynamicScenario(
         network = self._create_network({})
         subnet = self._create_subnet(network, {})
         self._add_interface_router(subnet["subnet"], self.router["router"])
+        self.ext_net_id = ext_net_id

         kwargs = {}
         kwargs["nics"] = [{"net-id": network["network"]["id"]}]
@@ -211,7 +238,9 @@ class TrunkDynamicScenario(
         # Using tags for trunk returns an error,
         # so we instead use description.
         trunk_payload = {"port_id": parent["port"]["id"],
-                         "description": "jumphost:"+str(jump_fip)}
+                         "description": ("jumphost:" + str(jump_fip) +
+                                         "&&ext_net_id:" + str(self.ext_net_id) +
+                                         "&&router:" + str(self.router["router"]["id"]))}
         trunk = self._create_trunk(trunk_payload)
         self.acquire_lock(trunk["trunk"]["id"])
         kwargs["nics"] = [{"port-id": parent["port"]["id"]}]
@@ -222,7 +251,7 @@ class TrunkDynamicScenario(
                                                    **kwargs)
         vm_fip = vm[1]["ip"]

-        subnets, subports = self.create_subnets_and_subports(subport_count)
+        subnets, subports = self.create_subnets_and_subports(subport_count, self.router)

         msg = "Trunk VM: {} with Trunk: {} Port: {} Subports: {} Jumphost: {}" \
               "created".format(vm, trunk["trunk"], parent["port"],
@@ -261,8 +290,9 @@ class TrunkDynamicScenario(
             # Get updated trunk object, as the trunk may have
             # been changed in other iterations
             trunk = self.clients("neutron").show_trunk(trunk["id"])["trunk"]
+            trunk_router = self.get_router_by_trunk(trunk)

-            subnets, subports = self.create_subnets_and_subports(subport_count)
+            subnets, subports = self.create_subnets_and_subports(subport_count, trunk_router)

             trunk_server_fip = self.get_server_by_trunk(trunk)
             jump_fip = self.get_jumphost_by_trunk(trunk)
@@ -361,7 +391,9 @@ class TrunkDynamicScenario(
     def swap_floating_ips_between_random_subports(self):
         """Swap floating IPs between 2 randomly chosen subports from 2 trunks
         """
-        trunks = [trunk for trunk in self._list_trunks() if len(trunk["sub_ports"]) > 0]
+        trunks = [trunk for trunk in self._list_trunks() if (len(trunk["sub_ports"]) > 0 and
+                                                             self.ext_net_id ==
+                                                             self.get_ext_net_id_by_trunk(trunk))]

         if len(trunks) < 2:
             self.log_info("""Number of eligible trunks not sufficient
@@ -376,9 +408,13 @@ class TrunkDynamicScenario(
             if len(trunks_for_swapping) == 2:
                 break

+        self.log_info("Trunks for swapping : {}".format(trunks_for_swapping))
+
         if len(trunks_for_swapping) < 2:
             self.log_info("""Number of unlocked trunks not sufficient
                           for swapping floating IPs between trunk subports""")
+            for trunk in trunks_for_swapping:
+                self.release_lock(trunk["id"])
             return

         # Get updated trunk object, as the trunk may have
@@ -82,6 +82,9 @@ class VMDynamicScenario(dynamic_utils.NovaUtils,
         :param kwargs: dict, Keyword arguments to function
         """
         ext_net_name = None
+
+        self.ext_net_id = ext_net_id
+
         if ext_net_id:
             ext_net_name = self.clients("neutron").show_network(ext_net_id)["network"][
                 "name"
@@ -161,30 +164,33 @@ class VMDynamicScenario(dynamic_utils.NovaUtils,
     def swap_floating_ips_between_servers(self):
         """Swap floating IPs between servers
         """
-        eligible_servers = list(filter(lambda server: self._get_fip_by_server(server) is not False,
-                                self._get_servers_by_tag("migrate_swap_or_stopstart")))
+        kwargs = {"floating_network_id": self.ext_net_id}
+        eligible_floating_ips = self._list_floating_ips(**kwargs)["floatingips"]

-        servers_for_swapping = []
-        for server in eligible_servers:
-            if not self.acquire_lock(server.id):
-                continue
-            servers_for_swapping.append(server)
-            if len(servers_for_swapping) == 2:
-                break
+        floating_ips_to_swap = []
+        servers_to_swap = []
+
+        for floatingip in eligible_floating_ips:
+            fip_port_id = floatingip["port_id"]
+            port = self.show_port(fip_port_id)["port"]
+            if port["device_owner"] == "compute:nova":
+                server = self.show_server(port["device_id"])
+                if "migrate_swap_or_stopstart" in server.tags and self.acquire_lock(server.id):
+                    floating_ips_to_swap.append(floatingip)
+                    servers_to_swap.append(server)
+                    if len(servers_to_swap) == 2:
+                        break

-        if len(servers_for_swapping) < 2:
+        if len(servers_to_swap) < 2:
             self.log_info("""Number of unlocked servers not sufficient
                           for swapping floating IPs between servers""")
             return

-        kwargs = {"floating_ip_address": self._get_fip_by_server(servers_for_swapping[0])}
-        server1_fip = self._list_floating_ips(**kwargs)["floatingips"][0]
-
-        kwargs = {"floating_ip_address": self._get_fip_by_server(servers_for_swapping[1])}
-        server2_fip = self._list_floating_ips(**kwargs)["floatingips"][0]
+        server1_fip = floating_ips_to_swap[0]
+        server2_fip = floating_ips_to_swap[1]

-        server1_port = server1_fip["port_id"]
-        server2_port = server2_fip["port_id"]
+        server1_port_id = server1_fip["port_id"]
+        server2_port_id = server2_fip["port_id"]

         fip_update_dict = {"port_id": None}
         self.clients("neutron").update_floatingip(
@@ -200,11 +206,11 @@ class VMDynamicScenario(dynamic_utils.NovaUtils,
         self._wait_for_ping_failure(server2_fip["floating_ip_address"])

         # Swap floating IPs between server1 and server2
-        fip_update_dict = {"port_id": server2_port}
+        fip_update_dict = {"port_id": server2_port_id}
         self.clients("neutron").update_floatingip(
             server1_fip["id"], {"floatingip": fip_update_dict}
         )
-        fip_update_dict = {"port_id": server1_port}
+        fip_update_dict = {"port_id": server1_port_id}
         self.clients("neutron").update_floatingip(
             server2_fip["id"], {"floatingip": fip_update_dict}
         )
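The swap in the hunks above is a detach-then-reattach sequence against the Neutron floating IP API. A condensed sketch as a standalone helper (the function is ours; update_floatingip is the python-neutronclient call already used in the diff):

    def swap_fips(neutron, fip_a, fip_b):
        """Reassign two floating IPs to each other's ports (sketch only)."""
        port_a, port_b = fip_a["port_id"], fip_b["port_id"]
        # Detach both floating IPs first, as the scenario does before waiting
        # for ping failure and re-associating them.
        for fip in (fip_a, fip_b):
            neutron.update_floatingip(fip["id"], {"floatingip": {"port_id": None}})
        neutron.update_floatingip(fip_a["id"], {"floatingip": {"port_id": port_b}})
        neutron.update_floatingip(fip_b["id"], {"floatingip": {"port_id": port_a}})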
@@ -216,8 +222,8 @@ class VMDynamicScenario(dynamic_utils.NovaUtils,
         self._wait_for_ping(server2_fip["floating_ip_address"])

         # Release locks from servers
-        self.release_lock(servers_for_swapping[0].id)
-        self.release_lock(servers_for_swapping[1].id)
+        self.release_lock(servers_to_swap[0].id)
+        self.release_lock(servers_to_swap[1].id)

     def stop_start_servers_with_fip(self, num_vms):
         """Stop and start random servers