Add workload for Hybrid Computes
This patch adds a workload which boots VMs with the following specifications:

1. On non-NFV compute nodes: 1 port in a tenant network, 1 port in a provider network
2. On DPDK compute nodes: 1 SR-IOV/tenant network port, 1 DPDK port
3. On OvS Hardware Offload compute nodes: 1 SR-IOV/tenant network port, 1 Hardware Offload port

Change-Id: I76d468c333f919219db9525f0df2ac911f1a719f
parent 865e85a277
commit 04032a49ab
@@ -415,6 +415,53 @@ workloads:
      - 8
    times: 10
    scenarios:
      # This is a scenario developed specifically for an environment with a mix of
      # non-NFV compute nodes, ComputeOvsDpdk/ComputeOvsDpdkSriov nodes,
      # and compute nodes with OvS Hardware Offload/SR-IOV+OvS Hardware Offload.
      # It creates three different availability zones for the three different types
      # of compute nodes and boots VMs in them accordingly.
      # The scenario boots <times> number of VMs in the availability zone which has
      # the least number of compute nodes and automatically calculates the number of
      # VMs to boot in the other AZs based on the proportional_scale option.
      - name: nova-boot-hybrid-computes
        enabled: true
        image_name: centos7
        vanilla_flavor_name: m1.small
        vanilla_phys_net: provider-0
        sriov_phys_net: provider-1
        # option to enable booting DPDK instances in the tests
        boot_dpdk_vms: False
        dpdk_flavor_name: m1.small
        dpdk_phys_net: provider-0
        # Management network type for the VM can be either tenant or sriov.
        # The port on the management network should be used for general access
        # to the VM and not for dataplane traffic.
        dpdk_management_nw_type: tenant
        dpdk_hosts_group: ComputeOvsDpdk
        # option to enable booting instances with hardware offload configured in the tests
        boot_hw_offload_vms: False
        hw_offload_flavor_name: m1.small
        hw_offload_phys_net: provider-2
        # Management network type for the VM can be either tenant or sriov.
        # The port on the management network should be used for general access
        # to the VM and not for dataplane traffic.
        hw_offload_management_nw_type: tenant
        hw_offload_hosts_group: ComputeSriovOffload
        # path to tripleo inventory file
        tripleo_inventory_file: ansible/hosts.yml
        num_tenants: 2
        num_networks_per_tenant: 6
        # This option will scale VMs proportionally on
        # different compute node types. For example,
        # if there is 1 SR-IOV+DPDK compute node, 1 SR-IOV+HWOL
        # compute node and 3 non-NFV compute nodes, then
        # 3 non-NFV VMs will be booted in each iteration, while
        # 1 NFV VM of each type will be booted in an iteration.
        # This option should be enabled only when the environment
        # is stable, as it could boot a lot of VMs concurrently.
        proportional_scale: False
        nova_api_version: 2.74
        file: rally/rally-plugins/nova/nova_boot_hybrid_computes.yml
      - name: nova-boot-in-batches-with-delay
        enabled: true
        image_name: cirro5
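
The proportional_scale behaviour described in the comments above reduces to an integer division by the size of the smallest availability zone. A minimal sketch of that arithmetic, as an illustration only (not part of the patch; the host counts are hypothetical):

    # Hypothetical host counts for the three compute-node types.
    num_vanilla_hosts, num_dpdk_hosts, num_hw_offload_hosts = 3, 1, 1

    smallest_az = min(num_vanilla_hosts, num_dpdk_hosts, num_hw_offload_hosts)

    # VMs booted per iteration in each AZ when proportional_scale is enabled:
    vms_per_iteration = {
        "vanilla": num_vanilla_hosts // smallest_az,        # 3
        "dpdk": num_dpdk_hosts // smallest_az,              # 1
        "hw_offload": num_hw_offload_hosts // smallest_az,  # 1
    }
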
rally/rally-plugins/nova/nfv_context.py (new file, 308 lines)
@@ -0,0 +1,308 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from rally.task import context
from rally.common import logging
from rally.common import utils
from rally import consts
from rally_openstack.common import osclients
from rally_openstack.common.wrappers import network as network_wrapper

import yaml

LOG = logging.getLogger(__name__)


@context.configure(name="create_nfv_azs_and_networks", order=1100)
class CreateNFVAZsandNetworksContext(context.Context):
    """This plugin creates availability zones with host aggregates and networks
    for non-NFV, SR-IOV with DPDK and SR-IOV with hardware offload.
    """

    CONFIG_SCHEMA = {
        "type": "object",
        "$schema": consts.JSON_SCHEMA,
        "additionalProperties": True,
        "properties": {
            "boot_dpdk_vms": {
                "type": "string",
                "default": "False"
            },
            "boot_hw_offload_vms": {
                "type": "string",
                "default": "False"
            },
            "dpdk_hosts_group": {
                "type": "string",
                "default": "ComputeOvsDpdkSriov"
            },
            "hw_offload_hosts_group": {
                "type": "string",
                "default": "ComputeSriovOffload"
            },
            "tripleo_inventory_file": {
                "type": "string",
                "default": "/home/stack/browbeat/ansible/hosts.yml"
            }
        }
    }

    def _create_subnet(self, tenant_id, network_id, network_number):
        """Create subnet for network

        :param tenant_id: ID of tenant
        :param network_id: ID of network
        :param network_number: int, number for CIDR of subnet
        :returns: subnet object
        """
        subnet_args = {
            "subnet": {
                "tenant_id": tenant_id,
                "network_id": network_id,
                "name": self.net_wrapper.owner.generate_random_name(),
                "ip_version": 4,
                "cidr": "172.{}.0.0/16".format(network_number),
                "enable_dhcp": True,
                "gateway_ip": "172.{}.0.1".format(network_number),
                "allocation_pools": [{"start": "172.{}.0.2".format(network_number),
                                      "end": "172.{}.254.254".format(network_number)}]
            }
        }
        return self.net_wrapper.client.create_subnet(subnet_args)["subnet"]
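
    # For illustration: VLAN allocation starts at 1001 (see setup()), and
    # callers pass network_vlan_number - 950 as network_number, so the first
    # subnet created here is 172.51.0.0/16, the next 172.52.0.0/16, and so on.
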
    def _create_networks_and_subnets(self, tenant_id):
        self.net_wrapper = network_wrapper.wrap(
            osclients.Clients(self.context["admin"]["credential"]),
            self,
            config=self.config,
        )

        net_kwargs = {
            "network_create_args": {
                "provider:network_type": "vlan",
                "provider:physical_network": self.config["provider_phys_nets"]["vanilla"],
                "provider:segmentation_id": self.network_vlan_number,
            }
        }
        vanilla_network = self.net_wrapper.create_network(tenant_id, **net_kwargs)

        self.context["vanilla_networks"][tenant_id] = vanilla_network
        self.context["vanilla_subnets"][tenant_id] = self._create_subnet(
            tenant_id, vanilla_network["id"],
            self.network_vlan_number - 950)
        self.network_vlan_number += 1

        if self.context["boot_dpdk_vms"]:
            net_kwargs = {
                "network_create_args": {
                    "provider:network_type": "vlan",
                    "provider:physical_network": self.config["provider_phys_nets"]["dpdk"],
                    "provider:segmentation_id": self.network_vlan_number,
                }
            }

            dpdk_network = self.net_wrapper.create_network(tenant_id, **net_kwargs)
            self.context["nfv_networks"].setdefault("dpdk", {})
            self.context["nfv_networks"]["dpdk"][tenant_id] = dpdk_network
            self.context["nfv_subnets"].setdefault("dpdk", {})
            self.context["nfv_subnets"]["dpdk"][tenant_id] = self._create_subnet(
                tenant_id, dpdk_network["id"],
                self.network_vlan_number - 950)
            self.network_vlan_number += 1

        net_kwargs = {
            "network_create_args": {
                "provider:network_type": "vlan",
                "provider:physical_network": self.config["provider_phys_nets"]["sriov"],
                "provider:segmentation_id": self.network_vlan_number,
            }
        }
        sriov_network = self.net_wrapper.create_network(tenant_id, **net_kwargs)

        self.context["nfv_networks"].setdefault("sriov", {})
        self.context["nfv_networks"]["sriov"][tenant_id] = sriov_network
        self.context["nfv_subnets"].setdefault("sriov", {})
        self.context["nfv_subnets"]["sriov"][tenant_id] = self._create_subnet(
            tenant_id, sriov_network["id"],
            self.network_vlan_number - 950)
        self.network_vlan_number += 1

        if self.context["boot_hw_offload_vms"]:
            net_kwargs = {
                "network_create_args": {
                    "provider:network_type": "vlan",
                    "provider:physical_network": self.config["provider_phys_nets"]["hw_offload"],
                    "provider:segmentation_id": self.network_vlan_number,
                }
            }
            hw_offload_network = self.net_wrapper.create_network(tenant_id, **net_kwargs)

            self.context["nfv_networks"].setdefault("hw_offload", {})
            self.context["nfv_networks"]["hw_offload"][tenant_id] = hw_offload_network
            self.context["nfv_subnets"].setdefault("hw_offload", {})
            self.context["nfv_subnets"]["hw_offload"][tenant_id] = self._create_subnet(
                tenant_id, hw_offload_network["id"],
                self.network_vlan_number - 950)
            self.network_vlan_number += 1

    def setup(self):
        """This method is called before the task starts."""
        self.net_wrapper = network_wrapper.wrap(
            osclients.Clients(self.context["admin"]["credential"]),
            self,
            config=self.config,
        )
        self.nova_wrapper = osclients.Nova(self.context["admin"]["credential"]).create_client()
        self.context["nfv_networks"] = {}
        self.context["nfv_subnets"] = {}
        self.context["vanilla_networks"] = {}
        self.context["vanilla_subnets"] = {}
        # This has been made a string value so that upper case/lower case
        # variations can be considered.
        self.context["boot_dpdk_vms"] = self.config.get("boot_dpdk_vms", "False") in [
            "True", "true"]
        self.context["boot_hw_offload_vms"] = self.config.get("boot_hw_offload_vms",
                                                              "False") in [
            "True", "true"]
        self.dpdk_hosts_group = self.config.get("dpdk_hosts_group",
                                                "ComputeOvsDpdk")
        self.hw_offload_hosts_group = self.config.get("hw_offload_hosts_group",
                                                      "ComputeSriovOffload")
        self.network_vlan_number = 1001

        tripleo_inventory_file_path = self.config.get("tripleo_inventory_file",
                                                      "/home/stack/browbeat/ansible/hosts.yml")
        with open(tripleo_inventory_file_path, "r") as tripleo_inventory_file:
            self.tripleo_inventory = yaml.safe_load(tripleo_inventory_file)
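
        # The aggregate/AZ setup below expects a tripleo-style inventory.
        # Illustrative shape, inferred from the lookups below (the hostnames
        # are hypothetical, not part of the patch):
        #
        #   ComputeOvsDpdk:
        #     hosts:
        #       overcloud-computeovsdpdk-0:
        #         canonical_hostname: overcloud-computeovsdpdk-0.localdomain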

        dpdk_and_hw_offload_hosts = []

        if self.context["boot_dpdk_vms"]:
            self.dpdk_aggregate = self.nova_wrapper.aggregates.create(
                "dpdk_aggregate", "az_dpdk")
            dpdk_hosts = self.tripleo_inventory[self.dpdk_hosts_group]["hosts"]
            self.context["num_dpdk_compute_hosts"] = len(dpdk_hosts)
            for host_details in dpdk_hosts.values():
                self.nova_wrapper.aggregates.add_host(self.dpdk_aggregate.id,
                                                      host_details["canonical_hostname"])
                dpdk_and_hw_offload_hosts.append(host_details["canonical_hostname"])

        if self.context["boot_hw_offload_vms"]:
            self.hw_offload_aggregate = self.nova_wrapper.aggregates.create(
                "hw_offload_aggregate", "az_hw_offload")
            hw_offload_hosts = self.tripleo_inventory[
                self.hw_offload_hosts_group]["hosts"]
            self.context["num_hw_offload_compute_hosts"] = len(hw_offload_hosts)
            for host_details in hw_offload_hosts.values():
                self.nova_wrapper.aggregates.add_host(self.hw_offload_aggregate.id,
                                                      host_details["canonical_hostname"])
                dpdk_and_hw_offload_hosts.append(host_details["canonical_hostname"])

        self.vanilla_compute_aggregate = self.nova_wrapper.aggregates.create(
            "vanilla_compute_aggregate", "az_vanilla_compute")

        self.vanilla_compute_hosts = set()
        for hostsgroup in self.tripleo_inventory:
            if "hosts" in self.tripleo_inventory[hostsgroup] and "Compute" in hostsgroup:
                for host_details in self.tripleo_inventory[hostsgroup]["hosts"].values():
                    if ("canonical_hostname" in host_details and
                            host_details["canonical_hostname"] not in dpdk_and_hw_offload_hosts):
                        self.nova_wrapper.aggregates.add_host(
                            self.vanilla_compute_aggregate.id,
                            host_details["canonical_hostname"])
                        self.vanilla_compute_hosts.add(host_details["canonical_hostname"])
        self.context["num_vanilla_compute_hosts"] = len(self.vanilla_compute_hosts)

        for _, tenant_id in utils.iterate_per_tenants(
            self.context.get("users", [])
        ):
            self._create_networks_and_subnets(tenant_id)

    def cleanup(self):
        """This method is called after the task finishes."""
        for hostname in self.vanilla_compute_hosts:
            self.nova_wrapper.aggregates.remove_host(self.vanilla_compute_aggregate.id,
                                                     hostname)
        self.nova_wrapper.aggregates.delete(self.vanilla_compute_aggregate.id)

        if self.context["boot_dpdk_vms"]:
            dpdk_hosts = self.tripleo_inventory[self.dpdk_hosts_group]["hosts"]
            for host_details in dpdk_hosts.values():
                self.nova_wrapper.aggregates.remove_host(self.dpdk_aggregate.id,
                                                         host_details["canonical_hostname"])
            self.nova_wrapper.aggregates.delete(self.dpdk_aggregate.id)

        if self.context["boot_hw_offload_vms"]:
            hw_offload_hosts = self.tripleo_inventory[
                self.hw_offload_hosts_group]["hosts"]
            for host_details in hw_offload_hosts.values():
                self.nova_wrapper.aggregates.remove_host(self.hw_offload_aggregate.id,
                                                         host_details["canonical_hostname"])
            self.nova_wrapper.aggregates.delete(self.hw_offload_aggregate.id)

        for subnet in self.context["vanilla_subnets"].values():
            try:
                subnet_id = subnet["id"]
                self.net_wrapper._delete_subnet(subnet_id)
                LOG.debug(
                    "Subnet with id '%s' deleted from context"
                    % subnet_id
                )
            except Exception as e:
                msg = "Can't delete subnet {} from context: {}".format(
                    subnet_id, e
                )
                LOG.warning(msg)

        for network in self.context["vanilla_networks"].values():
            try:
                network_id = network["id"]
                self.net_wrapper.delete_network(network)
                LOG.debug(
                    "Network with id '%s' deleted from context"
                    % network_id
                )
            except Exception as e:
                msg = "Can't delete network {} from context: {}".format(
                    network_id, e
                )
                LOG.warning(msg)

        for subnets in self.context["nfv_subnets"].values():
            for subnet in subnets.values():
                try:
                    nfv_subnet_id = subnet["id"]
                    self.net_wrapper._delete_subnet(nfv_subnet_id)
                    LOG.debug(
                        "Subnet with id '%s' deleted from context"
                        % nfv_subnet_id
                    )
                except Exception as e:
                    msg = "Can't delete subnet {} from context: {}".format(
                        nfv_subnet_id, e
                    )
                    LOG.warning(msg)

        for networks in self.context["nfv_networks"].values():
            for network in networks.values():
                try:
                    nfv_network_id = network["id"]
                    self.net_wrapper.delete_network(network)
                    LOG.debug(
                        "Network with id '%s' deleted from context"
                        % nfv_network_id
                    )
                except Exception as e:
                    msg = "Can't delete network {} from context: {}".format(
                        nfv_network_id, e
                    )
                    LOG.warning(msg)
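
One subtlety in the context above: boot_dpdk_vms and boot_hw_offload_vms are schema-typed as strings, and only the exact spellings "True" and "true" enable the feature. A minimal sketch of that evaluation (illustration only, not part of the patch):

    # Truthiness is decided by list membership, not by bool() semantics.
    for raw in ("True", "true", "TRUE", "False", ""):
        enabled = raw in ["True", "true"]
        print(repr(raw), "->", enabled)  # "TRUE" and "" both disable the feature
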
@@ -15,11 +15,15 @@ import time
from rally_openstack.common import consts
from rally_openstack.task.scenarios.cinder import utils as cinder_utils
from rally_openstack.task.scenarios.nova import utils as nova_utils
from rally_openstack.task.scenarios.neutron import utils as neutron_utils
from rally_openstack.task.scenarios.vm import utils as vm_utils
from rally.task import scenario
from rally.task import types
from rally.task import validation

from rally_openstack.common.services.network import neutron
from rally_openstack.common import osclients

LOG = logging.getLogger(__name__)


@types.convert(image={"type": "glance_image"}, flavor={"type": "nova_flavor"})
@@ -33,6 +37,166 @@ class NovaBootPersist(nova_utils.NovaScenario):
    def run(self, image, flavor, **kwargs):
        self._boot_server(image, flavor)


@types.convert(image={"type": "glance_image"}, vanilla_flavor={"type": "nova_flavor"},
               dpdk_flavor={"type": "nova_flavor"}, hw_offload_flavor={"type": "nova_flavor"})
@validation.add("image_valid_on_flavor", flavor_param="vanilla_flavor", image_param="image")
@validation.add("image_valid_on_flavor", flavor_param="dpdk_flavor", image_param="image")
@validation.add("image_valid_on_flavor", flavor_param="hw_offload_flavor", image_param="image")
@validation.add("required_contexts", contexts=("create_nfv_azs_and_networks"))
@validation.add("required_services", services=[consts.Service.NEUTRON, consts.Service.NOVA])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["neutron", "nova"]},
                    name="BrowbeatNova.nova_boot_hybrid_computes", platform="openstack")
class NovaBootHybridComputes(nova_utils.NovaScenario, neutron_utils.NeutronScenario):

    def run(self, image, vanilla_flavor, dpdk_flavor,
            hw_offload_flavor, num_networks_per_tenant,
            dpdk_management_nw_type, hw_offload_management_nw_type,
            proportional_scale, **kwargs):
        """Create VMs on non-NFV compute nodes, SR-IOV+DPDK compute nodes,
        and SR-IOV+Hardware Offload compute nodes.

        :param image: image ID or instance for server creation
        :param vanilla_flavor: flavor ID or instance for server creation
        :param dpdk_flavor: flavor ID or instance for SR-IOV+DPDK VM creation
        :param hw_offload_flavor: flavor ID or instance for SR-IOV+Hardware Offload VM creation
        :param num_networks_per_tenant: int, number of tunnel networks per tenant
        :param dpdk_management_nw_type: str, management network for DPDK VMs
        :param hw_offload_management_nw_type: str, management network for HW Offload VMs
        :param proportional_scale: str, option to scale VMs proportionately
        """
        tenant_id = self.context["tenant"]["id"]
        minimum_num_compute_hosts = min(self.context["num_vanilla_compute_hosts"],
                                        self.context.get("num_dpdk_compute_hosts",
                                                         self.context[
                                                             "num_vanilla_compute_hosts"]),
                                        self.context.get("num_hw_offload_compute_hosts",
                                                         self.context[
                                                             "num_vanilla_compute_hosts"]))

        vanilla_provider_network = self.context["vanilla_networks"][tenant_id]
        tenant_network_id = self.context["tenant"]["networks"][((self.context["iteration"] - 1)
                                                                % num_networks_per_tenant)]["id"]
        LOG.info("ITER {} using tenant network {}".format(self.context["iteration"],
                                                          tenant_network_id))
        kwargs["nics"] = [{'net-id': vanilla_provider_network["id"]}]
        kwargs["availability-zone"] = "az_vanilla_compute"

        proportional_scale = proportional_scale in ["True", "true"]
        if proportional_scale:
            num_vanilla_vms_to_boot = self.context[
                "num_vanilla_compute_hosts"] // minimum_num_compute_hosts
            if self.context["boot_dpdk_vms"]:
                num_dpdk_vms_to_boot = self.context[
                    "num_dpdk_compute_hosts"] // minimum_num_compute_hosts
            if self.context["boot_hw_offload_vms"]:
                num_sriov_hw_offload_vms_to_boot = self.context[
                    "num_hw_offload_compute_hosts"] // minimum_num_compute_hosts
        else:
            num_vanilla_vms_to_boot = 1
            if self.context["boot_dpdk_vms"]:
                num_dpdk_vms_to_boot = 1
            if self.context["boot_hw_offload_vms"]:
                num_sriov_hw_offload_vms_to_boot = 1
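
        # Each vanilla VM boots with its provider-network port and then has a
        # tenant-network port attached, giving the "1 tenant port + 1 provider
        # port" layout described in the commit message.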
        for _ in range(num_vanilla_vms_to_boot):
            vanilla_server = self._boot_server(image, vanilla_flavor, **kwargs)
            self._attach_interface(vanilla_server, net_id=tenant_network_id)
            LOG.info("ITER {} Booted vanilla server : {}".format(self.context["iteration"],
                                                                 vanilla_server.id))
            # Booting VMs simultaneously across iterations adds a lot of load,
            # so delay each boot by 5 seconds.
            time.sleep(5)

        if self.context["boot_dpdk_vms"]:
            LOG.info("ITER {} DPDK instances enabled.".format(self.context["iteration"]))

            dpdk_server_kwargs = {}
            dpdk_networks = self.context["nfv_networks"]["dpdk"]

            if dpdk_management_nw_type == "sriov":
                sriov_port_kwargs = {}
                sriov_networks = self.context["nfv_networks"]["sriov"]
                sriov_network = sriov_networks[tenant_id]
                sriov_port_kwargs["binding:vnic_type"] = "direct"

            for _ in range(num_dpdk_vms_to_boot):
                if dpdk_management_nw_type == "sriov":
                    sriov_port = self._create_port({"network": sriov_network}, sriov_port_kwargs)
                    dpdk_server_kwargs["nics"] = [{'port-id': sriov_port["port"]["id"]},
                                                  {'net-id': dpdk_networks[tenant_id]["id"]}]
                elif dpdk_management_nw_type == "tenant":
                    dpdk_server_kwargs["nics"] = [{'net-id': tenant_network_id},
                                                  {'net-id': dpdk_networks[tenant_id]["id"]}]
                else:
                    raise Exception("{} is not a valid management network type. {}".format(
                        dpdk_management_nw_type, "Please choose sriov or tenant."))
                dpdk_server_kwargs["availability-zone"] = "az_dpdk"
                dpdk_server = self._boot_server(image, dpdk_flavor,
                                                **dpdk_server_kwargs)
                LOG.info("ITER {} Booted DPDK server : {}".format(self.context["iteration"],
                                                                  dpdk_server.id))
                # Booting VMs simultaneously across iterations adds a lot of load,
                # so delay each boot by 5 seconds.
                time.sleep(5)

        if self.context["boot_hw_offload_vms"]:
            LOG.info("ITER {} Hardware Offload Instances enabled.".format(
                self.context["iteration"]))

            hw_offload_server_kwargs = {}
            hw_offload_network = self.context["nfv_networks"]["hw_offload"][tenant_id]
            hw_offload_subnet = self.context["nfv_subnets"]["hw_offload"][tenant_id]

            if hw_offload_management_nw_type == "sriov":
                sriov_networks = self.context["nfv_networks"]["sriov"]
                sriov_network = sriov_networks[tenant_id]
                sriov_port_kwargs = {}
                sriov_port_kwargs["binding:vnic_type"] = "direct"

            admin_clients = osclients.Clients(self.context["admin"]["credential"])
            self.admin_neutron = neutron.NeutronService(
                clients=admin_clients,
                name_generator=self.generate_random_name,
                atomic_inst=self.atomic_actions()
            )

            hw_offload_port_kwargs = {}
            hw_offload_port_kwargs["binding:vnic_type"] = "direct"
            hw_offload_port_kwargs["fixed_ips"] = [{"subnet_id": hw_offload_subnet["id"]}]

            for _ in range(num_sriov_hw_offload_vms_to_boot):
                if hw_offload_management_nw_type == "sriov":
                    sriov_port = self._create_port({"network": sriov_network}, sriov_port_kwargs)
                hw_offload_port = self._create_port({"network": hw_offload_network},
                                                    hw_offload_port_kwargs)
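
                # binding:profile with the switchdev capability marks the port
                # for OvS hardware offload; the update goes through the admin
                # Neutron client because binding:profile is an admin-only
                # attribute under the default Neutron policy.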
                hw_offload_port_kwargs["binding:profile"] = {"capabilities": ["switchdev"]}
                hw_offload_port = {"port": self.admin_neutron.update_port(
                    port_id=hw_offload_port["port"]["id"],
                    **hw_offload_port_kwargs)}

                if hw_offload_management_nw_type == "sriov":
                    hw_offload_server_kwargs["nics"] = [{'port-id': sriov_port["port"]["id"]},
                                                        {'port-id':
                                                         hw_offload_port["port"]["id"]}]
                elif hw_offload_management_nw_type == "tenant":
                    hw_offload_server_kwargs["nics"] = [{'net-id': tenant_network_id},
                                                        {'port-id':
                                                         hw_offload_port["port"]["id"]}]
                else:
                    raise Exception("{} is not a valid management network type. {}".format(
                        hw_offload_management_nw_type,
                        "Please choose sriov or tenant."))

                hw_offload_server_kwargs["availability-zone"] = "az_hw_offload"
                hw_offload_server = self._boot_server(image, hw_offload_flavor,
                                                      **hw_offload_server_kwargs)
                LOG.info("ITER {} Booted Hardware Offload server : {}".format(
                    self.context["iteration"], hw_offload_server.id))

                # Booting VMs simultaneously across iterations adds a lot of load,
                # so delay each boot by 5 seconds.
                time.sleep(5)


@types.convert(image={"type": "glance_image"}, flavor={"type": "nova_flavor"})
@validation.add("image_valid_on_flavor", flavor_param="flavor", image_param="image")
rally/rally-plugins/nova/nova_boot_hybrid_computes.yml (new file, 66 lines)
@@ -0,0 +1,66 @@
{% set image_name = image_name or 'centos7' %}
{% set vanilla_flavor_name = vanilla_flavor_name or 'm1.small' %}
{% set dpdk_flavor_name = dpdk_flavor_name or 'm1.small' %}
{% set hw_offload_flavor_name = hw_offload_flavor_name or 'm1.small' %}
{% set proportional_scale = proportional_scale or 'False' %}
{% set nova_api_version = nova_api_version or 2.74 %}
{% set num_tenants = num_tenants or 1 %}
{% set num_networks_per_tenant = num_networks_per_tenant or 1 %}
{% set sla_max_avg_duration = sla_max_avg_duration or 60 %}
{% set sla_max_failure = sla_max_failure or 0 %}
{% set sla_max_seconds = sla_max_seconds or 60 %}
---
BrowbeatNova.nova_boot_hybrid_computes:
  -
    args:
      image:
        name: '{{ image_name }}'
      vanilla_flavor:
        name: '{{ vanilla_flavor_name }}'
      dpdk_flavor:
        name: '{{ dpdk_flavor_name }}'
      hw_offload_flavor:
        name: '{{ hw_offload_flavor_name }}'
      dpdk_management_nw_type: '{{ dpdk_management_nw_type }}'
      hw_offload_management_nw_type: '{{ hw_offload_management_nw_type }}'
      proportional_scale: '{{ proportional_scale }}'
      num_networks_per_tenant: {{ num_networks_per_tenant }}
    runner:
      concurrency: {{concurrency}}
      times: {{times}}
      type: 'constant'
    context:
      create_nfv_azs_and_networks:
        provider_phys_nets:
          dpdk: '{{ dpdk_phys_net }}'
          sriov: '{{ sriov_phys_net }}'
          hw_offload: '{{ hw_offload_phys_net }}'
          vanilla: '{{ vanilla_phys_net }}'
        boot_dpdk_vms: '{{ boot_dpdk_vms }}'
        boot_hw_offload_vms: '{{ boot_hw_offload_vms }}'
        dpdk_hosts_group: '{{ dpdk_hosts_group }}'
        hw_offload_hosts_group: '{{ hw_offload_hosts_group }}'
        tripleo_inventory_file: '{{ tripleo_inventory_file }}'
      api_versions:
        nova:
          version: {{ nova_api_version }}
      users:
        tenants: {{ num_tenants }}
        users_per_tenant: 8
      network:
        networks_per_tenant: {{num_networks_per_tenant}}
      quotas:
        neutron:
          network: -1
          port: -1
          subnet: -1
          router: -1
        nova:
          instances: -1
          cores: -1
          ram: -1
    sla:
      max_avg_duration: {{sla_max_avg_duration}}
      max_seconds_per_iteration: {{sla_max_seconds}}
      failure_rate:
        max: {{sla_max_failure}}