[Scenario] Split Scenarios - P3

Move under plugins/openstack:
    * Sahara
    * Swift
    * Glance
    * Cinder

Implements: blueprint split-plugins

Change-Id: I095d4efe181308feffbd49c59a1c52fa2ce0ca59
Yair Fried 2015-05-17 15:46:09 +03:00
parent 24943b7353
commit a92abc997f
33 changed files with 4347 additions and 4 deletions
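The move is purely mechanical: consumers switch from the old rally.benchmark namespace to the new plugin tree, as the hunks below show. For example, taking the Sahara context file from the first hunk:

from rally.benchmark.scenarios.sahara import utils            # before the split
from rally.plugins.openstack.scenarios.sahara import utils    # after the split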

View File

@@ -16,7 +16,6 @@
from oslo_config import cfg
from rally.benchmark.context import base
from rally.benchmark.scenarios.sahara import utils
from rally.benchmark import utils as bench_utils
from rally.common.i18n import _
from rally.common import log as logging
@@ -25,6 +24,7 @@ from rally import consts
from rally import exceptions
from rally import osclients
from rally.plugins.openstack.context.cleanup import manager as resource_manager
from rally.plugins.openstack.scenarios.sahara import utils
CONF = cfg.CONF

View File

@@ -13,7 +13,6 @@
# under the License.
from rally.benchmark.context import base
from rally.benchmark.scenarios.glance import utils as glance_utils
from rally.common.i18n import _
from rally.common import log as logging
from rally.common import utils as rutils
@@ -21,6 +20,7 @@ from rally import consts
from rally import exceptions
from rally import osclients
from rally.plugins.openstack.context.cleanup import manager as resource_manager
from rally.plugins.openstack.scenarios.glance import utils as glance_utils
LOG = logging.getLogger(__name__)

View File

@@ -0,0 +1,261 @@
# Copyright 2013 Huawei Technologies Co.,LTD.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
import time
from oslo_config import cfg
from rally.benchmark.scenarios import base
from rally.benchmark import utils as bench_utils
CINDER_BENCHMARK_OPTS = [
cfg.FloatOpt("cinder_volume_create_prepoll_delay",
default=2.0,
help="Time to sleep after creating a resource before"
" polling for it status"),
cfg.FloatOpt("cinder_volume_create_timeout",
default=600.0,
help="Time to wait for cinder volume to be created."),
cfg.FloatOpt("cinder_volume_create_poll_interval",
default=2.0,
help="Interval between checks when waiting for volume"
" creation."),
cfg.FloatOpt("cinder_volume_delete_timeout",
default=600.0,
help="Time to wait for cinder volume to be deleted."),
cfg.FloatOpt("cinder_volume_delete_poll_interval",
default=2.0,
help="Interval between checks when waiting for volume"
" deletion.")
]
CONF = cfg.CONF
benchmark_group = cfg.OptGroup(name="benchmark", title="benchmark options")
CONF.register_opts(CINDER_BENCHMARK_OPTS, group=benchmark_group)
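Since these options land in the standard oslo.config "benchmark" group, their effective values can be read back (and overridden through the usual oslo.config mechanisms) once the registration above has run. A minimal sketch, assuming the registration code has already executed:

from oslo_config import cfg

conf = cfg.CONF
print(conf.benchmark.cinder_volume_create_timeout)  # 600.0 unless overridden
# An override would normally live in an oslo.config file, e.g. a
# [benchmark] section containing: cinder_volume_create_timeout = 900.0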
class CinderScenario(base.Scenario):
"""Base class for Cinder scenarios with basic atomic actions."""
RESOURCE_NAME_PREFIX = "rally_volume_"
@base.atomic_action_timer("cinder.list_volumes")
def _list_volumes(self, detailed=True):
"""Returns user volumes list."""
return self.clients("cinder").volumes.list(detailed)
@base.atomic_action_timer("cinder.list_snapshots")
def _list_snapshots(self, detailed=True):
"""Returns user snapshots list."""
return self.clients("cinder").volume_snapshots.list(detailed)
@base.atomic_action_timer("cinder.create_volume")
def _create_volume(self, size, **kwargs):
"""Create one volume.
Returns when the volume is actually created and is in the "Available"
state.
:param size: volume size (integer, in GB), or
dictionary, must contain two values:
min - minimum size volumes will be created as;
max - maximum size volumes will be created as.
:param kwargs: Other optional parameters to initialize the volume
:returns: Created volume object
"""
kwargs["display_name"] = kwargs.get("display_name",
self._generate_random_name())
if isinstance(size, dict):
size = random.randint(size["min"], size["max"])
volume = self.clients("cinder").volumes.create(size, **kwargs)
# NOTE(msdubov): It is reasonable to wait a few seconds before starting
# to check whether the volume is ready => fewer API calls.
time.sleep(CONF.benchmark.cinder_volume_create_prepoll_delay)
volume = bench_utils.wait_for(
volume,
is_ready=bench_utils.resource_is("available"),
update_resource=bench_utils.get_from_manager(),
timeout=CONF.benchmark.cinder_volume_create_timeout,
check_interval=CONF.benchmark.cinder_volume_create_poll_interval
)
return volume
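bench_utils.wait_for implements the generic poll-until-ready loop these helpers rely on. A minimal sketch of the pattern, with a simplified signature and a plain RuntimeError standing in for Rally's own timeout exception:

import time

def wait_for_sketch(resource, is_ready, update_resource,
                    timeout=600.0, check_interval=2.0):
    """Illustrative polling loop, not Rally's actual implementation."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        resource = update_resource(resource)  # re-fetch via the client manager
        if is_ready(resource):
            return resource
        time.sleep(check_interval)
    raise RuntimeError("resource not ready after %s seconds" % timeout)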
@base.atomic_action_timer("cinder.delete_volume")
def _delete_volume(self, volume):
"""Delete the given volume.
Returns when the volume is actually deleted.
:param volume: volume object
"""
volume.delete()
bench_utils.wait_for_delete(
volume,
update_resource=bench_utils.get_from_manager(),
timeout=CONF.benchmark.cinder_volume_delete_timeout,
check_interval=CONF.benchmark.cinder_volume_delete_poll_interval
)
@base.atomic_action_timer("cinder.extend_volume")
def _extend_volume(self, volume, new_size):
"""Extend the given volume.
Returns when the volume is actually extended.
:param volume: volume object
:param new_size: new volume size (in GB), or
dictionary, must contain two values:
min - minimum size volumes will be extended to;
max - maximum size volumes will be extended to.
Note: must be larger than the current volume size.
"""
if isinstance(new_size, dict):
new_size = random.randint(new_size["min"], new_size["max"])
volume.extend(volume, new_size)
volume = bench_utils.wait_for(
volume,
is_ready=bench_utils.resource_is("available"),
update_resource=bench_utils.get_from_manager(),
timeout=CONF.benchmark.cinder_volume_create_timeout,
check_interval=CONF.benchmark.cinder_volume_create_poll_interval
)
@base.atomic_action_timer("cinder.upload_volume_to_image")
def _upload_volume_to_image(self, volume, force=False,
container_format="bare", disk_format="raw"):
"""Upload the given volume to image.
Returns created image.
:param volume: volume object
:param force: flag to indicate whether to snapshot a volume even if
it's attached to an instance
:param container_format: container format of image. Acceptable
formats: ami, ari, aki, bare, and ovf
:param: disk_format: disk format of image. Acceptable formats:
ami, ari, aki, vhd, vmdk, raw, qcow2, vdi
and iso
:returns: Returns created image object
"""
resp, img = volume.upload_to_image(force, self._generate_random_name(),
container_format, disk_format)
# NOTE (e0ne): upload_to_image changes volume status to "uploading", so
# we need to wait until it becomes available again.
volume = bench_utils.wait_for(
volume,
is_ready=bench_utils.resource_is("available"),
update_resource=bench_utils.get_from_manager(),
timeout=CONF.benchmark.cinder_volume_create_timeout,
check_interval=CONF.benchmark.cinder_volume_create_poll_interval
)
image_id = img["os-volume_upload_image"]["image_id"]
image = self.clients("glance").images.get(image_id)
image = bench_utils.wait_for(
image,
is_ready=bench_utils.resource_is("active"),
update_resource=bench_utils.get_from_manager(),
timeout=CONF.benchmark.glance_image_create_timeout,
check_interval=CONF.benchmark.glance_image_create_poll_interval
)
return image
@base.atomic_action_timer("cinder.create_snapshot")
def _create_snapshot(self, volume_id, force=False, **kwargs):
"""Create one snapshot.
Returns when the snapshot is actually created and is in the "Available"
state.
:param volume_id: volume uuid for creating snapshot
:param force: flag to indicate whether to snapshot a volume even if
it's attached to an instance
:param kwargs: Other optional parameters to initialize the volume
:returns: Created snapshot object
"""
kwargs["display_name"] = kwargs.get("display_name",
self._generate_random_name())
kwargs["force"] = force
snapshot = self.clients("cinder").volume_snapshots.create(volume_id,
**kwargs)
time.sleep(CONF.benchmark.cinder_volume_create_prepoll_delay)
snapshot = bench_utils.wait_for(
snapshot,
is_ready=bench_utils.resource_is("available"),
update_resource=bench_utils.get_from_manager(),
timeout=CONF.benchmark.cinder_volume_create_timeout,
check_interval=CONF.benchmark.cinder_volume_create_poll_interval
)
return snapshot
@base.atomic_action_timer("cinder.delete_snapshot")
def _delete_snapshot(self, snapshot):
"""Delete the given snapshot.
Returns when the snapshot is actually deleted.
:param snapshot: snapshot object
"""
snapshot.delete()
bench_utils.wait_for_delete(
snapshot,
update_resource=bench_utils.get_from_manager(),
timeout=CONF.benchmark.cinder_volume_delete_timeout,
check_interval=CONF.benchmark.cinder_volume_delete_poll_interval
)
@base.atomic_action_timer("cinder.create_backup")
def _create_backup(self, volume_id, **kwargs):
"""Create a volume backup of the given volume.
:param volume_id: The ID of the volume to backup.
:param kwargs: Other optional parameters
"""
backup = self.clients("cinder").backups.create(volume_id, **kwargs)
return bench_utils.wait_for(
backup,
is_ready=bench_utils.resource_is("available"),
update_resource=bench_utils.get_from_manager(),
timeout=CONF.benchmark.cinder_volume_create_timeout,
check_interval=CONF.benchmark.cinder_volume_create_poll_interval
)
@base.atomic_action_timer("cinder.delete_backup")
def _delete_backup(self, backup):
"""Delete the given backup.
Returns when the backup is actually deleted.
:param backup: backup instance
"""
backup.delete()
bench_utils.wait_for_delete(
backup,
update_resource=bench_utils.get_from_manager(),
timeout=CONF.benchmark.cinder_volume_delete_timeout,
check_interval=CONF.benchmark.cinder_volume_delete_poll_interval
)
def get_random_server(self):
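"""Return a random server from the servers stored in the tenant context."""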
server_id = random.choice(self.context["tenant"]["servers"])
return self.clients("nova").servers.get(server_id)

View File

@@ -0,0 +1,433 @@
# Copyright 2013 Huawei Technologies Co.,LTD.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
from rally.benchmark.scenarios import base
from rally.benchmark.scenarios.nova import utils as nova_utils
from rally.benchmark import types as types
from rally.benchmark import validation
from rally.common import log as logging
from rally import consts
from rally.plugins.openstack.scenarios.cinder import utils
from rally.plugins.openstack.scenarios.glance import utils as glance_utils
LOG = logging.getLogger(__name__)
class CinderVolumes(utils.CinderScenario,
nova_utils.NovaScenario,
glance_utils.GlanceScenario):
"""Benchmark scenarios for Cinder Volumes."""
@types.set(image=types.ImageResourceType)
@validation.image_exists("image", nullable=True)
@validation.required_services(consts.Service.CINDER)
@validation.required_openstack(users=True)
@base.scenario(context={"cleanup": ["cinder"]})
def create_and_list_volume(self, size, detailed=True,
image=None, **kwargs):
"""Create a volume and list all volumes.
Measure the "cinder volume-list" command performance.
If you have only 1 user in your context, you will
add 1 volume on every iteration. So you will have more
and more volumes and will be able to measure the
performance of the "cinder volume-list" command depending on
the number of volumes owned by users.
:param size: volume size (integer, in GB) or
dictionary, must contain two values:
min - minimum size volumes will be created as;
max - maximum size volumes will be created as.
:param detailed: determines whether the volume listing should contain
detailed information about all of them
:param image: image to be used to create volume
:param kwargs: optional args to create a volume
"""
if image:
kwargs["imageRef"] = image
self._create_volume(size, **kwargs)
self._list_volumes(detailed)
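Scenarios like this one are normally driven from a Rally task file. For illustration, a hypothetical task entry for this scenario, written as a Python dict (the runner and context values are arbitrary examples):

task_sketch = {
    "CinderVolumes.create_and_list_volume": [{
        "args": {"size": {"min": 1, "max": 5}, "detailed": True},
        "runner": {"type": "constant", "times": 10, "concurrency": 2},
        "context": {"users": {"tenants": 2, "users_per_tenant": 2}}
    }]
}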
@validation.required_services(consts.Service.CINDER)
@validation.required_openstack(users=True)
@base.scenario(context={"cleanup": ["cinder"]})
def list_volumes(self, detailed=True):
"""List all volumes.
This simple scenario tests the cinder list command by listing
all the volumes.
:param detailed: True if detailed information about volumes
should be listed
"""
self._list_volumes(detailed)
@types.set(image=types.ImageResourceType)
@validation.image_exists("image", nullable=True)
@validation.required_services(consts.Service.CINDER)
@validation.required_openstack(users=True)
@base.scenario(context={"cleanup": ["cinder"]})
def create_and_delete_volume(self, size, image=None,
min_sleep=0, max_sleep=0,
**kwargs):
"""Create and then delete a volume.
Good for testing the maximal bandwidth of a cloud. Optional 'min_sleep'
and 'max_sleep' parameters allow the scenario to simulate a pause
between volume creation and deletion (of random duration from
[min_sleep, max_sleep]).
:param size: volume size (integer, in GB) or
dictionary, must contain two values:
min - minimum size volumes will be created as;
max - maximum size volumes will be created as.
:param image: image to be used to create volume
:param min_sleep: minimum sleep time between volume creation and
deletion (in seconds)
:param max_sleep: maximum sleep time between volume creation and
deletion (in seconds)
:param kwargs: optional args to create a volume
"""
if image:
kwargs["imageRef"] = image
volume = self._create_volume(size, **kwargs)
self.sleep_between(min_sleep, max_sleep)
self._delete_volume(volume)
@types.set(image=types.ImageResourceType)
@validation.image_exists("image", nullable=True)
@validation.required_services(consts.Service.CINDER)
@validation.required_openstack(users=True)
@base.scenario(context={"cleanup": ["cinder"]})
def create_volume(self, size, image=None, **kwargs):
"""Create a volume.
A good test of how the number of active volumes affects the
performance of creating a new one.
:param size: volume size (integer, in GB) or
dictionary, must contain two values:
min - minimum size volumes will be created as;
max - maximum size volumes will be created as.
:param image: image to be used to create volume
:param kwargs: optional args to create a volume
"""
if image:
kwargs["imageRef"] = image
self._create_volume(size, **kwargs)
@validation.required_services(consts.Service.CINDER)
@validation.required_openstack(users=True)
@base.scenario(context={"cleanup": ["cinder"]})
def create_and_extend_volume(self, size, new_size, min_sleep=0,
max_sleep=0, **kwargs):
"""Create and extend a volume and then delete it.
:param size: volume size (in GB) or
dictionary, must contain two values:
min - minimum size volumes will be created as;
max - maximum size volumes will be created as.
:param new_size: new volume size (in GB), or
dictionary, must contain two values:
min - minimum size volumes will be extended to;
max - maximum size volumes will be extended to.
Note: must be larger than the original volume size.
:param min_sleep: minimum sleep time between volume extension and
deletion (in seconds)
:param max_sleep: maximum sleep time between volume extension and
deletion (in seconds)
:param kwargs: optional args to extend the volume
"""
volume = self._create_volume(size, **kwargs)
self._extend_volume(volume, new_size)
self.sleep_between(min_sleep, max_sleep)
self._delete_volume(volume)
@validation.required_services(consts.Service.CINDER)
@validation.required_contexts("volumes")
@validation.required_openstack(users=True)
@base.scenario(context={"cleanup": ["cinder"]})
def create_from_volume_and_delete_volume(self, size, min_sleep=0,
max_sleep=0, **kwargs):
"""Create volume from volume and then delete it.
Scenario for testing volume clone. Optional 'min_sleep' and 'max_sleep'
parameters allow the scenario to simulate a pause between volume
creation and deletion (of random duration from [min_sleep, max_sleep]).
:param size: volume size (in GB), or
dictionary, must contain two values:
min - minimum size volumes will be created as;
max - maximum size volumes will be created as.
Must be equal to or larger than the source volume size.
:param min_sleep: minimum sleep time between volume creation and
deletion (in seconds)
:param max_sleep: maximum sleep time between volume creation and
deletion (in seconds)
:param kwargs: optional args to create a volume
"""
source_vol = random.choice(self.context["tenant"]["volumes"])
volume = self._create_volume(size, source_volid=source_vol["id"],
**kwargs)
self.sleep_between(min_sleep, max_sleep)
self._delete_volume(volume)
@validation.required_services(consts.Service.CINDER)
@validation.required_contexts("volumes")
@validation.required_openstack(users=True)
@base.scenario(context={"cleanup": ["cinder"]})
def create_and_delete_snapshot(self, force=False, min_sleep=0,
max_sleep=0, **kwargs):
"""Create and then delete a volume-snapshot.
Optional 'min_sleep' and 'max_sleep' parameters allow the scenario
to simulate a pause between snapshot creation and deletion
(of random duration from [min_sleep, max_sleep]).
:param force: when set to True, allows snapshot of a volume when
the volume is attached to an instance
:param min_sleep: minimum sleep time between snapshot creation and
deletion (in seconds)
:param max_sleep: maximum sleep time between snapshot creation and
deletion (in seconds)
:param kwargs: optional args to create a snapshot
"""
volume = random.choice(self.context["tenant"]["volumes"])
snapshot = self._create_snapshot(volume["id"], force=force, **kwargs)
self.sleep_between(min_sleep, max_sleep)
self._delete_snapshot(snapshot)
@types.set(image=types.ImageResourceType,
flavor=types.FlavorResourceType)
@validation.image_valid_on_flavor("flavor", "image")
@validation.required_services(consts.Service.NOVA, consts.Service.CINDER)
@validation.required_openstack(users=True)
@base.scenario(context={"cleanup": ["cinder", "nova"]})
def create_and_attach_volume(self, size, image, flavor, **kwargs):
"""Create a VM and attach a volume to it.
Simple test to create a VM and attach a volume, then
detach the volume and delete volume/VM.
:param size: volume size (integer, in GB) or
dictionary, must contain two values:
min - minimum size volumes will be created as;
max - maximum size volumes will be created as.
:param image: Glance image name to use for the VM
:param flavor: VM flavor name
:param kwargs: optional arguments for VM creation
"""
server = self._boot_server(image, flavor, **kwargs)
volume = self._create_volume(size)
self._attach_volume(server, volume)
self._detach_volume(server, volume)
self._delete_volume(volume)
self._delete_server(server)
@validation.volume_type_exists("volume_type")
@validation.required_services(consts.Service.NOVA, consts.Service.CINDER)
@validation.required_openstack(users=True)
@base.scenario(context={"cleanup": ["cinder", "nova"]})
def create_snapshot_and_attach_volume(self, volume_type=False,
size=None, **kwargs):
"""Create volume, snapshot and attach/detach volume.
This scenario is based on the standalone qaStressTest.py
(https://github.com/WaltHP/cinder-stress).
:param volume_type: Whether or not to specify volume type when creating
volumes.
:param size: Volume size - dictionary, contains two values:
min - minimum size volumes will be created as;
max - maximum size volumes will be created as.
default values: {"min": 1, "max": 5}
:param kwargs: Optional parameters used during volume
snapshot creation.
"""
if "min_size" in kwargs or "max_size" in kwargs:
import warnings
warnings.warn("'min_size' and 'max_size' arguments "
"are deprecated. You should use 'size', with "
"keys 'min' and 'max' instead.")
if "volume_size" in kwargs:
import warnings
warnings.warn("'volume_size' argument is deprecated. You should "
"use 'size' instead.")
size = kwargs["volume_size"]
if size is None:
size = {"min": 1, "max": 5}
selected_type = None
volume_types = [None]
if volume_type:
volume_types_list = self.clients("cinder").volume_types.list()
for s in volume_types_list:
volume_types.append(s.name)
selected_type = random.choice(volume_types)
volume = self._create_volume(size, volume_type=selected_type)
snapshot = self._create_snapshot(volume.id, False, **kwargs)
server = self.get_random_server()
self._attach_volume(server, volume)
self._detach_volume(server, volume)
self._delete_snapshot(snapshot)
self._delete_volume(volume)
@validation.required_services(consts.Service.NOVA, consts.Service.CINDER)
@validation.required_openstack(users=True)
@base.scenario(context={"cleanup": ["cinder", "nova"]})
def create_nested_snapshots_and_attach_volume(self,
size=None,
nested_level=None,
**kwargs):
"""Create a volume from snapshot and attach/detach the volume
This scenario create volume, create it's snapshot, attach volume,
then create new volume from existing snapshot and so on,
with defined nested level, after all detach and delete them.
volume->snapshot->volume->snapshot->volume ...
:param size: Volume size - dictionary, contains two values:
min - minimum size volumes will be created as;
max - maximum size volumes will be created as.
default values: {"min": 1, "max": 5}
:param nested_level: Nested level - dictionary, contains two values:
min - minimum number of volumes will be created
from snapshot;
max - maximum number of volumes will be created
from snapshot.
default values: {"min": 5, "max": 10}
:param kwargs: Optional parameters used during volume
snapshot creation.
"""
if "volume_size" in kwargs:
import warnings
warnings.warn("'volume_size' argument is deprecated. You should "
"use 'size' instead.")
size = kwargs["volume_size"]
if size is None:
size = {"min": 1, "max": 5}
if nested_level is None:
nested_level = {"min": 5, "max": 10}
nested_level = random.randint(nested_level["min"], nested_level["max"])
source_vol = self._create_volume(size)
nes_objs = [(self.get_random_server(), source_vol,
self._create_snapshot(source_vol.id, False, **kwargs))]
self._attach_volume(nes_objs[0][0], nes_objs[0][1])
snapshot = nes_objs[0][2]
for i in range(nested_level - 1):
volume = self._create_volume(size, snapshot_id=snapshot.id)
snapshot = self._create_snapshot(volume.id, False, **kwargs)
server = self.get_random_server()
self._attach_volume(server, volume)
nes_objs.append((server, volume, snapshot))
nes_objs.reverse()
for server, volume, snapshot in nes_objs:
self._detach_volume(server, volume)
self._delete_snapshot(snapshot)
self._delete_volume(volume)
@validation.required_services(consts.Service.CINDER)
@validation.required_contexts("volumes")
@validation.required_openstack(users=True)
@base.scenario(context={"cleanup": ["cinder"]})
def create_and_list_snapshots(self, force=False, detailed=True, **kwargs):
"""Create and then list a volume-snapshot.
:param force: when set to True, allows snapshot of a volume when
the volume is attached to an instance
:param detailed: True if detailed information about snapshots
should be listed
:param kwargs: optional args to create a snapshot
"""
volume = random.choice(self.context["tenant"]["volumes"])
self._create_snapshot(volume["id"], force=force, **kwargs)
self._list_snapshots(detailed)
@validation.required_services(consts.Service.CINDER, consts.Service.GLANCE)
@validation.required_openstack(users=True)
@validation.required_parameters("size")
@base.scenario(context={"cleanup": ["cinder", "glance"]})
def create_and_upload_volume_to_image(self, size, force=False,
container_format="bare",
disk_format="raw",
do_delete=True,
**kwargs):
"""Create and upload a volume to image.
:param size: volume size (integer, in GB), or
dictionary, must contain two values:
min - minimum size volumes will be created as;
max - maximum size volumes will be created as.
:param force: when set to True, a volume attached to an instance
can be uploaded to an image
:param container_format: image container format
:param disk_format: disk format for image
:param do_delete: deletes image and volume after uploading if True
:param kwargs: optional args to create a volume
"""
volume = self._create_volume(size, **kwargs)
image = self._upload_volume_to_image(volume, force, container_format,
disk_format)
if do_delete:
self._delete_volume(volume)
self._delete_image(image)
@validation.required_cinder_services("cinder-backup")
@validation.required_services(consts.Service.CINDER)
@validation.required_openstack(users=True)
@base.scenario(context={"cleanup": ["cinder"]})
def create_volume_backup(self, size, do_delete=True, **kwargs):
"""Create a volume backup.
:param size: volume size in GB
:param do_delete: if True, a volume and a volume backup will
be deleted after creation.
:param kwargs: optional args to create a volume backup
"""
volume = self._create_volume(size, **kwargs)
backup = self._create_backup(volume.id, **kwargs)
if do_delete:
self._delete_volume(volume)
self._delete_backup(backup)

View File

@@ -0,0 +1,120 @@
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.benchmark.scenarios import base
from rally.benchmark.scenarios.nova import utils as nova_utils
from rally.benchmark import types as types
from rally.benchmark import validation
from rally import consts
from rally.plugins.openstack.scenarios.glance import utils
class GlanceImages(utils.GlanceScenario, nova_utils.NovaScenario):
"""Benchmark scenarios for Glance images."""
RESOURCE_NAME_PREFIX = "rally_image_"
RESOURCE_NAME_LENGTH = 16
@validation.required_services(consts.Service.GLANCE)
@validation.required_openstack(users=True)
@base.scenario(context={"cleanup": ["glance"]})
def create_and_list_image(self, container_format,
image_location, disk_format, **kwargs):
"""Add an image and then list all images.
Measure the "glance image-list" command performance.
If you have only 1 user in your context, you will
add 1 image on every iteration. So you will have more
and more images and will be able to measure the
performance of the "glance image-list" command depending on
the number of images owned by users.
:param container_format: container format of image. Acceptable
formats: ami, ari, aki, bare, and ovf
:param image_location: image file location
:param disk_format: disk format of image. Acceptable formats:
ami, ari, aki, vhd, vmdk, raw, qcow2, vdi, and iso
:param kwargs: optional parameters to create image
"""
self._create_image(container_format,
image_location,
disk_format,
**kwargs)
self._list_images()
@validation.required_services(consts.Service.GLANCE)
@validation.required_openstack(users=True)
@base.scenario(context={"cleanup": ["glance"]})
def list_images(self):
"""List all images.
This simple scenario tests the glance image-list command by listing
all the images.
For example, if we have 2 users in the context and each has 2 images
uploaded, we can measure the performance of the glance image-list
command under that load.
"""
self._list_images()
@validation.required_services(consts.Service.GLANCE)
@validation.required_openstack(users=True)
@base.scenario(context={"cleanup": ["glance"]})
def create_and_delete_image(self, container_format,
image_location, disk_format, **kwargs):
"""Add and then delete an image.
:param container_format: container format of image. Acceptable
formats: ami, ari, aki, bare, and ovf
:param image_location: image file location
:param disk_format: disk format of image. Acceptable formats:
ami, ari, aki, vhd, vmdk, raw, qcow2, vdi, and iso
:param kwargs: optional parameters to create image
"""
image = self._create_image(container_format,
image_location,
disk_format,
**kwargs)
self._delete_image(image)
@types.set(flavor=types.FlavorResourceType)
@validation.flavor_exists("flavor")
@validation.required_services(consts.Service.GLANCE, consts.Service.NOVA)
@validation.required_openstack(users=True)
@base.scenario(context={"cleanup": ["glance", "nova"]})
def create_image_and_boot_instances(self, container_format,
image_location, disk_format,
flavor, number_instances,
**kwargs):
"""Add an image and boot several instances from it.
:param container_format: container format of image. Acceptable
formats: ami, ari, aki, bare, and ovf
:param image_location: image file location
:param disk_format: disk format of image. Acceptable formats:
ami, ari, aki, vhd, vmdk, raw, qcow2, vdi, and iso
:param flavor: Nova flavor to be used to launch an instance
:param number_instances: number of Nova servers to boot
:param kwargs: optional parameters to create server
"""
image = self._create_image(container_format,
image_location,
disk_format)
image_id = image.id
server_name = self._generate_random_name(prefix="rally_novaserver_")
self._boot_servers(server_name, image_id,
flavor, number_instances, **kwargs)

View File

@@ -0,0 +1,125 @@
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import time
from oslo_config import cfg
from rally.benchmark.scenarios import base
from rally.benchmark import utils as bench_utils
GLANCE_BENCHMARK_OPTS = [
cfg.FloatOpt("glance_image_create_prepoll_delay",
default=2.0,
help="Time to sleep after creating a resource before "
"polling for it status"),
cfg.FloatOpt("glance_image_create_timeout",
default=120.0,
help="Time to wait for glance image to be created."),
cfg.FloatOpt("glance_image_create_poll_interval",
default=1.0,
help="Interval between checks when waiting for image "
"creation."),
cfg.FloatOpt("glance_image_delete_timeout",
default=120.0,
help="Time to wait for glance image to be deleted."),
cfg.FloatOpt("glance_image_delete_poll_interval",
default=1.0,
help="Interval between checks when waiting for image "
"deletion.")
]
CONF = cfg.CONF
benchmark_group = cfg.OptGroup(name="benchmark", title="benchmark options")
CONF.register_opts(GLANCE_BENCHMARK_OPTS, group=benchmark_group)
class GlanceScenario(base.Scenario):
"""Base class for Glance scenarios with basic atomic actions."""
@base.atomic_action_timer("glance.list_images")
def _list_images(self):
"""Returns user images list."""
return list(self.clients("glance").images.list())
@base.atomic_action_timer("glance.create_image")
def _create_image(self, container_format, image_location, disk_format,
name=None, prefix=None, length=None, **kwargs):
"""Create a new image.
:param container_format: container format of image. Acceptable
formats: ami, ari, aki, bare, and ovf
:param image_location: image file location
:param disk_format: disk format of image. Acceptable formats:
ami, ari, aki, vhd, vmdk, raw, qcow2, vdi, and iso
:param name: string used to name the image
:param prefix: prefix of the generated image name if name is not
specified; ignored if name is specified
:param length: length of the automatically generated part of the
image name; ignored if name is specified
:param kwargs: optional parameters to create image
:returns: image object
"""
name = name or self._generate_random_name(prefix, length)
kw = {
"name": name,
"container_format": container_format,
"disk_format": disk_format,
}
kw.update(kwargs)
try:
if os.path.isfile(image_location):
kw["data"] = open(image_location)
else:
kw["copy_from"] = image_location
image = self.clients("glance").images.create(**kw)
time.sleep(CONF.benchmark.glance_image_create_prepoll_delay)
image = bench_utils.wait_for(
image,
is_ready=bench_utils.resource_is("active"),
update_resource=bench_utils.get_from_manager(),
timeout=CONF.benchmark.glance_image_create_timeout,
check_interval=CONF.benchmark.
glance_image_create_poll_interval)
finally:
if "data" in kw:
kw["data"].close()
return image
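The try/finally above covers the two upload modes: a local path is streamed to Glance through the "data" argument, while anything else is passed as a "copy_from" URL for Glance to fetch. Hypothetical calls, with placeholder image locations:

# image = self._create_image("bare", "/tmp/cirros.img", "qcow2")
# image = self._create_image("bare", "http://example.com/cirros.img", "qcow2")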
@base.atomic_action_timer("glance.delete_image")
def _delete_image(self, image):
"""Deletes given image.
Returns when the image is actually deleted.
:param image: Image object
"""
image.delete()
bench_utils.wait_for_delete(
image,
update_resource=bench_utils.get_from_manager(),
timeout=CONF.benchmark.glance_image_delete_timeout,
check_interval=CONF.benchmark.glance_image_delete_poll_interval)

View File

@@ -0,0 +1,188 @@
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.benchmark.scenarios import base
from rally.benchmark import types
from rally.benchmark import validation
from rally.common import log as logging
from rally import consts
from rally.plugins.openstack.scenarios.sahara import utils
LOG = logging.getLogger(__name__)
class SaharaClusters(utils.SaharaScenario):
"""Benchmark scenarios for Sahara clusters."""
@types.set(flavor=types.FlavorResourceType,
neutron_net=types.NeutronNetworkResourceType,
floating_ip_pool=types.NeutronNetworkResourceType)
@validation.flavor_exists("flavor")
@validation.required_contexts("users", "sahara_image")
@validation.number("workers_count", minval=1, integer_only=True)
@validation.required_services(consts.Service.SAHARA)
@validation.required_openstack(users=True)
@base.scenario(context={"cleanup": ["sahara"]})
def create_and_delete_cluster(self, flavor, workers_count, plugin_name,
hadoop_version, floating_ip_pool=None,
volumes_per_node=None,
volumes_size=None, auto_security_group=None,
security_groups=None, node_configs=None,
cluster_configs=None,
enable_anti_affinity=False):
"""Launch and delete a Sahara Cluster.
This scenario launches a Hadoop cluster, waits until it becomes
'Active' and deletes it.
:param flavor: Nova flavor that will be used for nodes in the
created node groups
:param workers_count: number of worker instances in a cluster
:param plugin_name: name of a provisioning plugin
:param hadoop_version: version of Hadoop distribution supported by
the specified plugin.
:param floating_ip_pool: floating ip pool name from which Floating
IPs will be allocated. Sahara will determine
automatically how to treat this depending on
its own configuration. Defaults to None
because in some cases Sahara may work w/o
Floating IPs.
:param volumes_per_node: number of Cinder volumes that will be
attached to every cluster node
:param volumes_size: size of each Cinder volume in GB
:param auto_security_group: boolean value. If set to True Sahara will
create a Security Group for each Node Group
in the Cluster automatically.
:param security_groups: list of security groups that will be used
while creating VMs. If auto_security_group
is set to True, this list can be left empty.
:param node_configs: config dict that will be passed to each Node
Group
:param cluster_configs: config dict that will be passed to the
Cluster
:param enable_anti_affinity: If set to True the VMs will be scheduled
one per compute node.
"""
image_id = self.context["tenant"]["sahara_image"]
LOG.debug("Using Image: %s" % image_id)
cluster = self._launch_cluster(
flavor_id=flavor,
image_id=image_id,
workers_count=workers_count,
plugin_name=plugin_name,
hadoop_version=hadoop_version,
floating_ip_pool=floating_ip_pool,
volumes_per_node=volumes_per_node,
volumes_size=volumes_size,
auto_security_group=auto_security_group,
security_groups=security_groups,
node_configs=node_configs,
cluster_configs=cluster_configs,
enable_anti_affinity=enable_anti_affinity)
self._delete_cluster(cluster)
@types.set(flavor=types.FlavorResourceType)
@validation.flavor_exists("flavor")
@validation.required_services(consts.Service.SAHARA)
@validation.required_contexts("users", "sahara_image")
@validation.number("workers_count", minval=1, integer_only=True)
@base.scenario(context={"cleanup": ["sahara"]})
def create_scale_delete_cluster(self, flavor, workers_count, plugin_name,
hadoop_version, deltas,
floating_ip_pool=None,
volumes_per_node=None, volumes_size=None,
auto_security_group=None,
security_groups=None, node_configs=None,
cluster_configs=None,
enable_anti_affinity=False):
"""Launch, scale and delete a Sahara Cluster.
This scenario launches a Hadoop cluster, waits until it becomes
'Active'. Then a series of scale operations is applied. The scaling
happens according to the numbers listed in the "deltas" parameter.
E.g. if deltas is set to [2, -2], the first scaling operation will
add 2 worker nodes to the cluster and the second will remove two.
:param flavor: Nova flavor that will be used for nodes in the
created node groups
:param workers_count: number of worker instances in a cluster
:param plugin_name: name of a provisioning plugin
:param hadoop_version: version of Hadoop distribution supported by
the specified plugin.
:param deltas: list of integers which will be used to add or
remove worker nodes from the cluster
:param floating_ip_pool: floating ip pool name from which Floating
IPs will be allocated. Sahara will determine
automatically how to treat this depending on
its own configuration. Defaults to None
because in some cases Sahara may work w/o
Floating IPs.
:param volumes_per_node: number of Cinder volumes that will be
attached to every cluster node
:param volumes_size: size of each Cinder volume in GB
:param auto_security_group: boolean value. If set to True Sahara will
create a Security Group for each Node Group
in the Cluster automatically.
:param security_groups: list of security groups that will be used
while creating VMs. If auto_security_group
is set to True this list can be left empty.
:param node_configs: configs dict that will be passed to each Node
Group
:param cluster_configs: configs dict that will be passed to the
Cluster
:param enable_anti_affinity: If set to True the VMs will be scheduled
one per compute node.
"""
image_id = self.context["tenant"]["sahara_image"]
LOG.debug("Using Image: %s" % image_id)
cluster = self._launch_cluster(
flavor_id=flavor,
image_id=image_id,
workers_count=workers_count,
plugin_name=plugin_name,
hadoop_version=hadoop_version,
floating_ip_pool=floating_ip_pool,
volumes_per_node=volumes_per_node,
volumes_size=volumes_size,
auto_security_group=auto_security_group,
security_groups=security_groups,
node_configs=node_configs,
cluster_configs=cluster_configs,
enable_anti_affinity=enable_anti_affinity)
for delta in deltas:
# The Cluster is fetched every time so that its node groups have
# correct 'count' values.
cluster = self.clients("sahara").clusters.get(cluster.id)
if delta == 0:
# Zero scaling makes no sense.
continue
elif delta > 0:
self._scale_cluster_up(cluster, delta)
elif delta < 0:
self._scale_cluster_down(cluster, delta)
self._delete_cluster(cluster)

View File

@@ -0,0 +1,137 @@
# Copyright 2015: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
NODE_PROCESSES = {
"vanilla": {
"1.2.1": {
"master": ["namenode", "jobtracker", "oozie"],
"worker": ["datanode", "tasktracker"]
},
"2.3.0": {
"master": ["namenode", "resourcemanager", "historyserver",
"oozie"],
"worker": ["datanode", "nodemanager"]
},
"2.4.1": {
"master": ["namenode", "resourcemanager", "historyserver",
"oozie"],
"worker": ["datanode", "nodemanager"]
},
"2.6.0": {
"master": ["namenode", "resourcemanager", "historyserver",
"oozie"],
"worker": ["datanode", "nodemanager"]
}
},
"hdp": {
"1.3.2": {
"master": ["JOBTRACKER", "NAMENODE", "SECONDARY_NAMENODE",
"GANGLIA_SERVER", "NAGIOS_SERVER",
"AMBARI_SERVER", "OOZIE_SERVER"],
"worker": ["TASKTRACKER", "DATANODE", "HDFS_CLIENT",
"MAPREDUCE_CLIENT", "OOZIE_CLIENT", "PIG"]
},
"2.0.6": {
"manager": ["AMBARI_SERVER", "GANGLIA_SERVER",
"NAGIOS_SERVER"],
"master": ["NAMENODE", "SECONDARY_NAMENODE",
"ZOOKEEPER_SERVER", "ZOOKEEPER_CLIENT",
"HISTORYSERVER", "RESOURCEMANAGER",
"OOZIE_SERVER"],
"worker": ["DATANODE", "HDFS_CLIENT", "ZOOKEEPER_CLIENT",
"PIG", "MAPREDUCE2_CLIENT", "YARN_CLIENT",
"NODEMANAGER", "OOZIE_CLIENT"]
},
"2.2": {
"manager": ["AMBARI_SERVER", "GANGLIA_SERVER",
"NAGIOS_SERVER"],
"master": ["NAMENODE", "SECONDARY_NAMENODE",
"ZOOKEEPER_SERVER", "ZOOKEEPER_CLIENT",
"HISTORYSERVER", "RESOURCEMANAGER",
"OOZIE_SERVER"],
"worker": ["DATANODE", "HDFS_CLIENT", "ZOOKEEPER_CLIENT",
"PIG", "MAPREDUCE2_CLIENT", "YARN_CLIENT",
"NODEMANAGER", "OOZIE_CLIENT", "TEZ_CLIENT"]
}
},
"cdh": {
"5": {
"manager": ["CLOUDERA_MANAGER"],
"master": ["HDFS_NAMENODE", "YARN_RESOURCEMANAGER",
"OOZIE_SERVER", "YARN_JOBHISTORY",
"HDFS_SECONDARYNAMENODE", "HIVE_METASTORE",
"HIVE_SERVER2"],
"worker": ["YARN_NODEMANAGER", "HDFS_DATANODE"]
}
}
}
REPLICATION_CONFIGS = {
"vanilla": {
"1.2.1": {
"target": "HDFS",
"config_name": "dfs.replication"
},
"2.3.0": {
"target": "HDFS",
"config_name": "dfs.replication"
},
"2.4.1": {
"target": "HDFS",
"config_name": "dfs.replication"
},
"2.6.0": {
"target": "HDFS",
"config_name": "dfs.replication"
}
},
"hdp": {
"1.3.2": {
"target": "HDFS",
"config_name": "dfs.replication"
},
"2.0.6": {
"target": "HDFS",
"config_name": "dfs.replication"
},
"2.2": {
"target": "HDFS",
"config_name": "dfs.replication"
}
},
"cdh": {
"5": {
"target": "HDFS",
"config_name": "dfs_replication"
}
}
}
ANTI_AFFINITY_PROCESSES = {
"vanilla": {
"1.2.1": ["datanode"],
"2.3.0": ["datanode"],
"2.4.1": ["datanode"],
"2.6.0": ["datanode"]
},
"hdp": {
"1.3.2": ["DATANODE"],
"2.0.6": ["DATANODE"],
"2.2": ["DATANODE"]
},
"cdh": {
"5": ["HDFS_DATANODE"]
}
}
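These tables are keyed by plugin name, then plugin version, then node-group role, which is exactly how the scenario utils index them. For example:

# NODE_PROCESSES["vanilla"]["2.6.0"]["master"]
#     -> ["namenode", "resourcemanager", "historyserver", "oozie"]
# REPLICATION_CONFIGS["cdh"]["5"]["config_name"] -> "dfs_replication"
# ANTI_AFFINITY_PROCESSES["hdp"]["2.2"] -> ["DATANODE"]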

View File

@@ -0,0 +1,125 @@
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.benchmark.scenarios import base
from rally.benchmark import validation
from rally.common import log as logging
from rally import consts
from rally.plugins.openstack.scenarios.sahara import utils
LOG = logging.getLogger(__name__)
class SaharaJob(utils.SaharaScenario):
"""Benchmark scenarios for Sahara jobs."""
@validation.required_services(consts.Service.SAHARA)
@validation.required_contexts("users", "sahara_image", "sahara_edp",
"sahara_cluster")
@base.scenario(context={"cleanup": ["sahara"]})
def create_launch_job(self, job_type, configs, job_idx=0):
"""Create and execute a Sahara EDP Job.
This scenario creates a Job entity and launches an execution on a
Cluster.
:param job_type: type of the Data Processing Job
:param configs: config dict that will be passed to a Job Execution
:param job_idx: index of a job in a sequence. This index will be
used to create different atomic actions for each job
in a sequence
"""
mains = self.context["tenant"]["sahara_mains"]
libs = self.context["tenant"]["sahara_libs"]
name = self._generate_random_name(prefix="job_")
job = self.clients("sahara").jobs.create(name=name,
type=job_type,
description="",
mains=mains,
libs=libs)
cluster_id = self.context["tenant"]["sahara_cluster"]
if job_type.lower() == "java":
input_id = None
output_id = None
else:
input_id = self.context["tenant"]["sahara_input"]
output_id = self._create_output_ds().id
self._run_job_execution(job_id=job.id,
cluster_id=cluster_id,
input_id=input_id,
output_id=output_id,
configs=configs,
job_idx=job_idx)
@validation.required_services(consts.Service.SAHARA)
@validation.required_contexts("users", "sahara_image", "sahara_edp",
"sahara_cluster")
@base.scenario(context={"cleanup": ["sahara"]})
def create_launch_job_sequence(self, jobs):
"""Create and execute a sequence of the Sahara EDP Jobs.
This scenario Creates a Job entity and launches an execution on a
Cluster for every job object provided.
:param jobs: list of jobs that should be executed in one context
"""
for idx, job in enumerate(jobs):
LOG.debug("Launching Job. Sequence #%d" % idx)
self.create_launch_job(job["job_type"], job["configs"], idx)
@validation.required_services(consts.Service.SAHARA)
@validation.required_contexts("users", "sahara_image", "sahara_edp",
"sahara_cluster")
@base.scenario(context={"cleanup": ["sahara"]})
def create_launch_job_sequence_with_scaling(self, jobs, deltas):
"""Create and execute Sahara EDP Jobs on a scaling Cluster.
This scenario creates a Job entity and launches an execution on a
Cluster for every job object provided. The Cluster is scaled according
to the deltas values and the sequence is launched again.
:param jobs: list of jobs that should be executed in one context
:param deltas: list of integers which will be used to add or
remove worker nodes from the cluster
"""
cluster_id = self.context["tenant"]["sahara_cluster"]
# Executing the sequence before the first scaling
self.create_launch_job_sequence(jobs)
for delta in deltas:
# The Cluster is fetched every time so that its node groups have
# correct 'count' values.
cluster = self.clients("sahara").clusters.get(cluster_id)
LOG.debug("Scaling cluster %s with delta %d" %
(cluster.name, delta))
if delta == 0:
# Zero scaling makes no sense.
continue
elif delta > 0:
self._scale_cluster_up(cluster, delta)
elif delta < 0:
self._scale_cluster_down(cluster, delta)
LOG.debug("Starting Job sequence")
self.create_launch_job_sequence(jobs)

View File

@@ -0,0 +1,95 @@
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.benchmark.scenarios import base
from rally.benchmark import types
from rally.benchmark import validation
from rally import consts
from rally.plugins.openstack.scenarios.sahara import utils
class SaharaNodeGroupTemplates(utils.SaharaScenario):
"""Benchmark scenarios for Sahara node group templates."""
@types.set(flavor=types.FlavorResourceType)
@validation.flavor_exists("flavor")
@validation.required_services(consts.Service.SAHARA)
@validation.required_openstack(users=True)
@base.scenario(context={"cleanup": ["sahara"]})
def create_and_list_node_group_templates(self, flavor,
plugin_name="vanilla",
hadoop_version="1.2.1"):
"""Create and list Sahara Node Group Templates.
This scenario creates two Node Group Templates with different sets of
node processes. The master Node Group Template contains Hadoop's
management processes. The worker Node Group Template contains
Hadoop's worker processes.
By default the templates are created for the vanilla Hadoop
provisioning plugin using version 1.2.1.
After the templates are created the list operation is called.
:param flavor: Nova flavor that will be used for nodes in the
created node groups
:param plugin_name: name of a provisioning plugin
:param hadoop_version: version of Hadoop distribution supported by
the specified plugin.
"""
self._create_master_node_group_template(flavor_id=flavor,
plugin_name=plugin_name,
hadoop_version=hadoop_version)
self._create_worker_node_group_template(flavor_id=flavor,
plugin_name=plugin_name,
hadoop_version=hadoop_version)
self._list_node_group_templates()
@types.set(flavor=types.FlavorResourceType)
@validation.flavor_exists("flavor")
@validation.required_services(consts.Service.SAHARA)
@validation.required_openstack(users=True)
@base.scenario(context={"cleanup": ["sahara"]})
def create_delete_node_group_templates(self, flavor,
plugin_name="vanilla",
hadoop_version="1.2.1"):
"""Create and delete Sahara Node Group Templates.
This scenario creates and deletes the two most common types of
Node Group Templates.
By default the templates are created for the vanilla Hadoop
provisioning plugin using version 1.2.1.
:param flavor: Nova flavor that will be used for nodes in the
created node groups
:param plugin_name: name of a provisioning plugin
:param hadoop_version: version of Hadoop distribution supported by
the specified plugin.
"""
master_ngt = self._create_master_node_group_template(
flavor_id=flavor,
plugin_name=plugin_name,
hadoop_version=hadoop_version)
worker_ngt = self._create_worker_node_group_template(
flavor_id=flavor,
plugin_name=plugin_name,
hadoop_version=hadoop_version)
self._delete_node_group_template(master_ngt)
self._delete_node_group_template(worker_ngt)

View File

@@ -0,0 +1,519 @@
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
from oslo_config import cfg
from oslo_utils import uuidutils
from saharaclient.api import base as sahara_base
from rally.benchmark.scenarios import base
from rally.benchmark import utils as bench_utils
from rally.common.i18n import _
from rally.common import log as logging
from rally import consts
from rally import exceptions
from rally.plugins.openstack.scenarios.sahara import consts as sahara_consts
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
SAHARA_TIMEOUT_OPTS = [
cfg.IntOpt("cluster_create_timeout", default=1800,
help="A timeout in seconds for a cluster create operation"),
cfg.IntOpt("cluster_delete_timeout", default=900,
help="A timeout in seconds for a cluster delete operation"),
cfg.IntOpt("cluster_check_interval", default=5,
help="Cluster status polling interval in seconds"),
cfg.IntOpt("job_execution_timeout", default=600,
help="A timeout in seconds for a Job Execution to complete"),
cfg.IntOpt("job_check_interval", default=5,
help="Job Execution status polling interval in seconds")
]
benchmark_group = cfg.OptGroup(name="benchmark", title="benchmark options")
CONF.register_opts(SAHARA_TIMEOUT_OPTS, group=benchmark_group)
class SaharaScenario(base.Scenario):
"""Base class for Sahara scenarios with basic atomic actions."""
RESOURCE_NAME_LENGTH = 20
@base.atomic_action_timer("sahara.list_node_group_templates")
def _list_node_group_templates(self):
"""Return user Node Group Templates list."""
return self.clients("sahara").node_group_templates.list()
@base.atomic_action_timer("sahara.create_master_node_group_template")
def _create_master_node_group_template(self, flavor_id, plugin_name,
hadoop_version):
"""Create a master Node Group Template with a random name.
:param flavor_id: Nova flavor id to use for the Template's node group
:param plugin_name: Sahara provisioning plugin name
:param hadoop_version: The version of Hadoop distribution supported by
the plugin
:returns: The created Template
"""
name = self._generate_random_name(prefix="master-ngt-")
return self.clients("sahara").node_group_templates.create(
name=name,
plugin_name=plugin_name,
hadoop_version=hadoop_version,
flavor_id=flavor_id,
node_processes=sahara_consts.NODE_PROCESSES[plugin_name]
[hadoop_version]["master"])
@base.atomic_action_timer("sahara.create_worker_node_group_template")
def _create_worker_node_group_template(self, flavor_id, plugin_name,
hadoop_version):
"""Create a worker Node Group Template with a random name.
:param flavor_id: Nova flavor id to use for the Template's node group
:param plugin_name: Sahara provisioning plugin name
:param hadoop_version: The version of Hadoop distribution supported by
the plugin
:returns: The created Template
"""
name = self._generate_random_name(prefix="worker-ngt-")
return self.clients("sahara").node_group_templates.create(
name=name,
plugin_name=plugin_name,
hadoop_version=hadoop_version,
flavor_id=flavor_id,
node_processes=sahara_consts.NODE_PROCESSES[plugin_name]
[hadoop_version]["worker"])
@base.atomic_action_timer("sahara.delete_node_group_template")
def _delete_node_group_template(self, node_group):
"""Delete a Node Group Template by id.
:param node_group: The Node Group Template to be deleted
"""
self.clients("sahara").node_group_templates.delete(node_group.id)
def _wait_active(self, cluster_object):
bench_utils.wait_for(
resource=cluster_object, is_ready=self._is_cluster_active,
update_resource=self._update_cluster,
timeout=CONF.benchmark.cluster_create_timeout,
check_interval=CONF.benchmark.cluster_check_interval)
def _setup_neutron_floating_ip_pool(self, name_or_id):
if name_or_id:
if uuidutils.is_uuid_like(name_or_id):
# Looks like an id is provided. Return as is.
return name_or_id
else:
# It's a name. Changing to id.
for net in self.clients("neutron").list_networks()["networks"]:
if net["name"] == name_or_id:
return net["id"]
# The name was not found in the list. Exit with an error.
raise exceptions.BenchmarkSetupFailure(
"Could not resolve Floating IP Pool name %(name)s to id" %
name_or_id)
else:
# Pool is not provided. Using the one set as GW for current router.
net = self.context["tenant"]["networks"][0]
router_id = net["router_id"]
router = self.clients("neutron").show_router(router_id)["router"]
net_id = router["external_gateway_info"]["network_id"]
return net_id
def _setup_nova_floating_ip_pool(self, name):
if name:
# The name is provided. Returning it as is.
return name
else:
# The name is not provided. Discovering one.
LOG.debug("No Floating IP Pool provided. Taking a random one.")
pools = self.clients("nova").floating_ip_pools.list()
if pools:
return random.choice(pools).name
else:
LOG.warn("No Floating Ip Pools found. This may cause "
"instances to be unreachable.")
return None
def _setup_floating_ip_pool(self, node_groups, floating_ip_pool):
if consts.Service.NEUTRON in self._clients.services().values():
LOG.debug("Neutron detected as networking backend.")
floating_ip_pool_value = self._setup_neutron_floating_ip_pool(
floating_ip_pool)
else:
LOG.debug("Nova Network detected as networking backend.")
floating_ip_pool_value = self._setup_nova_floating_ip_pool(
floating_ip_pool)
if floating_ip_pool_value:
LOG.debug("Using floating ip pool %s." % floating_ip_pool_value)
# If the pool is set by any means assign it to all node groups.
for ng in node_groups:
ng["floating_ip_pool"] = floating_ip_pool_value
return node_groups
def _setup_volumes(self, node_groups, volumes_per_node, volumes_size):
if volumes_per_node:
LOG.debug("Adding volumes config to Node Groups")
for ng in node_groups:
ng["volumes_per_node"] = volumes_per_node
ng["volumes_size"] = volumes_size
return node_groups
def _setup_security_groups(self, node_groups, auto_security_group,
security_groups):
if auto_security_group:
LOG.debug("Auto security group enabled. Adding to Node Groups.")
if security_groups:
LOG.debug("Adding provided Security Groups to Node Groups.")
for ng in node_groups:
if auto_security_group:
ng["auto_security_group"] = auto_security_group
if security_groups:
ng["security_groups"] = security_groups
return node_groups
def _setup_node_configs(self, node_groups, node_configs):
if node_configs:
LOG.debug("Adding Hadoop configs to Node Groups")
for ng in node_groups:
ng["node_configs"] = node_configs
return node_groups
def _setup_replication_config(self, hadoop_version, workers_count,
plugin_name):
replication_value = min(workers_count, 3)
        # 3 is the default Hadoop replication factor
conf = sahara_consts.REPLICATION_CONFIGS[plugin_name][hadoop_version]
LOG.debug("Using replication factor: %s" % replication_value)
replication_config = {
conf["target"]: {
conf["config_name"]: replication_value
}
}
return replication_config
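    # A worked example of the value built above (illustrative: the actual
    # "target" and "config_name" keys come from sahara_consts and vary per
    # plugin). If REPLICATION_CONFIGS[plugin][version] is
    # {"target": "HDFS", "config_name": "dfs.replication"} and
    # workers_count=2, the method returns {"HDFS": {"dfs.replication": 2}},
    # since min(2, 3) == 2.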
@base.atomic_action_timer("sahara.launch_cluster")
def _launch_cluster(self, plugin_name, hadoop_version, flavor_id,
image_id, workers_count, floating_ip_pool=None,
volumes_per_node=None,
volumes_size=None, auto_security_group=None,
security_groups=None, node_configs=None,
cluster_configs=None, enable_anti_affinity=False,
wait_active=True):
"""Create a cluster and wait until it becomes Active.
        The cluster is created with two node groups. The master Node Group is
        created with one instance. The worker node group contains
        workers_count instances.
:param plugin_name: provisioning plugin name
:param hadoop_version: Hadoop version supported by the plugin
:param flavor_id: flavor which will be used to create instances
:param image_id: image id that will be used to boot instances
:param workers_count: number of worker instances. All plugins will
also add one Master instance and some plugins
add a Manager instance.
:param floating_ip_pool: floating ip pool name from which Floating
IPs will be allocated
:param volumes_per_node: number of Cinder volumes that will be
attached to every cluster node
:param volumes_size: size of each Cinder volume in GB
:param auto_security_group: boolean value. If set to True Sahara will
create a Security Group for each Node Group
in the Cluster automatically.
:param security_groups: list of security groups that will be used
while creating VMs. If auto_security_group is
set to True, this list can be left empty.
:param node_configs: configs dict that will be passed to each Node
Group
:param cluster_configs: configs dict that will be passed to the
Cluster
:param enable_anti_affinity: If set to true the vms will be scheduled
one per compute node.
        :param wait_active: Wait until the Cluster reaches the "Active" state
:returns: created cluster
"""
node_groups = [
{
"name": "master-ng",
"flavor_id": flavor_id,
"node_processes": sahara_consts.NODE_PROCESSES[plugin_name]
[hadoop_version]["master"],
"count": 1
}, {
"name": "worker-ng",
"flavor_id": flavor_id,
"node_processes": sahara_consts.NODE_PROCESSES[plugin_name]
[hadoop_version]["worker"],
"count": workers_count
}
]
if "manager" in (sahara_consts.NODE_PROCESSES[plugin_name]
[hadoop_version]):
# Adding manager group separately as it is supported only in
# specific configurations.
node_groups.append({
"name": "manager-ng",
"flavor_id": flavor_id,
"node_processes": sahara_consts.NODE_PROCESSES[plugin_name]
[hadoop_version]["manager"],
"count": 1
})
node_groups = self._setup_floating_ip_pool(node_groups,
floating_ip_pool)
neutron_net_id = self._get_neutron_net_id()
node_groups = self._setup_volumes(node_groups, volumes_per_node,
volumes_size)
node_groups = self._setup_security_groups(node_groups,
auto_security_group,
security_groups)
node_groups = self._setup_node_configs(node_groups, node_configs)
replication_config = self._setup_replication_config(hadoop_version,
workers_count,
plugin_name)
# The replication factor should be set for small clusters. However the
# cluster_configs parameter can override it
merged_cluster_configs = self._merge_configs(replication_config,
cluster_configs)
aa_processes = None
if enable_anti_affinity:
aa_processes = (sahara_consts.ANTI_AFFINITY_PROCESSES[plugin_name]
[hadoop_version])
name = self._generate_random_name(prefix="sahara-cluster-")
cluster_object = self.clients("sahara").clusters.create(
name=name,
plugin_name=plugin_name,
hadoop_version=hadoop_version,
node_groups=node_groups,
default_image_id=image_id,
net_id=neutron_net_id,
cluster_configs=merged_cluster_configs,
anti_affinity=aa_processes
)
if wait_active:
self._wait_active(cluster_object)
return self.clients("sahara").clusters.get(cluster_object.id)
def _update_cluster(self, cluster):
return self.clients("sahara").clusters.get(cluster.id)
def _is_cluster_active(self, cluster):
cluster_status = cluster.status.lower()
if cluster_status == "error":
raise exceptions.SaharaClusterFailure(
name=cluster.name,
action="start",
reason=cluster.status_description)
return cluster_status == "active"
def _scale_cluster(self, cluster, delta):
"""The scaling helper.
This method finds the worker node group in a cluster, builds a
scale_object required by Sahara API and waits for the scaling to
complete.
        NOTE: This method is not meant to be called directly in benchmarks.
        There are two dedicated scaling methods, for scaling up and down,
        each with its own atomic timer.
"""
worker_node_group = [g for g in cluster.node_groups
if "worker" in g["name"]][0]
scale_object = {
"resize_node_groups": [
{
"name": worker_node_group["name"],
"count": worker_node_group["count"] + delta
}
]
}
self.clients("sahara").clusters.scale(cluster.id, scale_object)
self._wait_active(cluster)
@base.atomic_action_timer("sahara.scale_up")
def _scale_cluster_up(self, cluster, delta):
"""Add a given number of worker nodes to the cluster.
:param cluster: The cluster to be scaled
:param delta: The number of workers to be added. (A positive number is
expected here)
"""
self._scale_cluster(cluster, delta)
@base.atomic_action_timer("sahara.scale_down")
def _scale_cluster_down(self, cluster, delta):
"""Remove a given number of worker nodes from the cluster.
:param cluster: The cluster to be scaled
:param delta: The number of workers to be removed. (A negative number
is expected here)
"""
self._scale_cluster(cluster, delta)
@base.atomic_action_timer("sahara.delete_cluster")
def _delete_cluster(self, cluster):
"""Delete cluster.
:param cluster: cluster to delete
"""
self.clients("sahara").clusters.delete(cluster.id)
bench_utils.wait_for(
resource=cluster.id,
timeout=CONF.benchmark.cluster_delete_timeout,
check_interval=CONF.benchmark.cluster_check_interval,
is_ready=self._is_cluster_deleted)
def _is_cluster_deleted(self, cl_id):
try:
self.clients("sahara").clusters.get(cl_id)
return False
except sahara_base.APIException:
return True
def _create_output_ds(self):
"""Create an output Data Source based on EDP context
:return: The created Data Source
"""
ds_type = self.context["sahara_output_conf"]["output_type"]
url_prefix = self.context["sahara_output_conf"]["output_url_prefix"]
if ds_type == "swift":
raise exceptions.RallyException(
_("Swift Data Sources are not implemented yet"))
url = (url_prefix.rstrip("/") + "/%s" %
self._generate_random_name(length=10))
return self.clients("sahara").data_sources.create(
name=self._generate_random_name(prefix="out_"),
description="",
data_source_type=ds_type,
url=url)
def _run_job_execution(self, job_id, cluster_id, input_id, output_id,
configs, job_idx):
"""Run a Job Execution and wait until it completes or fails.
The Job Execution is accepted as successful when Oozie reports
"success" or "succeeded" status. The failure statuses are "failed" and
"killed".
The timeout and the polling interval may be configured through
"job_execution_timeout" and "job_check_interval" parameters under the
"benchmark" section.
:param job_id: The Job id that will be executed
:param cluster_id: The Cluster id which will execute the Job
:param input_id: The input Data Source id
:param output_id: The output Data Source id
:param configs: The config dict that will be passed as Job Execution's
parameters.
:param job_idx: The index of a job in a sequence
"""
@base.atomic_action_timer("sahara.job_execution_%s" % job_idx)
def run(self):
job_execution = self.clients("sahara").job_executions.create(
job_id=job_id,
cluster_id=cluster_id,
input_id=input_id,
output_id=output_id,
configs=configs)
bench_utils.wait_for(
resource=job_execution.id,
is_ready=self._job_execution_is_finished,
timeout=CONF.benchmark.job_execution_timeout,
check_interval=CONF.benchmark.job_check_interval)
run(self)
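        # NOTE: run() is defined inside this method so that the atomic
        # action name can embed job_idx. Decorating it with
        # base.atomic_action_timer("sahara.job_execution_%s" % job_idx)
        # yields a separate timer per job in a sequence, e.g.
        # "sahara.job_execution_0", "sahara.job_execution_1".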
def _job_execution_is_finished(self, je_id):
status = self.clients("sahara").job_executions.get(je_id).info[
"status"]
if status.lower() in ("success", "succeeded"):
return True
elif status.lower() in ("failed", "killed"):
raise exceptions.RallyException(
"Job execution %s has failed" % je_id)
return False
def _merge_configs(self, *configs):
"""Merge configs in special format.
It supports merging of configs in the following format:
applicable_target -> config_name -> config_value
"""
result = {}
for config_dict in configs:
if config_dict:
for a_target in config_dict:
if a_target not in result or not result[a_target]:
result[a_target] = {}
result[a_target].update(config_dict[a_target])
return result
def _get_neutron_net_id(self):
"""Get the Neutron Network id from context.
If Nova Network is used as networking backend, None is returned.
        :returns: Network id for Neutron or None for Nova Networking.
"""
if consts.Service.NEUTRON not in self._clients.services().values():
return None
# Taking net id from context.
net = self.context["tenant"]["networks"][0]
neutron_net_id = net["id"]
LOG.debug("Using neutron network %s." % neutron_net_id)
LOG.debug("Using neutron router %s." % net["router_id"])
return neutron_net_id
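
For reference, a minimal standalone sketch of the config-merge semantics that _launch_cluster relies on. The merge rule mirrors _merge_configs above; the "HDFS"/"dfs.replication" keys are illustrative, not taken from this module:

def merge_configs(*configs):
    # Same rule as SaharaScenario._merge_configs: configs are nested as
    # applicable_target -> config_name -> config_value, and later dicts
    # override earlier ones per target.
    result = {}
    for config_dict in configs:
        if config_dict:
            for a_target in config_dict:
                if a_target not in result or not result[a_target]:
                    result[a_target] = {}
                result[a_target].update(config_dict[a_target])
    return result

# The replication factor set for small clusters can be overridden by the
# caller-supplied cluster_configs, as _launch_cluster notes:
replication = {"HDFS": {"dfs.replication": 2}}
user_supplied = {"HDFS": {"dfs.replication": 3}}
assert merge_configs(replication, user_supplied) == {
    "HDFS": {"dfs.replication": 3}}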

View File

@ -0,0 +1,124 @@
# Copyright 2015: Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import tempfile
from rally.benchmark.scenarios import base
from rally.benchmark import validation
from rally import consts
from rally.plugins.openstack.scenarios.swift import utils
class SwiftObjects(utils.SwiftScenario):
"""Benchmark scenarios for Swift Objects."""
@validation.required_services(consts.Service.SWIFT)
@validation.required_openstack(users=True)
@base.scenario(context={"cleanup": ["swift"]})
def create_container_and_object_then_list_objects(
self, objects_per_container=1,
object_size=1024, **kwargs):
"""Create container and objects then list all objects.
:param objects_per_container: int, number of objects to upload
:param object_size: int, temporary local object size
:param kwargs: dict, optional parameters to create container
"""
key_suffix = "object"
if objects_per_container > 1:
key_suffix = "%i_objects" % objects_per_container
container_name = None
with tempfile.TemporaryFile() as dummy_file:
# set dummy file to specified object size
dummy_file.truncate(object_size)
container_name = self._create_container(**kwargs)
with base.AtomicAction(self, "swift.create_%s" % key_suffix):
for i in range(objects_per_container):
dummy_file.seek(0)
self._upload_object(container_name, dummy_file,
atomic_action=False)
self._list_objects(container_name)
@validation.required_services(consts.Service.SWIFT)
@validation.required_openstack(users=True)
@base.scenario(context={"cleanup": ["swift"]})
def create_container_and_object_then_delete_all(
self, objects_per_container=1,
object_size=1024, **kwargs):
"""Create container and objects then delete everything created.
:param objects_per_container: int, number of objects to upload
:param object_size: int, temporary local object size
:param kwargs: dict, optional parameters to create container
"""
key_suffix = "object"
if objects_per_container > 1:
key_suffix = "%i_objects" % objects_per_container
container_name = None
objects_list = []
with tempfile.TemporaryFile() as dummy_file:
# set dummy file to specified object size
dummy_file.truncate(object_size)
container_name = self._create_container(**kwargs)
with base.AtomicAction(self, "swift.create_%s" % key_suffix):
for i in range(objects_per_container):
dummy_file.seek(0)
object_name = self._upload_object(container_name,
dummy_file,
atomic_action=False)[1]
objects_list.append(object_name)
with base.AtomicAction(self, "swift.delete_%s" % key_suffix):
for object_name in objects_list:
self._delete_object(container_name, object_name,
atomic_action=False)
self._delete_container(container_name)
@validation.required_services(consts.Service.SWIFT)
@validation.required_openstack(users=True)
@base.scenario(context={"cleanup": ["swift"]})
def create_container_and_object_then_download_object(
self, objects_per_container=1,
object_size=1024, **kwargs):
"""Create container and objects then download all objects.
:param objects_per_container: int, number of objects to upload
:param object_size: int, temporary local object size
:param kwargs: dict, optional parameters to create container
"""
key_suffix = "object"
if objects_per_container > 1:
key_suffix = "%i_objects" % objects_per_container
container_name = None
objects_list = []
with tempfile.TemporaryFile() as dummy_file:
# set dummy file to specified object size
dummy_file.truncate(object_size)
container_name = self._create_container(**kwargs)
with base.AtomicAction(self, "swift.create_%s" % key_suffix):
for i in range(objects_per_container):
dummy_file.seek(0)
object_name = self._upload_object(container_name,
dummy_file,
atomic_action=False)[1]
objects_list.append(object_name)
with base.AtomicAction(self, "swift.download_%s" % key_suffix):
for object_name in objects_list:
self._download_object(container_name, object_name,
atomic_action=False)
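
All three scenarios above share one batching pattern: per-object helper calls pass atomic_action=False and the loop is wrapped in a single AtomicAction, so uploading N objects is reported as one timed action ("swift.create_N_objects") rather than N separate ones. A minimal sketch of the same pattern for a hypothetical extra scenario (the method name is illustrative and not part of this change):

    @validation.required_services(consts.Service.SWIFT)
    @validation.required_openstack(users=True)
    @base.scenario(context={"cleanup": ["swift"]})
    def create_container_and_upload_objects(self, objects_per_container=1,
                                            object_size=1024, **kwargs):
        """Hypothetical scenario reusing the batching pattern above."""
        with tempfile.TemporaryFile() as dummy_file:
            dummy_file.truncate(object_size)
            container_name = self._create_container(**kwargs)
            with base.AtomicAction(self, "swift.create_%i_objects"
                                   % objects_per_container):
                for _ in range(objects_per_container):
                    dummy_file.seek(0)
                    self._upload_object(container_name, dummy_file,
                                        atomic_action=False)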

View File

@ -0,0 +1,163 @@
# Copyright 2015: Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.benchmark.scenarios import base
class SwiftScenario(base.Scenario):
"""Base class for Swift scenarios with basic atomic actions."""
@base.atomic_action_timer("swift.list_containers")
def _list_containers(self, full_listing=True, **kwargs):
"""Return list of containers.
        :param full_listing: bool, return an unlimited number of listing
                             results
:param kwargs: dict, other optional parameters to get_account
:returns: tuple, (dict of response headers, a list of containers)
"""
return self.clients("swift").get_account(full_listing=full_listing,
**kwargs)
def _create_container(self, container_name=None, public=False,
atomic_action=True, **kwargs):
"""Create a new container with given name.
:param container_name: str, name of the container to create
:param public: bool, set container as public
:param atomic_action: bool, enable create container to be tracked as an
atomic action
:param kwargs: dict, other optional parameters to put_container
:returns: container name
"""
if public:
kwargs.setdefault("headers", {})
kwargs["headers"].setdefault("X-Container-Read", ".r:*,.rlistings")
if container_name is None:
container_name = self._generate_random_name(
prefix="rally_container_")
if atomic_action:
with base.AtomicAction(self, "swift.create_container"):
self.clients("swift").put_container(container_name, **kwargs)
else:
self.clients("swift").put_container(container_name, **kwargs)
return container_name
def _delete_container(self, container_name, atomic_action=True, **kwargs):
"""Delete a container with given name.
:param container_name: str, name of the container to delete
:param atomic_action: bool, enable delete container to be tracked as an
atomic action
:param kwargs: dict, other optional parameters to delete_container
"""
if atomic_action:
with base.AtomicAction(self, "swift.delete_container"):
self.clients("swift").delete_container(container_name,
**kwargs)
else:
self.clients("swift").delete_container(container_name, **kwargs)
def _list_objects(self, container_name, full_listing=True,
atomic_action=True, **kwargs):
"""Return objects inside container.
:param container_name: str, name of the container to make the list
objects operation against
        :param full_listing: bool, return an unlimited number of listing
                             results
:param atomic_action: bool, enable list objects to be tracked as an
atomic action
:param kwargs: dict, other optional parameters to get_container
:returns: tuple, (dict of response headers, a list of objects)
"""
if atomic_action:
with base.AtomicAction(self, "swift.list_objects"):
return self.clients("swift").get_container(
container_name, full_listing=full_listing,
**kwargs)
return self.clients("swift").get_container(container_name,
full_listing=full_listing,
**kwargs)
def _upload_object(self, container_name, content, object_name=None,
atomic_action=True, **kwargs):
"""Upload content to a given container.
:param container_name: str, name of the container to upload object to
:param content: file stream, content to upload
:param object_name: str, name of the object to upload
:param atomic_action: bool, enable upload object to be tracked as an
atomic action
:param kwargs: dict, other optional parameters to put_object
:returns: tuple, (etag and object name)
"""
if object_name is None:
object_name = self._generate_random_name(prefix="rally_object_")
if atomic_action:
with base.AtomicAction(self, "swift.upload_object"):
return (self.clients("swift").put_object(container_name,
object_name, content,
**kwargs),
object_name)
return (self.clients("swift").put_object(container_name, object_name,
content, **kwargs),
object_name)
def _download_object(self, container_name, object_name, atomic_action=True,
**kwargs):
"""Download object from container.
:param container_name: str, name of the container to download object
from
:param object_name: str, name of the object to download
:param atomic_action: bool, enable download object to be tracked as an
atomic action
:param kwargs: dict, other optional parameters to get_object
:returns: tuple, (dict of response headers, the object's contents)
"""
if atomic_action:
with base.AtomicAction(self, "swift.download_object"):
return self.clients("swift").get_object(container_name,
object_name, **kwargs)
return self.clients("swift").get_object(container_name, object_name,
**kwargs)
def _delete_object(self, container_name, object_name, atomic_action=True,
**kwargs):
"""Delete object from container.
:param container_name: str, name of the container to delete object from
:param object_name: str, name of the object to delete
:param atomic_action: bool, enable delete object to be tracked as an
atomic action
:param kwargs: dict, other optional parameters to delete_object
"""
if atomic_action:
with base.AtomicAction(self, "swift.delete_object"):
self.clients("swift").delete_object(container_name,
object_name, **kwargs)
else:
self.clients("swift").delete_object(container_name, object_name,
**kwargs)
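
Taken together, these helpers compose into the usual container/object lifecycle. A minimal sketch as it would appear inside a SwiftScenario method (content_stream is an assumed file-like object, not part of this module):

    container = self._create_container(public=False)
    etag, obj_name = self._upload_object(container, content_stream)
    headers, body = self._download_object(container, obj_name)
    self._delete_object(container, obj_name)
    self._delete_container(container)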

View File

@ -22,7 +22,6 @@ from tests.unit import test
CONF = cfg.CONF
CTX = "rally.plugins.openstack.context.sahara"
SCN = "rally.benchmark.scenarios"
class SaharaClusterTestCase(test.TestCase):

View File

@ -22,7 +22,7 @@ from tests.unit import test
BASE_CTX = "rally.benchmark.context"
CTX = "rally.plugins.openstack.context.sahara.sahara_image"
BASE_SCN = "rally.benchmark.scenarios"
SCN = "rally.benchmark.scenarios"
SCN = "rally.plugins.openstack.scenarios"
class SaharaImageTestCase(test.TestCase):

View File

@ -0,0 +1,253 @@
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from oslotest import mockpatch
from rally.plugins.openstack.scenarios.cinder import utils
from tests.unit import test
BM_UTILS = "rally.benchmark.utils"
CINDER_UTILS = "rally.plugins.openstack.scenarios.cinder.utils"
class CinderScenarioTestCase(test.TestCase):
def setUp(self):
super(CinderScenarioTestCase, self).setUp()
self.res_is = mockpatch.Patch(BM_UTILS + ".resource_is")
self.get_fm = mockpatch.Patch(BM_UTILS + ".get_from_manager")
self.wait_for = mockpatch.Patch(CINDER_UTILS + ".bench_utils.wait_for")
self.wait_for_delete = mockpatch.Patch(
CINDER_UTILS + ".bench_utils.wait_for_delete")
self.useFixture(self.wait_for)
self.useFixture(self.wait_for_delete)
self.useFixture(self.res_is)
self.useFixture(self.get_fm)
self.gfm = self.get_fm.mock
self.useFixture(mockpatch.Patch("time.sleep"))
self.scenario = utils.CinderScenario()
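        # NOTE: each mockpatch.Patch registered via useFixture replaces its
        # target for the duration of a test and is restored automatically at
        # cleanup; the replacement MagicMock is exposed as the fixture's
        # .mock attribute, which is why the assertions below read e.g.
        # self.wait_for.mock.assert_called_once_with(...).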
@mock.patch(CINDER_UTILS + ".CinderScenario.clients")
def test__list_volumes(self, mock_clients):
volumes_list = mock.Mock()
mock_clients("cinder").volumes.list.return_value = volumes_list
return_volumes_list = self.scenario._list_volumes()
self.assertEqual(volumes_list, return_volumes_list)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"cinder.list_volumes")
@mock.patch(CINDER_UTILS + ".CinderScenario.clients")
def test__list_snapshots(self, mock_clients):
snapsht_lst = mock.Mock()
mock_clients("cinder").volume_snapshots.list.return_value = snapsht_lst
return_snapshots_list = self.scenario._list_snapshots()
self.assertEqual(snapsht_lst, return_snapshots_list)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"cinder.list_snapshots")
@mock.patch(CINDER_UTILS + ".CinderScenario.clients")
def test__create_volume(self, mock_clients):
CONF = cfg.CONF
volume = mock.Mock()
mock_clients("cinder").volumes.create.return_value = volume
return_volume = self.scenario._create_volume(1)
self.wait_for.mock.assert_called_once_with(
volume,
is_ready=self.res_is.mock(),
update_resource=self.gfm(),
timeout=CONF.benchmark.cinder_volume_create_timeout,
check_interval=CONF.benchmark.cinder_volume_create_poll_interval
)
self.res_is.mock.assert_has_calls([mock.call("available")])
self.assertEqual(self.wait_for.mock(), return_volume)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"cinder.create_volume")
@mock.patch("rally.plugins.openstack.scenarios.cinder.utils.random")
@mock.patch(CINDER_UTILS + ".CinderScenario.clients")
def test__create_volume_with_size_range(self, mock_clients, mock_random):
CONF = cfg.CONF
volume = mock.Mock()
mock_clients("cinder").volumes.create.return_value = volume
mock_random.randint.return_value = 3
return_volume = self.scenario._create_volume(
size={"min": 1, "max": 5},
display_name="TestVolume")
mock_clients("cinder").volumes.create.assert_called_once_with(
3, display_name="TestVolume")
self.wait_for.mock.assert_called_once_with(
volume,
is_ready=self.res_is.mock(),
update_resource=self.gfm(),
timeout=CONF.benchmark.cinder_volume_create_timeout,
check_interval=CONF.benchmark.cinder_volume_create_poll_interval
)
self.res_is.mock.assert_has_calls([mock.call("available")])
self.assertEqual(self.wait_for.mock(), return_volume)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"cinder.create_volume")
def test__delete_volume(self):
cinder = mock.Mock()
self.scenario._delete_volume(cinder)
cinder.delete.assert_called_once_with()
self.wait_for_delete.mock.assert_called_once_with(
cinder,
update_resource=self.gfm(),
timeout=cfg.CONF.benchmark.cinder_volume_create_timeout,
check_interval=cfg.CONF.benchmark
.cinder_volume_create_poll_interval)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"cinder.delete_volume")
@mock.patch("rally.plugins.openstack.scenarios.cinder.utils.random")
@mock.patch(CINDER_UTILS + ".CinderScenario.clients")
def test__extend_volume_with_size_range(self, mock_clients, mock_random):
CONF = cfg.CONF
volume = mock.Mock()
mock_random.randint.return_value = 3
mock_clients("cinder").volumes.extend.return_value = volume
self.scenario._extend_volume(volume, new_size={"min": 1, "max": 5})
volume.extend.assert_called_once_with(volume, 3)
self.wait_for.mock.assert_called_once_with(
volume,
is_ready=self.res_is.mock(),
update_resource=self.gfm(),
timeout=CONF.benchmark.cinder_volume_create_timeout,
check_interval=CONF.benchmark.cinder_volume_create_poll_interval
)
self.res_is.mock.assert_has_calls([mock.call("available")])
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"cinder.extend_volume")
@mock.patch(CINDER_UTILS + ".CinderScenario.clients")
def test__extend_volume(self, mock_clients):
CONF = cfg.CONF
volume = mock.Mock()
mock_clients("cinder").volumes.extend.return_value = volume
self.scenario._extend_volume(volume, 2)
self.wait_for.mock.assert_called_once_with(
volume,
is_ready=self.res_is.mock(),
update_resource=self.gfm(),
timeout=CONF.benchmark.cinder_volume_create_timeout,
check_interval=CONF.benchmark.cinder_volume_create_poll_interval
)
self.res_is.mock.assert_has_calls([mock.call("available")])
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"cinder.extend_volume")
@mock.patch(CINDER_UTILS + ".CinderScenario.clients")
def test__upload_volume_to_image(self, mock_clients):
volume = mock.Mock()
image = {"os-volume_upload_image": {"image_id": 1}}
volume.upload_to_image.return_value = (None, image)
mock_clients("cinder").images.get.return_value = image
self.scenario._generate_random_name = mock.Mock(
return_value="test_vol")
self.scenario._upload_volume_to_image(volume, False,
"container", "disk")
volume.upload_to_image.assert_called_once_with(False, "test_vol",
"container", "disk")
self.assertTrue(self.wait_for.mock.called)
self.assertEqual(2, self.wait_for.mock.call_count)
@mock.patch(CINDER_UTILS + ".CinderScenario.clients")
def test__create_snapshot(self, mock_clients):
snapshot = mock.Mock()
mock_clients("cinder").volume_snapshots.create.return_value = snapshot
return_snapshot = self.scenario._create_snapshot("uuid", False)
self.wait_for.mock.assert_called_once_with(
snapshot,
is_ready=self.res_is.mock(),
update_resource=self.gfm(),
timeout=cfg.CONF.benchmark.cinder_volume_create_timeout,
check_interval=cfg.CONF.benchmark
.cinder_volume_create_poll_interval)
self.res_is.mock.assert_has_calls([mock.call("available")])
self.assertEqual(self.wait_for.mock(), return_snapshot)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"cinder.create_snapshot")
def test__delete_snapshot(self):
snapshot = mock.Mock()
self.scenario._delete_snapshot(snapshot)
snapshot.delete.assert_called_once_with()
self.wait_for_delete.mock.assert_called_once_with(
snapshot,
update_resource=self.gfm(),
timeout=cfg.CONF.benchmark.cinder_volume_create_timeout,
check_interval=cfg.CONF.benchmark
.cinder_volume_create_poll_interval)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"cinder.delete_snapshot")
@mock.patch(CINDER_UTILS + ".CinderScenario.clients")
def test__create_backup(self, mock_clients):
backup = mock.Mock()
mock_clients("cinder").backups.create.return_value = backup
return_backup = self.scenario._create_backup("uuid")
self.wait_for.mock.assert_called_once_with(
backup,
is_ready=self.res_is.mock(),
update_resource=self.gfm(),
timeout=cfg.CONF.benchmark.cinder_volume_create_timeout,
check_interval=cfg.CONF.benchmark
.cinder_volume_create_poll_interval)
self.res_is.mock.assert_has_calls([mock.call("available")])
self.assertEqual(self.wait_for.mock(), return_backup)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"cinder.create_backup")
def test__delete_backup(self):
backup = mock.Mock()
self.scenario._delete_backup(backup)
backup.delete.assert_called_once_with()
self.wait_for_delete.mock.assert_called_once_with(
backup,
update_resource=self.gfm(),
timeout=cfg.CONF.benchmark.cinder_volume_create_timeout,
check_interval=cfg.CONF.benchmark
.cinder_volume_create_poll_interval)
self._test_atomic_action_timer(self.scenario.atomic_actions(),
"cinder.delete_backup")
def test__get_random_server(self):
servers = [1, 2, 3]
context = {"user": {"tenant_id": "fake"},
"users": [{"tenant_id": "fake",
"users_per_tenant": 1}],
"tenant": {"id": "fake", "servers": servers}}
self.scenario.context = context
self.scenario.clients = mock.Mock()
self.scenario.clients("nova").servers.get = mock.Mock(
side_effect=lambda arg: arg)
server_id = self.scenario.get_random_server()
self.assertIn(server_id, servers)

View File

@ -0,0 +1,399 @@
# Copyright 2013 Huawei Technologies Co.,LTD.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from rally.plugins.openstack.scenarios.cinder import volumes
from tests.unit import fakes
from tests.unit import test
CINDER_VOLUMES = ("rally.plugins.openstack.scenarios.cinder.volumes"
".CinderVolumes")
class fake_type(object):
name = "fake"
class CinderServersTestCase(test.TestCase):
def test_create_and_list_volume(self):
scenario = volumes.CinderVolumes()
scenario._create_volume = mock.MagicMock()
scenario._list_volumes = mock.MagicMock()
scenario.create_and_list_volume(1, True, fakearg="f")
scenario._create_volume.assert_called_once_with(1, fakearg="f")
scenario._list_volumes.assert_called_once_with(True)
def test_list_volumes(self):
scenario = volumes.CinderVolumes()
scenario._list_volumes = mock.MagicMock()
scenario.list_volumes(True)
scenario._list_volumes.assert_called_once_with(True)
def test_create_and_delete_volume(self):
fake_volume = mock.MagicMock()
scenario = volumes.CinderVolumes()
scenario._create_volume = mock.MagicMock(return_value=fake_volume)
scenario.sleep_between = mock.MagicMock()
scenario._delete_volume = mock.MagicMock()
scenario.create_and_delete_volume(size=1, min_sleep=10, max_sleep=20,
fakearg="f")
scenario._create_volume.assert_called_once_with(1, fakearg="f")
scenario.sleep_between.assert_called_once_with(10, 20)
scenario._delete_volume.assert_called_once_with(fake_volume)
def test_create_volume(self):
fake_volume = mock.MagicMock()
scenario = volumes.CinderVolumes()
scenario._create_volume = mock.MagicMock(return_value=fake_volume)
scenario.create_volume(1, fakearg="f")
scenario._create_volume.assert_called_once_with(1, fakearg="f")
def test_create_and_extend_volume(self):
fake_volume = mock.MagicMock()
scenario = volumes.CinderVolumes()
scenario._create_volume = mock.MagicMock(return_value=fake_volume)
scenario._extend_volume = mock.MagicMock(return_value=fake_volume)
scenario.sleep_between = mock.MagicMock()
scenario._delete_volume = mock.MagicMock()
scenario.create_and_extend_volume(1, 2, 10, 20, fakearg="f")
scenario._create_volume.assert_called_once_with(1, fakearg="f")
self.assertTrue(scenario._extend_volume.called)
scenario.sleep_between.assert_called_once_with(10, 20)
scenario._delete_volume.assert_called_once_with(fake_volume)
def test_create_from_image_and_delete_volume(self):
fake_volume = mock.MagicMock()
scenario = volumes.CinderVolumes()
scenario._create_volume = mock.MagicMock(return_value=fake_volume)
scenario._delete_volume = mock.MagicMock()
scenario.create_and_delete_volume(1, image="fake_image")
scenario._create_volume.assert_called_once_with(1,
imageRef="fake_image")
scenario._delete_volume.assert_called_once_with(fake_volume)
def test_create_volume_from_image(self):
fake_volume = mock.MagicMock()
scenario = volumes.CinderVolumes()
scenario._create_volume = mock.MagicMock(return_value=fake_volume)
scenario.create_volume(1, image="fake_image")
scenario._create_volume.assert_called_once_with(1,
imageRef="fake_image")
def test_create_volume_from_image_and_list(self):
fake_volume = mock.MagicMock()
scenario = volumes.CinderVolumes()
scenario._create_volume = mock.MagicMock(return_value=fake_volume)
scenario._list_volumes = mock.MagicMock()
scenario.create_and_list_volume(1, True, "fake_image")
scenario._create_volume.assert_called_once_with(1,
imageRef="fake_image")
scenario._list_volumes.assert_called_once_with(True)
def test_create_from_volume_and_delete_volume(self):
fake_volume = mock.MagicMock()
vol_size = 1
scenario = volumes.CinderVolumes(
context={"user": {"tenant_id": "fake"},
"tenant": {"id": "fake", "name": "fake",
"volumes": [{"id": "uuid"}]}})
scenario._create_volume = mock.MagicMock(return_value=fake_volume)
scenario._delete_volume = mock.MagicMock()
scenario.create_from_volume_and_delete_volume(vol_size)
scenario._create_volume.assert_called_once_with(1, source_volid="uuid")
scenario._delete_volume.assert_called_once_with(fake_volume)
def test_create_and_delete_snapshot(self):
fake_snapshot = mock.MagicMock()
scenario = volumes.CinderVolumes(
context={"user": {"tenant_id": "fake"},
"tenant": {"id": "fake", "name": "fake",
"volumes": [{"id": "uuid"}]}})
scenario._create_snapshot = mock.MagicMock(return_value=fake_snapshot)
scenario.sleep_between = mock.MagicMock()
scenario._delete_snapshot = mock.MagicMock()
scenario.create_and_delete_snapshot(False, 10, 20, fakearg="f")
scenario._create_snapshot.assert_called_once_with("uuid", force=False,
fakearg="f")
scenario.sleep_between.assert_called_once_with(10, 20)
scenario._delete_snapshot.assert_called_once_with(fake_snapshot)
def test_create_and_list_snapshots(self):
fake_snapshot = mock.MagicMock()
scenario = volumes.CinderVolumes(
context={"user": {"tenant_id": "fake"},
"tenant": {"id": "fake", "name": "fake",
"volumes": [{"id": "uuid"}]}})
scenario._create_snapshot = mock.MagicMock(return_value=fake_snapshot)
scenario._list_snapshots = mock.MagicMock()
scenario.create_and_list_snapshots(False, True, fakearg="f")
scenario._create_snapshot.assert_called_once_with("uuid", force=False,
fakearg="f")
scenario._list_snapshots.assert_called_once_with(True)
def test_create_and_attach_volume(self):
fake_volume = mock.MagicMock()
fake_server = mock.MagicMock()
scenario = volumes.CinderVolumes()
scenario._attach_volume = mock.MagicMock()
scenario._detach_volume = mock.MagicMock()
scenario._boot_server = mock.MagicMock(return_value=fake_server)
scenario._delete_server = mock.MagicMock()
scenario._create_volume = mock.MagicMock(return_value=fake_volume)
scenario._delete_volume = mock.MagicMock()
scenario.create_and_attach_volume(10, "img", "0")
scenario._attach_volume.assert_called_once_with(fake_server,
fake_volume)
scenario._detach_volume.assert_called_once_with(fake_server,
fake_volume)
scenario._delete_volume.assert_called_once_with(fake_volume)
scenario._delete_server.assert_called_once_with(fake_server)
def test_create_and_upload_volume_to_image(self):
fake_volume = mock.Mock()
fake_image = mock.Mock()
scenario = volumes.CinderVolumes()
scenario._create_volume = mock.MagicMock(return_value=fake_volume)
scenario._upload_volume_to_image = mock.MagicMock(
return_value=fake_image)
scenario._delete_volume = mock.MagicMock()
scenario._delete_image = mock.MagicMock()
scenario.create_and_upload_volume_to_image(2,
container_format="fake",
disk_format="disk",
do_delete=False)
scenario._create_volume.assert_called_once_with(2)
scenario._upload_volume_to_image.assert_called_once_with(fake_volume,
False,
"fake",
"disk")
scenario._create_volume.reset_mock()
scenario._upload_volume_to_image.reset_mock()
scenario.create_and_upload_volume_to_image(1, do_delete=True)
scenario._create_volume.assert_called_once_with(1)
scenario._upload_volume_to_image.assert_called_once_with(fake_volume,
False,
"bare",
"raw")
scenario._delete_volume.assert_called_once_with(fake_volume)
scenario._delete_image.assert_called_once_with(fake_image)
def test_create_snapshot_and_attach_volume(self):
fake_volume = mock.MagicMock()
fake_snapshot = mock.MagicMock()
fake_server = mock.MagicMock()
context = {"user": {"tenant_id": "fake"},
"users": [{"tenant_id": "fake", "users_per_tenant": 1}],
"tenant": {"id": "fake", "name": "fake", "servers": [1]}}
scenario = volumes.CinderVolumes(context)
scenario._attach_volume = mock.MagicMock()
scenario._detach_volume = mock.MagicMock()
scenario._boot_server = mock.MagicMock(return_value=fake_server)
scenario._delete_server = mock.MagicMock()
scenario._create_volume = mock.MagicMock(return_value=fake_volume)
scenario._delete_volume = mock.MagicMock()
scenario._create_snapshot = mock.MagicMock(return_value=fake_snapshot)
scenario._delete_snapshot = mock.MagicMock()
scenario.clients = mock.MagicMock()
scenario.clients("nova").servers.get = mock.MagicMock(
return_value=fake_server)
scenario.create_snapshot_and_attach_volume()
self.assertTrue(scenario._create_volume.called)
scenario._create_snapshot.assert_called_once_with(fake_volume.id,
False)
scenario._delete_snapshot.assert_called_once_with(fake_snapshot)
scenario._attach_volume.assert_called_once_with(fake_server,
fake_volume)
scenario._detach_volume.assert_called_once_with(fake_server,
fake_volume)
scenario._delete_volume.assert_called_once_with(fake_volume)
def test_create_snapshot_and_attach_volume_use_volume_type(self):
fake_volume = mock.MagicMock()
fake_snapshot = mock.MagicMock()
fake_server = mock.MagicMock()
context = {"user": {"tenant_id": "fake"},
"users": [{"tenant_id": "fake", "users_per_tenant": 1}],
"tenant": {"id": "fake", "name": "fake", "servers": [1]}}
scenario = volumes.CinderVolumes(context)
scenario._attach_volume = mock.MagicMock()
scenario._detach_volume = mock.MagicMock()
scenario._boot_server = mock.MagicMock(return_value=fake_server)
scenario._delete_server = mock.MagicMock()
scenario._create_volume = mock.MagicMock(return_value=fake_volume)
scenario._delete_volume = mock.MagicMock()
scenario._create_snapshot = mock.MagicMock(return_value=fake_snapshot)
scenario._delete_snapshot = mock.MagicMock()
fake = fake_type()
scenario.clients = mock.MagicMock()
scenario.clients("cinder").volume_types.list = mock.MagicMock(
return_value=[fake])
scenario.clients("nova").servers.get = mock.MagicMock(
return_value=fake_server)
scenario.create_snapshot_and_attach_volume(volume_type=True)
        # Make sure the volume was created with the correct volume type:
        # fake.name or None (randomly selected).
self.assertTrue(scenario._create_volume.called)
vol_type = scenario._create_volume.call_args_list[0][1]["volume_type"]
self.assertTrue(vol_type is fake.name or vol_type is None)
scenario._create_snapshot.assert_called_once_with(fake_volume.id,
False)
scenario._delete_snapshot.assert_called_once_with(fake_snapshot)
scenario._attach_volume.assert_called_once_with(fake_server,
fake_volume)
scenario._detach_volume.assert_called_once_with(fake_server,
fake_volume)
scenario._delete_volume.assert_called_once_with(fake_volume)
def test_create_nested_snapshots_and_attach_volume(self):
fake_volume = mock.MagicMock()
fake_snapshot = mock.MagicMock()
fake_clients = fakes.FakeClients()
fake_server = fake_clients.nova().servers.create("test_server",
"image_id_01",
"flavor_id_01")
scenario = volumes.CinderVolumes(
context={"user": {"tenant_id": "fake"},
"users": [{"tenant_id": "fake", "users_per_tenant": 1}],
"tenant": {"id": "fake", "name": "fake",
"servers": [fake_server.uuid]}})
scenario._attach_volume = mock.MagicMock()
scenario._detach_volume = mock.MagicMock()
scenario._delete_server = mock.MagicMock()
scenario._create_volume = mock.MagicMock(return_value=fake_volume)
scenario._delete_volume = mock.MagicMock()
scenario._create_snapshot = mock.MagicMock(return_value=fake_snapshot)
scenario._delete_snapshot = mock.MagicMock()
scenario._clients = fake_clients
scenario.create_nested_snapshots_and_attach_volume()
volume_count = scenario._create_volume.call_count
snapshots_count = scenario._create_snapshot.call_count
attached_count = scenario._attach_volume.call_count
self.assertEqual(scenario._delete_volume.call_count, volume_count)
self.assertEqual(scenario._delete_snapshot.call_count, snapshots_count)
self.assertEqual(scenario._detach_volume.call_count, attached_count)
def test_create_nested_snapshots_calls_order(self):
fake_volume1 = mock.MagicMock()
fake_volume2 = mock.MagicMock()
fake_snapshot1 = mock.MagicMock()
fake_snapshot2 = mock.MagicMock()
fake_clients = fakes.FakeClients()
fake_server = fake_clients.nova().servers.create("test_server",
"image_id_01",
"flavor_id_01")
scenario = volumes.CinderVolumes(
context={"user": {"tenant_id": "fake"},
"users": [{"tenant_id": "fake", "users_per_tenant": 1}],
"tenant": {"id": "fake", "name": "fake",
"servers": [fake_server.uuid]}})
scenario._attach_volume = mock.MagicMock()
scenario._detach_volume = mock.MagicMock()
scenario._delete_server = mock.MagicMock()
scenario._create_volume = mock.MagicMock(
side_effect=[fake_volume1, fake_volume2])
scenario._delete_volume = mock.MagicMock()
scenario._create_snapshot = mock.MagicMock(
side_effect=[fake_snapshot1, fake_snapshot2])
scenario._delete_snapshot = mock.MagicMock()
scenario._clients = fake_clients
scenario.create_nested_snapshots_and_attach_volume(
nested_level={"min": 2, "max": 2})
vol_delete_calls = [mock.call(fake_volume2), mock.call(fake_volume1)]
snap_delete_calls = [mock.call(fake_snapshot2),
mock.call(fake_snapshot1)]
scenario._delete_volume.assert_has_calls(vol_delete_calls)
scenario._delete_snapshot.assert_has_calls(snap_delete_calls)
def test_create_volume_backup(self):
fake_volume = mock.MagicMock()
fake_backup = mock.MagicMock()
scenario = volumes.CinderVolumes()
scenario._create_volume = mock.MagicMock(return_value=fake_volume)
scenario._create_backup = mock.MagicMock(return_value=fake_backup)
scenario._delete_volume = mock.MagicMock()
scenario._delete_backup = mock.MagicMock()
scenario.create_volume_backup(1, do_delete=True, fakearg="f")
scenario._create_volume.assert_called_once_with(1, fakearg="f")
scenario._create_backup.assert_called_once_with(fake_volume.id,
fakearg="f")
scenario._delete_volume.assert_called_once_with(fake_volume)
scenario._delete_backup.assert_called_once_with(fake_backup)
def test_create_volume_backup_no_delete(self):
fake_volume = mock.MagicMock()
fake_backup = mock.MagicMock()
scenario = volumes.CinderVolumes()
scenario._create_volume = mock.MagicMock(return_value=fake_volume)
scenario._create_backup = mock.MagicMock(return_value=fake_backup)
scenario._delete_volume = mock.MagicMock()
scenario._delete_backup = mock.MagicMock()
scenario.create_volume_backup(1, do_delete=False, fakearg="f")
scenario._create_volume.assert_called_once_with(1, fakearg="f")
scenario._create_backup.assert_called_once_with(fake_volume.id,
fakearg="f")
self.assertFalse(scenario._delete_volume.called)
self.assertFalse(scenario._delete_backup.called)

View File

@ -0,0 +1,99 @@
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from rally.benchmark.scenarios.nova import servers
from rally import objects
from rally import osclients
from rally.plugins.openstack.scenarios.glance import images
from tests.unit import fakes
from tests.unit import test
GLANCE_IMAGES = "rally.plugins.openstack.scenarios.glance.images.GlanceImages"
class GlanceImagesTestCase(test.TestCase):
@mock.patch(GLANCE_IMAGES + "._generate_random_name")
@mock.patch(GLANCE_IMAGES + "._list_images")
@mock.patch(GLANCE_IMAGES + "._create_image")
def test_create_and_list_image(self, mock_create, mock_list,
mock_random_name):
glance_scenario = images.GlanceImages()
mock_random_name.return_value = "test-rally-image"
glance_scenario.create_and_list_image("cf", "url", "df",
fakearg="f")
mock_create.assert_called_once_with("cf", "url", "df",
fakearg="f")
mock_list.assert_called_once_with()
@mock.patch(GLANCE_IMAGES + "._list_images")
def test_list_images(self, mock_list):
glance_scenario = images.GlanceImages()
glance_scenario.list_images()
mock_list.assert_called_once_with()
@mock.patch(GLANCE_IMAGES + "._generate_random_name")
@mock.patch(GLANCE_IMAGES + "._delete_image")
@mock.patch(GLANCE_IMAGES + "._create_image")
def test_create_and_delete_image(self, mock_create, mock_delete,
mock_random_name):
glance_scenario = images.GlanceImages()
fake_image = object()
mock_create.return_value = fake_image
mock_random_name.return_value = "test-rally-image"
glance_scenario.create_and_delete_image("cf", "url", "df",
fakearg="f")
mock_create.assert_called_once_with("cf",
"url", "df", fakearg="f")
mock_delete.assert_called_once_with(fake_image)
@mock.patch(GLANCE_IMAGES + "._generate_random_name")
@mock.patch(GLANCE_IMAGES + "._boot_servers")
@mock.patch(GLANCE_IMAGES + "._create_image")
@mock.patch("rally.benchmark.runners.base.osclients")
def test_create_image_and_boot_instances(self,
mock_osclients,
mock_create_image,
mock_boot_servers,
mock_random_name):
glance_scenario = images.GlanceImages()
nova_scenario = servers.NovaServers()
fc = fakes.FakeClients()
mock_osclients.Clients.return_value = fc
fake_glance = fakes.FakeGlanceClient()
fc.glance = lambda: fake_glance
fake_nova = fakes.FakeNovaClient()
fc.nova = lambda: fake_nova
user_endpoint = objects.Endpoint("url", "user", "password", "tenant")
nova_scenario._clients = osclients.Clients(user_endpoint)
fake_image = fakes.FakeImage()
fake_servers = [object() for i in range(5)]
mock_create_image.return_value = fake_image
mock_boot_servers.return_value = fake_servers
mock_random_name.return_value = "random_name"
kwargs = {"fakearg": "f"}
with mock.patch("rally.plugins.openstack.scenarios."
"glance.utils.time.sleep"):
glance_scenario.create_image_and_boot_instances("cf", "url",
"df", "fid",
5, **kwargs)
mock_create_image.assert_called_once_with("cf",
"url", "df")
mock_boot_servers.assert_called_once_with("random_name",
"image-id-0",
"fid", 5, **kwargs)

View File

@ -0,0 +1,111 @@
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import tempfile
import mock
from oslotest import mockpatch
from rally.benchmark import utils as butils
from rally import exceptions as rally_exceptions
from rally.plugins.openstack.scenarios.glance import utils
from tests.unit import fakes
from tests.unit import test
BM_UTILS = "rally.benchmark.utils"
GLANCE_UTILS = "rally.plugins.openstack.scenarios.glance.utils"
class GlanceScenarioTestCase(test.TestCase):
def setUp(self):
super(GlanceScenarioTestCase, self).setUp()
self.image = mock.Mock()
self.image1 = mock.Mock()
self.res_is = mockpatch.Patch(BM_UTILS + ".resource_is")
self.get_fm = mockpatch.Patch(BM_UTILS + ".get_from_manager")
self.wait_for = mockpatch.Patch(GLANCE_UTILS + ".bench_utils.wait_for")
self.wait_for_delete = mockpatch.Patch(
GLANCE_UTILS + ".bench_utils.wait_for_delete")
self.useFixture(self.wait_for)
self.useFixture(self.wait_for_delete)
self.useFixture(self.res_is)
self.useFixture(self.get_fm)
self.gfm = self.get_fm.mock
self.useFixture(mockpatch.Patch("time.sleep"))
self.scenario = utils.GlanceScenario()
def test_failed_image_status(self):
self.get_fm.cleanUp()
image_manager = fakes.FakeFailedImageManager()
self.assertRaises(rally_exceptions.GetResourceFailure,
butils.get_from_manager(),
image_manager.create("fails", "url", "cf", "df"))
@mock.patch(GLANCE_UTILS + ".GlanceScenario.clients")
def test_list_images(self, mock_clients):
images_list = []
mock_clients("glance").images.list.return_value = images_list
scenario = utils.GlanceScenario()
return_images_list = scenario._list_images()
self.assertEqual(images_list, return_images_list)
self._test_atomic_action_timer(scenario.atomic_actions(),
"glance.list_images")
@mock.patch(GLANCE_UTILS + ".GlanceScenario.clients")
def test_create_image(self, mock_clients):
image_location = tempfile.NamedTemporaryFile()
mock_clients("glance").images.create.return_value = self.image
scenario = utils.GlanceScenario()
return_image = scenario._create_image("container_format",
image_location.name,
"disk_format")
self.wait_for.mock.assert_called_once_with(self.image,
update_resource=self.gfm(),
is_ready=self.res_is.mock(),
check_interval=1,
timeout=120)
self.res_is.mock.assert_has_calls([mock.call("active")])
self.assertEqual(self.wait_for.mock(), return_image)
self._test_atomic_action_timer(scenario.atomic_actions(),
"glance.create_image")
@mock.patch(GLANCE_UTILS + ".GlanceScenario.clients")
def test_create_image_with_location(self, mock_clients):
mock_clients("glance").images.create.return_value = self.image
scenario = utils.GlanceScenario()
return_image = scenario._create_image("container_format",
"image_location",
"disk_format")
self.wait_for.mock.assert_called_once_with(self.image,
update_resource=self.gfm(),
is_ready=self.res_is.mock(),
check_interval=1,
timeout=120)
self.res_is.mock.assert_has_calls([mock.call("active")])
self.assertEqual(self.wait_for.mock(), return_image)
self._test_atomic_action_timer(scenario.atomic_actions(),
"glance.create_image")
def test_delete_image(self):
scenario = utils.GlanceScenario()
scenario._delete_image(self.image)
self.image.delete.assert_called_once_with()
self.wait_for_delete.mock.assert_called_once_with(
self.image,
update_resource=self.gfm(),
check_interval=1,
timeout=120)
self._test_atomic_action_timer(scenario.atomic_actions(),
"glance.delete_image")

View File

@ -0,0 +1,116 @@
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from rally.plugins.openstack.scenarios.sahara import clusters
from tests.unit import test
SAHARA_CLUSTERS = ("rally.plugins.openstack.scenarios.sahara.clusters"
".SaharaClusters")
SAHARA_UTILS = "rally.plugins.openstack.scenarios.sahara.utils"
class SaharaClustersTestCase(test.TestCase):
@mock.patch(SAHARA_CLUSTERS + "._delete_cluster")
@mock.patch(SAHARA_CLUSTERS + "._launch_cluster",
return_value=mock.MagicMock(id=42))
@mock.patch(SAHARA_UTILS + ".SaharaScenario.clients")
def test_create_and_delete_cluster(self, mock_clients, mock_launch_cluster,
mock_delete_cluster):
clusters_scenario = clusters.SaharaClusters()
clusters_scenario.context = {
"tenant": {
"sahara_image": "test_image"
}
}
clusters_scenario.create_and_delete_cluster(
flavor="test_flavor",
workers_count=5,
plugin_name="test_plugin",
hadoop_version="test_version")
mock_launch_cluster.assert_called_once_with(
flavor_id="test_flavor",
image_id="test_image",
workers_count=5,
plugin_name="test_plugin",
hadoop_version="test_version",
floating_ip_pool=None,
volumes_per_node=None,
volumes_size=None,
auto_security_group=None,
security_groups=None,
node_configs=None,
cluster_configs=None,
enable_anti_affinity=False)
mock_delete_cluster.assert_called_once_with(
mock_launch_cluster.return_value)
@mock.patch(SAHARA_CLUSTERS + "._delete_cluster")
@mock.patch(SAHARA_CLUSTERS + "._scale_cluster")
@mock.patch(SAHARA_CLUSTERS + "._launch_cluster",
return_value=mock.MagicMock(id=42))
@mock.patch(SAHARA_UTILS + ".SaharaScenario.clients")
def test_create_scale_delete_cluster(self, mock_clients,
mock_launch_cluster,
mock_scale_cluster,
mock_delete_cluster):
mock_sahara = mock_clients("sahara")
mock_sahara.clusters.get.return_value = mock.MagicMock(
id=42, status="active"
)
clusters_scenario = clusters.SaharaClusters()
clusters_scenario.context = {
"tenant": {
"sahara_image": "test_image"
}
}
clusters_scenario.create_scale_delete_cluster(
flavor="test_flavor",
workers_count=5,
deltas=[1, -1],
plugin_name="test_plugin",
hadoop_version="test_version")
mock_launch_cluster.assert_called_once_with(
flavor_id="test_flavor",
image_id="test_image",
workers_count=5,
plugin_name="test_plugin",
hadoop_version="test_version",
floating_ip_pool=None,
volumes_per_node=None,
volumes_size=None,
auto_security_group=None,
security_groups=None,
node_configs=None,
cluster_configs=None,
enable_anti_affinity=False)
mock_scale_cluster.assert_has_calls([
mock.call(mock_sahara.clusters.get.return_value, 1),
mock.call(mock_sahara.clusters.get.return_value, -1),
])
mock_delete_cluster.assert_called_once_with(
mock_sahara.clusters.get.return_value)

View File

@ -0,0 +1,237 @@
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from rally.plugins.openstack.scenarios.sahara import jobs
from tests.unit import test
CONF = cfg.CONF
SAHARA_JOB = "rally.plugins.openstack.scenarios.sahara.jobs.SaharaJob"
SAHARA_UTILS = "rally.plugins.openstack.scenarios.sahara.utils"
class SaharaJobTestCase(test.TestCase):
def setUp(self):
super(SaharaJobTestCase, self).setUp()
CONF.set_override("cluster_check_interval", 0, "benchmark")
CONF.set_override("job_check_interval", 0, "benchmark")
@mock.patch(SAHARA_UTILS + ".SaharaScenario._generate_random_name",
return_value="job_42")
@mock.patch(SAHARA_JOB + "._run_job_execution")
@mock.patch(SAHARA_UTILS + ".SaharaScenario.clients")
def test_create_launch_job_java(self, mock_osclients, mock_run_execution,
mock_random_name):
mock_sahara = mock_osclients("sahara")
mock_sahara.jobs.create.return_value = mock.MagicMock(id="42")
jobs_scenario = jobs.SaharaJob()
jobs_scenario.context = {
"tenant": {
"sahara_image": "test_image",
"sahara_mains": ["main_42"],
"sahara_libs": ["lib_42"],
"sahara_cluster": "cl_42",
"sahara_input": "in_42"
}
}
jobs_scenario.create_launch_job(
job_type="java",
configs={"conf_key": "conf_val"},
job_idx=0
)
mock_sahara.jobs.create.assert_called_once_with(
name=mock_random_name.return_value,
type="java",
description="",
mains=["main_42"],
libs=["lib_42"]
)
mock_run_execution.assert_called_once_with(
job_id="42",
cluster_id="cl_42",
input_id=None,
output_id=None,
configs={"conf_key": "conf_val"},
job_idx=0
)
@mock.patch(SAHARA_UTILS + ".SaharaScenario._generate_random_name",
return_value="job_42")
@mock.patch(SAHARA_JOB + "._run_job_execution")
@mock.patch(SAHARA_JOB + "._create_output_ds",
return_value=mock.MagicMock(id="out_42"))
@mock.patch(SAHARA_UTILS + ".SaharaScenario.clients")
def test_create_launch_job_pig(self, mock_osclients, mock_create_ds,
mock_run_execution, mock_random_name):
mock_sahara = mock_osclients("sahara")
mock_sahara.jobs.create.return_value = mock.MagicMock(id="42")
jobs_scenario = jobs.SaharaJob()
jobs_scenario.context = {
"tenant": {
"sahara_image": "test_image",
"sahara_mains": ["main_42"],
"sahara_libs": ["lib_42"],
"sahara_cluster": "cl_42",
"sahara_input": "in_42"
}
}
jobs_scenario.create_launch_job(
job_type="pig",
configs={"conf_key": "conf_val"},
job_idx=0
)
mock_sahara.jobs.create.assert_called_once_with(
name=mock_random_name.return_value,
type="pig",
description="",
mains=["main_42"],
libs=["lib_42"]
)
mock_run_execution.assert_called_once_with(
job_id="42",
cluster_id="cl_42",
input_id="in_42",
output_id="out_42",
configs={"conf_key": "conf_val"},
job_idx=0
)
@mock.patch(SAHARA_UTILS + ".SaharaScenario._generate_random_name",
return_value="job_42")
@mock.patch(SAHARA_JOB + "._run_job_execution")
@mock.patch(SAHARA_UTILS + ".SaharaScenario.clients")
def test_create_launch_job_sequence(self, mock_osclients,
mock_run_execution, mock_random_name):
mock_sahara = mock_osclients("sahara")
mock_sahara.jobs.create.return_value = mock.MagicMock(id="42")
jobs_scenario = jobs.SaharaJob()
jobs_scenario.context = {
"tenant": {
"sahara_image": "test_image",
"sahara_mains": ["main_42"],
"sahara_libs": ["lib_42"],
"sahara_cluster": "cl_42",
"sahara_input": "in_42"
}
}
jobs_scenario.create_launch_job_sequence(
jobs=[
{
"job_type": "java",
"configs": {"conf_key": "conf_val"}
}, {
"job_type": "java",
"configs": {"conf_key2": "conf_val2"}
}])
jobs_create_call = mock.call(
name=mock_random_name.return_value,
type="java",
description="",
mains=["main_42"],
libs=["lib_42"])
mock_sahara.jobs.create.assert_has_calls([jobs_create_call,
jobs_create_call])
mock_run_execution.assert_has_calls([
mock.call(
job_id="42",
cluster_id="cl_42",
input_id=None,
output_id=None,
configs={"conf_key": "conf_val"},
job_idx=0),
mock.call(
job_id="42",
cluster_id="cl_42",
input_id=None,
output_id=None,
configs={"conf_key2": "conf_val2"},
job_idx=1)]
)
@mock.patch(SAHARA_UTILS + ".SaharaScenario._generate_random_name",
return_value="job_42")
@mock.patch(SAHARA_JOB + "._run_job_execution")
@mock.patch(SAHARA_JOB + "._scale_cluster")
@mock.patch(SAHARA_UTILS + ".SaharaScenario.clients")
def test_create_launch_job_sequence_with_scaling(self, mock_osclients,
mock_scale,
mock_run_execution,
mock_random_name):
mock_sahara = mock_osclients("sahara")
mock_sahara.jobs.create.return_value = mock.MagicMock(id="42")
mock_sahara.clusters.get.return_value = mock.MagicMock(
id="cl_42",
status="active")
jobs_scenario = jobs.SaharaJob()
jobs_scenario.context = {
"tenant": {
"sahara_image": "test_image",
"sahara_mains": ["main_42"],
"sahara_libs": ["lib_42"],
"sahara_cluster": "cl_42",
"sahara_input": "in_42"
}
}
jobs_scenario.create_launch_job_sequence_with_scaling(
jobs=[
{
"job_type": "java",
"configs": {"conf_key": "conf_val"}
}, {
"job_type": "java",
"configs": {"conf_key2": "conf_val2"}
}],
deltas=[1, -1])
jobs_create_call = mock.call(
name=mock_random_name.return_value,
type="java",
description="",
mains=["main_42"],
libs=["lib_42"])
mock_sahara.jobs.create.assert_has_calls([jobs_create_call,
jobs_create_call])
je_0 = mock.call(job_id="42", cluster_id="cl_42", input_id=None,
output_id=None, configs={"conf_key": "conf_val"},
job_idx=0)
je_1 = mock.call(job_id="42", cluster_id="cl_42", input_id=None,
output_id=None,
configs={"conf_key2": "conf_val2"}, job_idx=1)
mock_run_execution.assert_has_calls([je_0, je_1, je_0, je_1, je_0,
je_1])
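Taken together, these cases fix the branching create_launch_job must implement: Java jobs run without data sources, while Pig jobs wire in the tenant input and a freshly created output data source. A hedged sketch consistent with the assertions (not necessarily the shipped code; the atomic-timer wrapping is elided):

    def create_launch_job(self, job_type, configs, job_idx=0):
        # Hypothetical reconstruction from the assertions above.
        tenant = self.context["tenant"]
        job = self.clients("sahara").jobs.create(
            name=self._generate_random_name(),
            type=job_type,
            description="",
            mains=tenant["sahara_mains"],
            libs=tenant["sahara_libs"])
        if job_type.lower() == "java":
            input_id = output_id = None
        else:
            input_id = tenant["sahara_input"]
            output_id = self._create_output_ds().id
        self._run_job_execution(job_id=job.id,
                                cluster_id=tenant["sahara_cluster"],
                                input_id=input_id,
                                output_id=output_id,
                                configs=configs,
                                job_idx=job_idx)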

View File

@ -0,0 +1,78 @@
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock

from rally.plugins.openstack.scenarios.sahara import (node_group_templates
                                                      as ngts)
from tests.unit import test

SAHARA_NGTS = ("rally.plugins.openstack.scenarios.sahara.node_group_templates"
               ".SaharaNodeGroupTemplates")


class SaharaNodeGroupTemplatesTestCase(test.TestCase):
@mock.patch(SAHARA_NGTS + "._list_node_group_templates")
@mock.patch(SAHARA_NGTS + "._create_master_node_group_template",
return_value=object())
@mock.patch(SAHARA_NGTS + "._create_worker_node_group_template",
                return_value=object())
def test_create_and_list_node_group_templates(self, mock_create_worker,
mock_create_master,
mock_list):
ngts_scenario = ngts.SaharaNodeGroupTemplates()
ngts_scenario.create_and_list_node_group_templates("test_flavor",
"test_plugin",
"test_version")
mock_create_master.assert_called_once_with(
flavor_id="test_flavor",
plugin_name="test_plugin",
hadoop_version="test_version")
mock_create_worker.assert_called_once_with(
flavor_id="test_flavor",
plugin_name="test_plugin",
hadoop_version="test_version")
mock_list.assert_called_once_with()
@mock.patch(SAHARA_NGTS + "._delete_node_group_template")
@mock.patch(SAHARA_NGTS + "._create_master_node_group_template",
return_value=mock.MagicMock(id=1))
@mock.patch(SAHARA_NGTS + "._create_worker_node_group_template",
return_value=mock.MagicMock(id=2))
def test_create_delete_node_group_templates(self, mock_create_worker,
mock_create_master,
mock_delete):
ngts_scenario = ngts.SaharaNodeGroupTemplates()
ngts_scenario.create_delete_node_group_templates(
"test_flavor",
"test_plugin",
"test_version")
mock_create_master.assert_called_once_with(
flavor_id="test_flavor",
plugin_name="test_plugin",
hadoop_version="test_version")
mock_create_worker.assert_called_once_with(
flavor_id="test_flavor",
plugin_name="test_plugin",
hadoop_version="test_version")
mock_delete.assert_has_calls(calls=[
mock.call(mock_create_master.return_value),
mock.call(mock_create_worker.return_value)])
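Both scenarios pair the two creation helpers with one follow-up action; a minimal sketch of the first, assuming the helpers patched above exist with those names (timer wrapping elided):

    def create_and_list_node_group_templates(self, flavor, plugin_name,
                                             hadoop_version):
        # Hypothetical reconstruction from the assertions above.
        self._create_master_node_group_template(
            flavor_id=flavor,
            plugin_name=plugin_name,
            hadoop_version=hadoop_version)
        self._create_worker_node_group_template(
            flavor_id=flavor,
            plugin_name=plugin_name,
            hadoop_version=hadoop_version)
        self._list_node_group_templates()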

View File

@ -0,0 +1,415 @@
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from oslo_utils import uuidutils
from saharaclient.api import base as sahara_base

from rally import consts
from rally import exceptions
from rally.plugins.openstack.scenarios.sahara import utils
from tests.unit import test

CONF = cfg.CONF

SAHARA_UTILS = "rally.plugins.openstack.scenarios.sahara.utils"


class SaharaUtilsTestCase(test.TestCase):
def setUp(self):
super(SaharaUtilsTestCase, self).setUp()
CONF.set_override("cluster_check_interval", 0, "benchmark")
CONF.set_override("job_check_interval", 0, "benchmark")
@mock.patch(SAHARA_UTILS + ".SaharaScenario.clients")
def test_list_node_group_templates(self, mock_clients):
ngts = []
mock_clients("sahara").node_group_templates.list.return_value = ngts
scenario = utils.SaharaScenario()
return_ngts_list = scenario._list_node_group_templates()
self.assertEqual(ngts, return_ngts_list)
self._test_atomic_action_timer(scenario.atomic_actions(),
"sahara.list_node_group_templates")
@mock.patch(SAHARA_UTILS + ".SaharaScenario._generate_random_name",
return_value="random_name")
@mock.patch(SAHARA_UTILS + ".SaharaScenario.clients")
@mock.patch(SAHARA_UTILS + ".sahara_consts")
def test_create_node_group_templates(self, mock_constants, mock_clients,
mock_random_name):
scenario = utils.SaharaScenario()
mock_processes = {
"test_plugin": {
"test_version": {
"master": ["p1"],
"worker": ["p2"]
}
}
}
mock_constants.NODE_PROCESSES = mock_processes
scenario._create_master_node_group_template(
flavor_id="test_flavor",
plugin_name="test_plugin",
hadoop_version="test_version"
)
scenario._create_worker_node_group_template(
flavor_id="test_flavor",
plugin_name="test_plugin",
hadoop_version="test_version"
)
create_calls = [
mock.call(
name="random_name",
plugin_name="test_plugin",
hadoop_version="test_version",
flavor_id="test_flavor",
node_processes=["p1"]),
mock.call(
name="random_name",
plugin_name="test_plugin",
hadoop_version="test_version",
flavor_id="test_flavor",
node_processes=["p2"]
)]
mock_clients("sahara").node_group_templates.create.assert_has_calls(
create_calls)
self._test_atomic_action_timer(
scenario.atomic_actions(),
"sahara.create_master_node_group_template")
self._test_atomic_action_timer(
scenario.atomic_actions(),
"sahara.create_worker_node_group_template")
@mock.patch(SAHARA_UTILS + ".SaharaScenario.clients")
def test_delete_node_group_templates(self, mock_clients):
scenario = utils.SaharaScenario()
ng = mock.MagicMock(id=42)
scenario._delete_node_group_template(ng)
delete_mock = mock_clients("sahara").node_group_templates.delete
delete_mock.assert_called_once_with(42)
self._test_atomic_action_timer(scenario.atomic_actions(),
"sahara.delete_node_group_template")
@mock.patch(SAHARA_UTILS + ".SaharaScenario._generate_random_name",
return_value="random_name")
@mock.patch(SAHARA_UTILS + ".SaharaScenario.clients")
@mock.patch(SAHARA_UTILS + ".sahara_consts")
def test_launch_cluster(self, mock_constants,
mock_clients, mock_random_name):
clients_values = mock.MagicMock(return_value=[consts.Service.NEUTRON])
mock_clients.services.return_value = mock.MagicMock(
values=clients_values)
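        # services().values() reporting Neutron makes the scenario attach
        # the tenant network, hence the net_id assertion further down.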
context = {
"tenant": {
"networks": [
{
"id": "test_neutron_id",
"router_id": "test_router_id"
}
]
}
}
scenario = utils.SaharaScenario(context=context, clients=mock_clients)
mock_processes = {
"test_plugin": {
"test_version": {
"master": ["p1"],
"worker": ["p2"]
}
}
}
mock_configs = {
"test_plugin": {
"test_version": {
"target": "HDFS",
"config_name": "dfs.replication"
}
}
}
floating_ip_pool_uuid = uuidutils.generate_uuid()
node_groups = [
{
"name": "master-ng",
"flavor_id": "test_flavor",
"node_processes": ["p1"],
"floating_ip_pool": floating_ip_pool_uuid,
"volumes_per_node": 5,
"volumes_size": 10,
"count": 1,
"auto_security_group": True,
"security_groups": ["g1", "g2"],
"node_configs": {"HDFS": {"local_config": "local_value"}},
}, {
"name": "worker-ng",
"flavor_id": "test_flavor",
"node_processes": ["p2"],
"floating_ip_pool": floating_ip_pool_uuid,
"volumes_per_node": 5,
"volumes_size": 10,
"count": 42,
"auto_security_group": True,
"security_groups": ["g1", "g2"],
"node_configs": {"HDFS": {"local_config": "local_value"}},
}
]
mock_constants.NODE_PROCESSES = mock_processes
mock_constants.REPLICATION_CONFIGS = mock_configs
mock_clients("sahara").clusters.create.return_value = mock.MagicMock(
id="test_cluster_id")
mock_clients("sahara").clusters.get.return_value = mock.MagicMock(
status="active")
scenario._launch_cluster(
plugin_name="test_plugin",
hadoop_version="test_version",
flavor_id="test_flavor",
image_id="test_image",
floating_ip_pool=floating_ip_pool_uuid,
volumes_per_node=5,
volumes_size=10,
auto_security_group=True,
security_groups=["g1", "g2"],
workers_count=42,
node_configs={"HDFS": {"local_config": "local_value"}}
)
mock_clients("sahara").clusters.create.assert_called_once_with(
name="random_name",
plugin_name="test_plugin",
hadoop_version="test_version",
node_groups=node_groups,
default_image_id="test_image",
cluster_configs={"HDFS": {"dfs.replication": 3}},
net_id="test_neutron_id",
anti_affinity=None
)
self._test_atomic_action_timer(scenario.atomic_actions(),
"sahara.launch_cluster")
@mock.patch(SAHARA_UTILS + ".SaharaScenario._generate_random_name",
return_value="random_name")
@mock.patch(SAHARA_UTILS + ".SaharaScenario.clients")
@mock.patch(SAHARA_UTILS + ".sahara_consts")
def test_launch_cluster_error(self, mock_constants, mock_clients,
mock_random_name):
scenario = utils.SaharaScenario(clients=mock.MagicMock())
mock_processes = {
"test_plugin": {
"test_version": {
"master": ["p1"],
"worker": ["p2"]
}
}
}
mock_configs = {
"test_plugin": {
"test_version": {
"target": "HDFS",
"config_name": "dfs.replication"
}
}
}
mock_constants.NODE_PROCESSES = mock_processes
mock_constants.REPLICATION_CONFIGS = mock_configs
mock_clients("sahara").clusters.create.return_value = mock.MagicMock(
id="test_cluster_id")
mock_clients("sahara").clusters.get.return_value = mock.MagicMock(
status="error")
self.assertRaises(exceptions.SaharaClusterFailure,
scenario._launch_cluster,
plugin_name="test_plugin",
hadoop_version="test_version",
flavor_id="test_flavor",
image_id="test_image",
floating_ip_pool="test_pool",
volumes_per_node=5,
volumes_size=10,
workers_count=42,
node_configs={"HDFS": {"local_config":
"local_value"}})
@mock.patch(SAHARA_UTILS + ".SaharaScenario.clients")
def test_scale_cluster(self, mock_clients):
scenario = utils.SaharaScenario()
cluster = mock.MagicMock(id=42, node_groups=[{
"name": "random_master",
"count": 1
}, {
"name": "random_worker",
"count": 41
}])
mock_clients("sahara").clusters.get.return_value = mock.MagicMock(
id=42,
status="active")
expected_scale_object = {
"resize_node_groups": [{
"name": "random_worker",
"count": 42
}]
}
scenario._scale_cluster(cluster, 1)
mock_clients("sahara").clusters.scale.assert_called_once_with(
42, expected_scale_object)
@mock.patch(SAHARA_UTILS + ".SaharaScenario.clients")
def test_delete_cluster(self, mock_clients):
scenario = utils.SaharaScenario()
cluster = mock.MagicMock(id=42)
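        # get() returns the cluster once, then raises APIException, which
        # the scenario is expected to treat as "already deleted".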
mock_clients("sahara").clusters.get.side_effect = [
cluster, sahara_base.APIException()
]
scenario._delete_cluster(cluster)
delete_mock = mock_clients("sahara").clusters.delete
delete_mock.assert_called_once_with(42)
cl_get_expected = mock.call(42)
mock_clients("sahara").clusters.get.assert_has_calls([cl_get_expected,
cl_get_expected])
self._test_atomic_action_timer(scenario.atomic_actions(),
"sahara.delete_cluster")
@mock.patch(SAHARA_UTILS + ".SaharaScenario._generate_random_name",
return_value="42")
@mock.patch(SAHARA_UTILS + ".SaharaScenario.clients")
def test_create_output_ds(self, mock_clients, mock_random_name):
ctxt = {
"sahara_output_conf": {
"output_type": "hdfs",
"output_url_prefix": "hdfs://test_out/"
}
}
scenario = utils.SaharaScenario(ctxt)
scenario._create_output_ds()
mock_clients("sahara").data_sources.create.assert_called_once_with(
name="42",
description="",
data_source_type="hdfs",
url="hdfs://test_out/42"
)
@mock.patch(SAHARA_UTILS + ".SaharaScenario._generate_random_name",
return_value="42")
@mock.patch(SAHARA_UTILS + ".SaharaScenario.clients")
def test_create_output_ds_swift(self, mock_clients, mock_random_name):
ctxt = {
"sahara_output_conf": {
"output_type": "swift",
"output_url_prefix": "swift://test_out/"
}
}
scenario = utils.SaharaScenario(ctxt)
self.assertRaises(exceptions.RallyException,
scenario._create_output_ds)
@mock.patch(SAHARA_UTILS + ".SaharaScenario.clients")
def test_run_job_execution(self, mock_clients):
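        # Two queued get() results emulate polling: still "pending" on the
        # first check, then "SUCCESS" on the second.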
mock_clients("sahara").job_executions.get.side_effect = [
mock.MagicMock(info={"status": "pending"}, id="42"),
mock.MagicMock(info={"status": "SUCCESS"}, id="42")]
mock_clients("sahara").job_executions.create.return_value = (
mock.MagicMock(id="42"))
scenario = utils.SaharaScenario()
scenario._run_job_execution(job_id="test_job_id",
cluster_id="test_cluster_id",
input_id="test_input_id",
output_id="test_output_id",
configs={"k": "v"},
job_idx=0)
mock_clients("sahara").job_executions.create.assert_called_once_with(
job_id="test_job_id",
cluster_id="test_cluster_id",
input_id="test_input_id",
output_id="test_output_id",
configs={"k": "v"}
)
je_get_expected = mock.call("42")
mock_clients("sahara").job_executions.get.assert_has_calls(
[je_get_expected, je_get_expected]
)
@mock.patch(SAHARA_UTILS + ".SaharaScenario.clients")
def test_run_job_execution_fail(self, mock_clients):
mock_clients("sahara").job_executions.get.side_effect = [
mock.MagicMock(info={"status": "pending"}, id="42"),
mock.MagicMock(info={"status": "killed"}, id="42")]
mock_clients("sahara").job_executions.create.return_value = (
mock.MagicMock(id="42"))
scenario = utils.SaharaScenario()
self.assertRaises(exceptions.RallyException,
scenario._run_job_execution,
job_id="test_job_id",
cluster_id="test_cluster_id",
input_id="test_input_id",
output_id="test_output_id",
configs={"k": "v"},
job_idx=0)
mock_clients("sahara").job_executions.create.assert_called_once_with(
job_id="test_job_id",
cluster_id="test_cluster_id",
input_id="test_input_id",
output_id="test_output_id",
configs={"k": "v"}
)
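The pass/fail pair above implies a create-then-poll loop inside _run_job_execution. A simplified sketch of that loop; the time/CONF names, timeout handling, and any status strings beyond those used above are assumptions:

    def _run_job_execution(self, job_id, cluster_id, input_id, output_id,
                           configs, job_idx):
        # Hypothetical reconstruction from the assertions above.
        job_execution = self.clients("sahara").job_executions.create(
            job_id=job_id,
            cluster_id=cluster_id,
            input_id=input_id,
            output_id=output_id,
            configs=configs)
        while True:
            status = self.clients("sahara").job_executions.get(
                job_execution.id).info["status"]
            if status.lower() == "success":
                return
            if status.lower() in ("failed", "killed"):
                raise exceptions.RallyException(
                    "Job %d finished with status %s" % (job_idx, status))
            time.sleep(CONF.benchmark.job_check_interval)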

View File

@ -0,0 +1,144 @@
# Copyright 2015 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock

from rally.plugins.openstack.scenarios.swift import objects
from tests.unit import fakes
from tests.unit import test


class SwiftObjectsTestCase(test.TestCase):
def test_create_container_and_object_then_list_objects(self):
scenario = objects.SwiftObjects()
scenario._create_container = mock.MagicMock(return_value="AA")
scenario._upload_object = mock.MagicMock()
scenario._list_objects = mock.MagicMock()
scenario.create_container_and_object_then_list_objects(
objects_per_container=5,
object_size=100)
self.assertEqual(1, scenario._create_container.call_count)
self.assertEqual(5, scenario._upload_object.call_count)
scenario._list_objects.assert_called_once_with("AA")
self._test_atomic_action_timer(scenario.atomic_actions(),
"swift.create_5_objects")
def test_create_container_and_object_then_delete_all(self):
scenario = objects.SwiftObjects()
scenario._create_container = mock.MagicMock(return_value="BB")
scenario._upload_object = mock.MagicMock(
side_effect=[("etaaag", "ooobj_%i" % i) for i in range(3)])
scenario._delete_object = mock.MagicMock()
scenario._delete_container = mock.MagicMock()
scenario.create_container_and_object_then_delete_all(
objects_per_container=3,
object_size=10)
self.assertEqual(1, scenario._create_container.call_count)
self.assertEqual(3, scenario._upload_object.call_count)
scenario._delete_object.assert_has_calls(
[mock.call("BB", "ooobj_%i" % i,
atomic_action=False) for i in range(3)])
scenario._delete_container.assert_called_once_with("BB")
self._test_atomic_action_timer(scenario.atomic_actions(),
"swift.create_3_objects")
self._test_atomic_action_timer(scenario.atomic_actions(),
"swift.delete_3_objects")
def test_create_container_and_object_then_download_object(self):
scenario = objects.SwiftObjects()
scenario._create_container = mock.MagicMock(return_value="CC")
scenario._upload_object = mock.MagicMock(
side_effect=[("etaaaag", "obbbj_%i" % i) for i in range(2)])
scenario._download_object = mock.MagicMock()
scenario.create_container_and_object_then_download_object(
objects_per_container=2,
object_size=50)
self.assertEqual(1, scenario._create_container.call_count)
self.assertEqual(2, scenario._upload_object.call_count)
scenario._download_object.assert_has_calls(
[mock.call("CC", "obbbj_%i" % i,
atomic_action=False) for i in range(2)])
self._test_atomic_action_timer(scenario.atomic_actions(),
"swift.create_2_objects")
self._test_atomic_action_timer(scenario.atomic_actions(),
"swift.download_2_objects")
def test_functional_create_container_and_object_then_list_objects(self):
names_list = ["AA", "BB", "CC", "DD"]
scenario = objects.SwiftObjects(clients=fakes.FakeClients())
scenario._generate_random_name = mock.MagicMock(side_effect=names_list)
scenario._list_objects = mock.MagicMock()
scenario.create_container_and_object_then_list_objects(
objects_per_container=3,
object_size=100)
scenario._list_objects.assert_called_once_with("AA")
self._test_atomic_action_timer(scenario.atomic_actions(),
"swift.create_3_objects")
def test_functional_create_container_and_object_then_delete_all(self):
names_list = ["111", "222", "333", "444", "555"]
scenario = objects.SwiftObjects(clients=fakes.FakeClients())
scenario._generate_random_name = mock.MagicMock(side_effect=names_list)
scenario._delete_object = mock.MagicMock()
scenario._delete_container = mock.MagicMock()
scenario.create_container_and_object_then_delete_all(
objects_per_container=4,
object_size=240)
scenario._delete_object.assert_has_calls(
[mock.call("111", name,
atomic_action=False) for name in names_list[1:]])
scenario._delete_container.assert_called_once_with("111")
self._test_atomic_action_timer(scenario.atomic_actions(),
"swift.create_4_objects")
self._test_atomic_action_timer(scenario.atomic_actions(),
"swift.delete_4_objects")
def test_functional_create_container_and_object_then_download_object(self):
names_list = ["aaa", "bbb", "ccc", "ddd", "eee", "fff"]
scenario = objects.SwiftObjects(clients=fakes.FakeClients())
scenario._generate_random_name = mock.MagicMock(side_effect=names_list)
scenario._download_object = mock.MagicMock()
scenario.create_container_and_object_then_download_object(
objects_per_container=5,
object_size=750)
scenario._download_object.assert_has_calls(
[mock.call("aaa", name,
atomic_action=False) for name in names_list[1:]])
self._test_atomic_action_timer(scenario.atomic_actions(),
"swift.create_5_objects")
self._test_atomic_action_timer(scenario.atomic_actions(),
"swift.download_5_objects")

View File

@ -0,0 +1,202 @@
# Copyright 2015: Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock

from rally.plugins.openstack.scenarios.swift import utils
from tests.unit import test

SWIFT_UTILS = "rally.plugins.openstack.scenarios.swift.utils"


class SwiftScenarioTestCase(test.TestCase):
@mock.patch(SWIFT_UTILS + ".SwiftScenario.clients")
def test__list_containers(self, mock_clients):
headers_dict = mock.MagicMock()
containers_list = mock.MagicMock()
mock_clients("swift").get_account.return_value = (headers_dict,
containers_list)
scenario = utils.SwiftScenario()
self.assertEqual((headers_dict, containers_list),
scenario._list_containers(fargs="f"))
kw = {"full_listing": True, "fargs": "f"}
mock_clients("swift").get_account.assert_called_once_with(**kw)
self._test_atomic_action_timer(scenario.atomic_actions(),
"swift.list_containers")
@mock.patch(SWIFT_UTILS + ".SwiftScenario.clients")
def test__create_container(self, mock_clients):
container_name = mock.MagicMock()
scenario = utils.SwiftScenario()
# name + public=True + kw
self.assertEqual(container_name,
scenario._create_container(container_name,
public=True, fargs="f"))
kw = {"headers": {"X-Container-Read": ".r:*,.rlistings"}, "fargs": "f"}
mock_clients("swift").put_container.assert_called_once_with(
container_name,
**kw)
# name + public=True + additional header + kw
mock_clients("swift").put_container.reset_mock()
self.assertEqual(container_name,
scenario._create_container(container_name,
public=True,
headers={"X-fake-name":
"fake-value"},
fargs="f"))
kw = {"headers": {"X-Container-Read": ".r:*,.rlistings",
"X-fake-name": "fake-value"}, "fargs": "f"}
mock_clients("swift").put_container.assert_called_once_with(
container_name,
**kw)
# name + public=False + additional header + kw
mock_clients("swift").put_container.reset_mock()
self.assertEqual(container_name,
scenario._create_container(container_name,
public=False,
headers={"X-fake-name":
"fake-value"},
fargs="f"))
kw = {"headers": {"X-fake-name": "fake-value"}, "fargs": "f"}
mock_clients("swift").put_container.assert_called_once_with(
container_name,
**kw)
# name + kw
mock_clients("swift").put_container.reset_mock()
self.assertEqual(container_name,
scenario._create_container(container_name, fargs="f"))
kw = {"fargs": "f"}
mock_clients("swift").put_container.assert_called_once_with(
container_name,
**kw)
# kw
scenario._generate_random_name = mock.MagicMock(
return_value=container_name)
mock_clients("swift").put_container.reset_mock()
self.assertEqual(container_name,
scenario._create_container(fargs="f"))
kw = {"fargs": "f"}
mock_clients("swift").put_container.assert_called_once_with(
container_name,
**kw)
self.assertEqual(1, scenario._generate_random_name.call_count)
self._test_atomic_action_timer(scenario.atomic_actions(),
"swift.create_container")
@mock.patch(SWIFT_UTILS + ".SwiftScenario.clients")
def test__delete_container(self, mock_clients):
container_name = mock.MagicMock()
scenario = utils.SwiftScenario()
scenario._delete_container(container_name, fargs="f")
kw = {"fargs": "f"}
mock_clients("swift").delete_container.assert_called_once_with(
container_name,
**kw)
self._test_atomic_action_timer(scenario.atomic_actions(),
"swift.delete_container")
@mock.patch(SWIFT_UTILS + ".SwiftScenario.clients")
def test__list_objects(self, mock_clients):
container_name = mock.MagicMock()
headers_dict = mock.MagicMock()
objects_list = mock.MagicMock()
mock_clients("swift").get_container.return_value = (headers_dict,
objects_list)
scenario = utils.SwiftScenario()
self.assertEqual((headers_dict, objects_list),
scenario._list_objects(container_name, fargs="f"))
kw = {"full_listing": True, "fargs": "f"}
mock_clients("swift").get_container.assert_called_once_with(
container_name,
**kw)
self._test_atomic_action_timer(scenario.atomic_actions(),
"swift.list_objects")
@mock.patch(SWIFT_UTILS + ".SwiftScenario.clients")
def test__upload_object(self, mock_clients):
container_name = mock.MagicMock()
object_name = mock.MagicMock()
content = mock.MagicMock()
etag = mock.MagicMock()
mock_clients("swift").put_object.return_value = etag
scenario = utils.SwiftScenario()
# container + content + name + kw
self.assertEqual((etag, object_name),
scenario._upload_object(container_name, content,
object_name=object_name,
fargs="f"))
kw = {"fargs": "f"}
mock_clients("swift").put_object.assert_called_once_with(
container_name, object_name,
content, **kw)
# container + content + kw
scenario._generate_random_name = mock.MagicMock(
return_value=object_name)
mock_clients("swift").put_object.reset_mock()
self.assertEqual((etag, object_name),
scenario._upload_object(container_name, content,
fargs="f"))
kw = {"fargs": "f"}
mock_clients("swift").put_object.assert_called_once_with(
container_name, object_name,
content, **kw)
self.assertEqual(1, scenario._generate_random_name.call_count)
self._test_atomic_action_timer(scenario.atomic_actions(),
"swift.upload_object")
@mock.patch(SWIFT_UTILS + ".SwiftScenario.clients")
def test__download_object(self, mock_clients):
container_name = mock.MagicMock()
object_name = mock.MagicMock()
headers_dict = mock.MagicMock()
content = mock.MagicMock()
mock_clients("swift").get_object.return_value = (headers_dict, content)
scenario = utils.SwiftScenario()
self.assertEqual((headers_dict, content),
scenario._download_object(container_name, object_name,
fargs="f"))
kw = {"fargs": "f"}
mock_clients("swift").get_object.assert_called_once_with(
container_name, object_name,
**kw)
self._test_atomic_action_timer(scenario.atomic_actions(),
"swift.download_object")
@mock.patch(SWIFT_UTILS + ".SwiftScenario.clients")
def test__delete_object(self, mock_clients):
container_name = mock.MagicMock()
object_name = mock.MagicMock()
scenario = utils.SwiftScenario()
scenario._delete_object(container_name, object_name, fargs="f")
kw = {"fargs": "f"}
mock_clients("swift").delete_object.assert_called_once_with(
container_name, object_name,
**kw)
self._test_atomic_action_timer(scenario.atomic_actions(),
"swift.delete_object")