Add batching for sample creation in the Ceilometer context
Performance-oriented Ceilometer scenarios require a large amount of pre-created data, so if anything goes wrong while the samples are being stored, the test fails. Batching mitigates this: storing samples in batches proceeds at roughly the same rate as storing the whole sample list at once, and choosing a batch size no greater than 1% of the total number of samples makes the loss of a batch non-critical for the test.

Change-Id: I96cfbdb6f0f8c20363b4be11cac488f6459f81e2
This commit is contained in: commit 47b60c4f62 (parent f637814243)
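To illustrate the guideline from the commit message, here is a hypothetical "ceilometer" context configuration, written as the Python dict that would be validated against the CONFIG_SCHEMA shown further down in this diff. The sizes are made up for illustration; only the keys come from the existing context and this change.

    # Hypothetical context config; the sizes are illustrative only.
    # 100 resources x 600 samples = 60,000 samples per tenant; a batch_size of
    # 500 keeps each batch below 1% of the total, so losing a batch is not
    # critical for the test.
    ceilometer_context = {
        "counter_name": "cpu_util",
        "counter_type": "gauge",
        "counter_unit": "instance",
        "counter_volume": 1.0,
        "resources_per_tenant": 100,
        "samples_per_resource": 600,
        "timestamp_interval": 60,
        "batch_size": 500,
        "batches_allow_lose": 3,   # tolerate up to three failed batches
    }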
@@ -495,7 +495,7 @@
           counter_unit: "instance"
           counter_volume: 1.0
           resources_per_tenant: 3
-          samples_per_resource: 3
+          samples_per_resource: 10
           timestamp_interval: 60
           metadata_list:
             - status: "active"
@@ -506,6 +506,7 @@
               name: "fake_resource_1"
               deleted: "False"
               created_at: "2015-09-10T06:55:12.000000"
+          batch_size: 5
       sla:
         failure_rate:
           max: 0
@@ -18,6 +18,7 @@ from rally.common.i18n import _
 from rally.common import log as logging
 from rally.common import utils as rutils
 from rally import consts
+from rally import exceptions
 from rally.plugins.openstack.scenarios.ceilometer import utils as ceilo_utils
 from rally.task import context
 
@@ -77,6 +78,14 @@ class CeilometerSampleGenerator(context.Context):
                         }
                     }
                 }
             },
+            "batch_size": {
+                "type": "integer",
+                "minimum": 1
+            },
+            "batches_allow_lose": {
+                "type": "integer",
+                "minimum": 0
+            }
         },
         "required": ["counter_name", "counter_type", "counter_unit",
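The two new properties only constrain the type and a lower bound. As a quick sanity check, a minimal stand-alone sketch with the jsonschema library (which Rally uses for config validation) shows how such a fragment accepts and rejects values; the fragment below mirrors the added properties but is not the full CONFIG_SCHEMA.

    import jsonschema

    # Stand-alone fragment mirroring the two properties added above.
    schema = {
        "type": "object",
        "properties": {
            "batch_size": {"type": "integer", "minimum": 1},
            "batches_allow_lose": {"type": "integer", "minimum": 0},
        },
    }

    jsonschema.validate({"batch_size": 5, "batches_allow_lose": 1}, schema)  # passes
    try:
        jsonschema.validate({"batch_size": 0}, schema)  # violates "minimum": 1
    except jsonschema.ValidationError as exc:
        print(exc)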
@@ -90,6 +99,22 @@ class CeilometerSampleGenerator(context.Context):
         "timestamp_interval": 60
     }
 
+    def _store_batch_samples(self, scenario, batches, batches_allow_lose):
+        batches_allow_lose = batches_allow_lose or 0
+        unsuccess = 0
+        for i, batch in enumerate(batches, start=1):
+            try:
+                samples = scenario._create_samples(batch)
+            except Exception:
+                unsuccess += 1
+                LOG.warning(_("Failed to store batch %d of Ceilometer samples"
+                              " during context creation") % i)
+        if unsuccess > batches_allow_lose:
+            raise exceptions.ContextSetupFailure(
+                ctx_name=self.get_name(),
+                msg=_("Context failed to store too many batches of samples"))
+        return samples
+
     @logging.log_task_wrapper(LOG.info, _("Enter context: `Ceilometer`"))
     def setup(self):
         new_sample = {
@@ -110,8 +135,12 @@ class CeilometerSampleGenerator(context.Context):
                     count=self.config["samples_per_resource"],
                     interval=self.config["timestamp_interval"],
                     metadata_list=self.config.get("metadata_list"),
+                    batch_size=self.config.get("batch_size"),
                     **new_sample)
-                samples = scenario._create_samples(samples_to_create)
+                samples = self._store_batch_samples(
+                    scenario, samples_to_create,
+                    self.config.get("batches_allow_lose")
+                )
                 for sample in samples:
                     self.context["tenants"][tenant_id]["samples"].append(
                         sample.to_dict())
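Outside the context class, the store-with-tolerance pattern used by _store_batch_samples() and setup() boils down to the following simplified sketch. This is not Rally code: store_batch and the sample data are stand-ins, and unlike the method above it collects every successfully stored batch instead of returning only the last one.

    # Simplified, self-contained sketch of the batch-storing pattern.
    # store_batch() is a stand-in for scenario._create_samples().
    def store_batches(batches, store_batch, batches_allow_lose=0):
        lost = 0
        stored = []
        for i, batch in enumerate(batches, start=1):
            try:
                stored.append(store_batch(batch))
            except Exception:
                lost += 1
                print("failed to store batch %d" % i)
        if lost > batches_allow_lose:
            raise RuntimeError("too many batches of samples were lost")
        return stored


    # Example: one out of three batches fails, which is within the allowance.
    outcomes = iter([None, Exception("boom"), None])

    def fake_store(batch):
        outcome = next(outcomes)
        if isinstance(outcome, Exception):
            raise outcome
        return batch

    print(store_batches([[1, 2], [3, 4], [5, 6]], fake_store,
                        batches_allow_lose=1))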
@@ -28,7 +28,7 @@ class CeilometerScenario(scenario.OpenStackScenario):
     def _make_samples(self, count=1, interval=0, counter_name="cpu_util",
                       counter_type="gauge", counter_unit="%", counter_volume=1,
                       project_id=None, user_id=None, source=None,
-                      timestamp=None, metadata_list=None):
+                      timestamp=None, metadata_list=None, batch_size=None):
         """Prepare and return a list of samples.
 
         :param count: specifies number of samples in array
@@ -43,9 +43,10 @@ class CeilometerScenario(scenario.OpenStackScenario):
         :param source: specifies source for samples
         :param timestamp: specifies timestamp for samples
         :param metadata_list: specifies list of resource metadata
-        :returns: list of samples used to create samples
+        :param batch_size: specifies number of samples to store in one query
+        :returns: generator that produces lists of samples
         """
-        samples = []
+        batch_size = batch_size or count
         sample = {
             "counter_name": counter_name,
             "counter_type": counter_type,
@@ -62,9 +63,13 @@ class CeilometerScenario(scenario.OpenStackScenario):
         for k, v in six.iteritems(opt_fields):
             if v:
                 sample.update({k: v})
-        now = timestamp or datetime.datetime.utcnow()
         len_meta = len(metadata_list) if metadata_list else 0
+        now = timestamp or datetime.datetime.utcnow()
+        samples = []
         for i in six.moves.xrange(count):
+            if i and not (i % batch_size):
+                yield samples
+                samples = []
             sample_item = dict(sample)
             sample_item["timestamp"] = (
                 now - datetime.timedelta(seconds=(interval * i))
@@ -76,8 +81,7 @@ class CeilometerScenario(scenario.OpenStackScenario):
                     i * len_meta // count
                 ]
             samples.append(sample_item)
 
-        return samples
+        yield samples
 
     def _make_query_item(self, field, op="eq", value=None):
         """Create a SimpleQuery item for requests.
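The batching logic above is the usual "yield a chunk every batch_size items" generator pattern, and wrapping the generator in list() is what turns it into a list of batches, which is exactly what the updated tests rely on. A stripped-down sketch (a hypothetical helper, not part of Rally):

    # Stand-alone illustration of the batching pattern used in _make_samples().
    def make_items(count, batch_size=None):
        batch_size = batch_size or count
        batch = []
        for i in range(count):
            if i and not (i % batch_size):  # a full batch is ready, hand it over
                yield batch
                batch = []
            batch.append({"index": i})
        yield batch  # the final, possibly shorter, batch


    print([len(b) for b in make_items(5)])                # [5]   -> one batch
    print([len(b) for b in make_items(5, batch_size=2)])  # [2, 2, 1]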
@@ -26,7 +26,8 @@
                     {"status": "not_active", "name": "fake_resource_1",
                      "deleted": "False",
                      "created_at": "2015-09-10T06:55:12.000000"}
-                ]
+                ],
+                "batch_size": 5
             }
         },
         "args":{
@@ -26,6 +26,7 @@
               name: "fake_resource_1"
               deleted: "False"
               created_at: "2015-09-10T06:55:12.000000"
+          batch_size: 5
       args:
         limit: 50
         metadata_query:
@@ -115,7 +115,6 @@ class CeilometerSampleGeneratorTestCase(test.TestCase):
             "counter_type": "fake-counter-type",
             "counter_unit": "fake-counter-unit",
             "counter_volume": 100,
-            "resource_id": "fake-resource-id",
             "metadata_list": [
                 {"status": "active", "name": "fake_resource",
                  "deleted": "False",
@@ -128,9 +127,10 @@ class CeilometerSampleGeneratorTestCase(test.TestCase):
         scenario.generate_random_name = mock.Mock(
             return_value="fake_resource-id")
         kwargs = copy.deepcopy(sample)
-        kwargs.pop("resource_id")
-        samples_to_create = scenario._make_samples(count=samples_per_resource,
-                                                   interval=60, **kwargs)
+        samples_to_create = list(
+            scenario._make_samples(count=samples_per_resource, interval=60,
+                                   **kwargs)
+        )[0]
         new_context = copy.deepcopy(real_context)
         for id_ in tenants.keys():
             new_context["tenants"][id_].setdefault("samples", [])
@@ -30,21 +30,43 @@ class CeilometerScenarioTestCase(test.ScenarioTestCase):
         super(CeilometerScenarioTestCase, self).setUp()
         self.scenario = utils.CeilometerScenario(self.context)
 
-    def test__make_samples(self):
+    def test__make_samples_no_batch_size(self):
         self.scenario.generate_random_name = mock.Mock(
             return_value="fake_resource")
         test_timestamp = datetime.datetime(2015, 10, 20, 14, 18, 40)
-        result = self.scenario._make_samples(count=2, interval=60,
-                                             timestamp=test_timestamp)
+        result = list(self.scenario._make_samples(count=2, interval=60,
+                                                  timestamp=test_timestamp))
+        self.assertEqual(1, len(result))
         expected = {"counter_name": "cpu_util",
                     "counter_type": "gauge",
                     "counter_unit": "%",
                     "counter_volume": 1,
                     "resource_id": "fake_resource",
                     "timestamp": test_timestamp.isoformat()}
-        self.assertEqual(expected, result[0])
-        samples_int = (parser.parse(result[0]["timestamp"]) -
-                       parser.parse(result[1]["timestamp"])).seconds
+        self.assertEqual(expected, result[0][0])
+        samples_int = (parser.parse(result[0][0]["timestamp"]) -
+                       parser.parse(result[0][1]["timestamp"])).seconds
         self.assertEqual(60, samples_int)
 
+    def test__make_samples_batch_size(self):
+        self.scenario.generate_random_name = mock.Mock(
+            return_value="fake_resource")
+        test_timestamp = datetime.datetime(2015, 10, 20, 14, 18, 40)
+        result = list(self.scenario._make_samples(count=4, interval=60,
+                                                  batch_size=2,
+                                                  timestamp=test_timestamp))
+        self.assertEqual(2, len(result))
+        expected = {"counter_name": "cpu_util",
+                    "counter_type": "gauge",
+                    "counter_unit": "%",
+                    "counter_volume": 1,
+                    "resource_id": "fake_resource",
+                    "timestamp": test_timestamp.isoformat()}
+        self.assertEqual(expected, result[0][0])
+        samples_int = (parser.parse(result[0][-1]["timestamp"]) -
+                       parser.parse(result[1][0]["timestamp"])).seconds
+        # NOTE(idegtiarov): here we check that the interval between the last
+        # sample in the first batch and the first sample in the second batch
+        # is equal to 60 sec.
+        self.assertEqual(60, samples_int)
+
     def test__make_timestamp_query(self):
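To see why the last assertion holds: with count=4, interval=60 and batch_size=2 the generator produces timestamps t, t-60, t-120 and t-180, split into the batches [[t, t-60], [t-120, t-180]], so the gap between the last sample of the first batch (t-60) and the first sample of the second batch (t-120) is exactly 60 seconds. A quick sketch of that arithmetic (illustrative only, not part of the commit):

    import datetime

    t = datetime.datetime(2015, 10, 20, 14, 18, 40)
    timestamps = [t - datetime.timedelta(seconds=60 * i) for i in range(4)]
    batches = [timestamps[:2], timestamps[2:]]       # what batch_size=2 produces
    print((batches[0][-1] - batches[1][0]).seconds)  # 60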