diff --git a/.gitignore b/.gitignore
index 58d97fd..c115f12 100644
--- a/.gitignore
+++ b/.gitignore
@@ -57,3 +57,4 @@ ChangeLog
# vmtp
*.local*
*.json
+*.html
diff --git a/scale/base_network.py b/scale/base_network.py
index 9379330..971087e 100644
--- a/scale/base_network.py
+++ b/scale/base_network.py
@@ -105,7 +105,7 @@ class BaseNetwork(object):
secgroup_name = network_prefix + "-SG" + str(secgroup_count)
secgroup_instance.create_secgroup_with_rules(secgroup_name)
- LOG.info("Scheduled to create virtual machines...")
+ LOG.info("Scheduled to create virtual machines for network %s..." % network_prefix)
if config_scale['use_floatingip']:
external_network = find_external_network(self.neutron_client)
# Schedule to create the required number of VMs
diff --git a/scale/cfg.scale.yaml b/scale/cfg.scale.yaml
index 62e1ae8..a49c71d 100644
--- a/scale/cfg.scale.yaml
+++ b/scale/cfg.scale.yaml
@@ -14,6 +14,9 @@ keystone_admin_role: "admin"
# Cleanup all kloudbuster resources upon exit
cleanup_resources: True
+# Number of VMs to create in parallel in each cloud (thread pool size)
+vm_creation_concurrency: 5
+
#
# ssh access to the test VMs launched by kloudbuster is not required
# but can be handy if the user wants to ssh manually to any of them (for example
@@ -50,13 +53,13 @@ server:
# Assign floating IP for every VM
use_floatingip: True
-
+
# Placement hint
# Availability zone to use for servers in the server cloud
# Leave empty if you prefer to have the Nova scheduler place the server VMs
# If you want to pick a particular AZ, put that AZ name (e.g. nova)
# If you want a paticular compute host, put the AZ and compute host names s
- # eparated by ':' (e.g. nova:tme100)
+ # eparated by ':' (e.g. nova:tme100)
availability_zone:
@@ -70,7 +73,7 @@ client:
# Leave empty if you prefer to have the Nova scheduler place the server VMs
# If you want to pick a particular AZ, put that AZ name (e.g. nova)
# If you want a paticular compute host, put the AZ and compute host names s
- # eparated by ':' (e.g. nova:tme100)
+ # eparated by ':' (e.g. nova:tme100)
availability_zone:
# (TEMP) Redis server configuration
@@ -104,4 +107,3 @@ client:
# Prompt before running benchmarking tools
prompt_before_run: False
-
diff --git a/scale/kb_gen_chart.py b/scale/kb_gen_chart.py
index 2140402..fd466d6 100755
--- a/scale/kb_gen_chart.py
+++ b/scale/kb_gen_chart.py
@@ -20,7 +20,6 @@
import argparse
import json
-import locale
import os
import os.path
import sys
@@ -33,7 +32,7 @@ __version__ = '0.0.1'
kb_html_tpl = "./kb_tpl.jinja"
def get_formatted_num(value):
- return locale.format("%d", value, grouping=True)
+ return '{:,}'.format(value)
# List of fields to format with thousands separators
fields_to_format = ['rps_max', 'rps', 'http_sock_err', 'total_server_vm',
@@ -142,5 +141,4 @@ if __name__ == '__main__':
print('Version ' + __version__)
sys.exit(0)
- locale.setlocale(locale.LC_ALL, 'en_US')
gen_chart(opts.file, opts.chart, opts.browser)
diff --git a/scale/kb_tpl.jinja b/scale/kb_tpl.jinja
index 834ed0e..6ed27ec 100644
--- a/scale/kb_tpl.jinja
+++ b/scale/kb_tpl.jinja
@@ -113,7 +113,7 @@
diff --git a/scale/kloudbuster.py b/scale/kloudbuster.py
index 562f8d5..521960f 100644
--- a/scale/kloudbuster.py
+++ b/scale/kloudbuster.py
@@ -16,6 +16,7 @@ import json
from multiprocessing.pool import ThreadPool
import os
import sys
+import threading
import traceback
import configure
@@ -137,8 +138,8 @@ class Kloud(object):
# Store the fixed ip as ssh ip since there is no floating ip
instance.ssh_ip = instance.fixed_ip
- def create_vms(self):
- tpool = ThreadPool(processes=5)
+ def create_vms(self, vm_creation_concurrency):
+ tpool = ThreadPool(processes=vm_creation_concurrency)
tpool.map(self.create_vm, self.get_all_instances())
@@ -167,6 +168,8 @@ class KloudBuster(object):
self.kloud = Kloud(server_cfg, server_cred)
self.testing_kloud = Kloud(client_cfg, client_cred, testing_side=True)
self.final_result = None
+ self.server_vm_create_thread = None
+ self.client_vm_create_thread = None
def print_provision_info(self):
"""
@@ -218,9 +221,9 @@ class KloudBuster(object):
Support concurrency in fututure
"""
kbscheduler = None
+ vm_creation_concurrency = self.client_cfg.vm_creation_concurrency
try:
self.kloud.create_resources()
- self.kloud.create_vms()
self.testing_kloud.create_resources()
# Start the scheduler and ready for the incoming redis messages
@@ -235,7 +238,24 @@ class KloudBuster(object):
shared_net = self.testing_kloud.get_first_network()
self.kloud.attach_to_shared_net(shared_net)
self.gen_user_data()
- self.testing_kloud.create_vms()
+
+ # Create VMs in both tested and testing kloud concurrently
+ self.client_vm_create_thread = threading.Thread(target=self.testing_kloud.create_vms,
+ args=[vm_creation_concurrency])
+ self.server_vm_create_thread = threading.Thread(target=self.kloud.create_vms,
+ args=[vm_creation_concurrency])
+ self.client_vm_create_thread.daemon = True
+ self.server_vm_create_thread.daemon = True
+ if self.single_cloud:
+ self.client_vm_create_thread.start()
+ self.client_vm_create_thread.join()
+ self.server_vm_create_thread.start()
+ self.server_vm_create_thread.join()
+ else:
+ self.client_vm_create_thread.start()
+ self.server_vm_create_thread.start()
+ self.client_vm_create_thread.join()
+ self.server_vm_create_thread.join()
# Function that print all the provisioning info
self.print_provision_info()
@@ -334,6 +354,18 @@ if __name__ == '__main__':
alt_config = configure.Configuration.from_file(CONF.config).configure()
config_scale = config_scale.merge(alt_config)
+ # Retrieve the credentials
+ cred = credentials.Credentials(CONF.tested_rc, CONF.passwd_tested, CONF.no_env)
+ if CONF.testing_rc and CONF.testing_rc != CONF.tested_rc:
+ cred_testing = credentials.Credentials(CONF.testing_rc,
+ CONF.passwd_testing,
+ CONF.no_env)
+ single_cloud = False
+ else:
+ # Use the same openrc file for both cases
+ cred_testing = cred
+ single_cloud = True
+
# Initialize the key pair name
if config_scale['public_key_file']:
# verify the public key file exists
@@ -361,18 +393,6 @@ if __name__ == '__main__':
# VMs on the server side (1:1)
client_side_cfg['vms_per_network'] = get_total_vm_count(server_side_cfg)
- # Retrieve the credentials
- cred = credentials.Credentials(CONF.tested_rc, CONF.passwd_tested, CONF.no_env)
- if CONF.testing_rc and CONF.testing_rc != CONF.tested_rc:
- cred_testing = credentials.Credentials(CONF.testing_rc,
- CONF.passwd_testing,
- CONF.no_env)
- single_cloud = False
- else:
- # Use the same openrc file for both cases
- cred_testing = cred
- single_cloud = True
-
# The KloudBuster class is just a wrapper class
# levarages tenant and user class for resource creations and
# deletion
diff --git a/scale/perf_tool.py b/scale/perf_tool.py
index eae5e88..6aba2db 100644
--- a/scale/perf_tool.py
+++ b/scale/perf_tool.py
@@ -45,7 +45,8 @@ class PerfTool(object):
def parse_results(self, protocol=None, throughput=None, lossrate=None, retrans=None,
rtt_ms=None, reverse_dir=False, msg_size=None, cpu_load=None,
http_total_req=None, http_rps=None, http_tp_kbytes=None,
- http_sock_err=None, http_err=None, latency_stats=None):
+ http_sock_err=None, http_sock_timeout=None, http_err=None,
+ latency_stats=None):
res = {'tool': self.name}
if throughput is not None:
res['throughput_kbps'] = throughput
@@ -73,6 +74,8 @@ class PerfTool(object):
res['http_throughput_kbytes'] = http_tp_kbytes
if http_sock_err:
res['http_sock_err'] = http_sock_err
+ if http_sock_timeout:
+ res['http_sock_timeout'] = http_sock_timeout
if http_err:
res['http_err'] = http_err
if latency_stats:
diff --git a/scale/wrk_tool.py b/scale/wrk_tool.py
index 0de9259..43e8bd7 100644
--- a/scale/wrk_tool.py
+++ b/scale/wrk_tool.py
@@ -84,9 +84,11 @@ class WrkTool(PerfTool):
v2 = int(http_sock_err.group(2))
v3 = int(http_sock_err.group(3))
v4 = int(http_sock_err.group(4))
- http_sock_err = v1 + v2 + v3 + v4
+ http_sock_err = v1 + v2 + v3
+ http_sock_timeout = v4
else:
http_sock_err = 0
+ http_sock_timeout = 0
re_str = r'Non-2xx or 3xx responses: (\d+)'
http_err = re.search(re_str, stdout)
@@ -97,7 +99,7 @@ class WrkTool(PerfTool):
re_str = r'__START_KLOUDBUSTER_DATA__\n(((.*)\n)*)__END_KLOUDBUSTER_DATA__'
latency_stats = re.search(re_str, stdout).group(1).split()
- latency_stats = [(float(x.split(',')[0]), int(x.split(',')[1])) for x in latency_stats]
+ latency_stats = [[float(x.split(',')[0]), int(x.split(',')[1])] for x in latency_stats]
except Exception:
return self.parse_error('Could not parse: %s' % (stdout))
@@ -105,6 +107,7 @@ class WrkTool(PerfTool):
http_rps=http_rps,
http_tp_kbytes=http_tp_kbytes,
http_sock_err=http_sock_err,
+ http_sock_timeout=http_sock_timeout,
http_err=http_err,
latency_stats=latency_stats)
@@ -115,7 +118,8 @@ class WrkTool(PerfTool):
if not total_count:
return all_res
- for key in ['http_rps', 'http_total_req', 'http_sock_err', 'http_throughput_kbytes']:
+ for key in ['http_rps', 'http_total_req', 'http_sock_err',
+ 'http_sock_timeout', 'http_throughput_kbytes']:
all_res[key] = 0
for item in results:
if (key in item['results']):
@@ -127,12 +131,15 @@ class WrkTool(PerfTool):
first_result = results[0]['results']['latency_stats']
latency_counts = len(first_result)
+        # Debug aid (disabled): dump each VM's raw latency histogram
+        # (item['results']['latency_stats']) before averaging.
+
for i in range(latency_counts):
latency_avg = 0
for item in results:
latency_avg += item['results']['latency_stats'][i][1]
latency_avg = int(latency_avg / total_count)
- latency_tup = (first_result[i][0], latency_avg)
+ latency_tup = [first_result[i][0], latency_avg]
all_res['latency_stats'].append(latency_tup)
return all_res