Update all modules to py3
The conversion was mostly done via 2to3 and completed by manual updates fixing TypeErrors and the former invalid print statements. It has been double-checked via Functest, which cannot cover every code path.

Change-Id: If272524f147735a942a84ce1d2bec4e3423817c2
Signed-off-by: Cédric Ollivier <ollivier.cedric@gmail.com>
commit dc79be8a3b
parent a8269684ff
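For context, the port follows the standard 2to3 patterns visible throughout the diff below: print statements become print() calls (with end=' ' replacing the old trailing comma), implicit intra-package imports such as "import monitor" become explicit relative imports ("from . import monitor"), xrange/iteritems/raw_input give way to range/items/input, StringIO moves to io.StringIO, and text is explicitly encoded to bytes where an API requires it (e.g. hashlib). The following is a minimal, self-contained sketch of those target idioms; the function and variable names are illustrative only and are not taken from the vmtp code base.

# Illustrative Python 3 idioms (hypothetical names, not vmtp code).
import hashlib
import io


def run_id(args, date, version):
    # hashlib.md5() only accepts bytes under Python 3, hence the explicit encode()
    key = args + date + version
    return hashlib.md5(key.encode()).hexdigest()[:7]


def banner(title, width=40):
    # print is a function in Python 3; end=' ' replaces the old trailing comma
    print("-" * width)
    print(title, end=' ')
    print()


if __name__ == "__main__":
    buf = io.StringIO()                  # replaces StringIO.StringIO()
    for i in range(3):                   # replaces xrange()
        buf.write("%d: %s\n" % (i, run_id("args", "2018-01-01", str(i))))
    banner("sample run ids")
    print(buf.getvalue())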
tox.ini (15 lines changed)
@@ -1,6 +1,6 @@
 [tox]
 minversion = 1.6
-envlist = py27,pep8,yamllint
+envlist = py36,pep8,yamllint
 skipsdist = True

 [testenv]
@@ -16,18 +16,23 @@ setenv =
 commands = py.test -q -s --basetemp={envtmpdir} {posargs}

 [testenv:pep8]
+basepython = python3
 commands = flake8 {toxinidir}

 [testenv:venv]
+basepython = python3
 commands = {posargs}

 [testenv:cover]
+basepython = python3
 commands = python setup.py testr --coverage --testr-args='{posargs}'

 [testenv:docs]
+basepython = python3
 commands = python setup.py build_sphinx

 [testenv:yamllint]
+basepython = python3
 commands = yamllint -s vmtp/cfg.default.yaml

 [flake8]
@@ -41,9 +46,11 @@ show-source = True
 #H404: multi line docstring should start without a leading new line
 #H405: multi line docstring summary not separated with an empty line
 #H904: Wrap long lines in parentheses instead of a backslash
-#H106 Don’t put vim configuration in source files
-#H203 Use assertIs(Not)None to check for None
-ignore = H803,E302,E303,H233,H302,H404,H405,H904
+#H106: Don’t put vim configuration in source files
+#H203: Use assertIs(Not)None to check for None
+#H304: No relative imports
+#H306: Imports not in alphabetical order
+ignore = H803,E302,E303,H233,H302,H404,H405,H904,H304,H306
 enable-extensions=H106,H203
 builtins = _
 exclude=.venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build
@@ -20,7 +20,7 @@ import time

 import glanceclient.exc as glance_exception
 import keystoneauth1
-from log import LOG
+from .log import LOG
 from neutronclient.common.exceptions import Conflict
 import novaclient
 import novaclient.exceptions as exceptions
@@ -21,7 +21,7 @@ from keystoneauth1 import session
 import os
 import re

-from log import LOG
+from .log import LOG

 class Credentials(object):

@@ -15,10 +15,10 @@

 import re

-from log import LOG
-import monitor
+from .log import LOG
+from . import monitor
 import netaddr
-import sshutils
+from . import sshutils


 # a dictionary of sequence number indexed by a name prefix
@@ -15,8 +15,8 @@

 import re

-from log import LOG
-from perf_tool import PerfTool
+from .log import LOG
+from .perf_tool import PerfTool

 # The resulting unit should be in K
 MULTIPLIERS = {'K': 1,
@@ -126,7 +126,7 @@ class MonitorExecutor(Thread):
         raw_data = self.retrieve_stats_raw()

         if raw_data is None or len(raw_data) == 0:
-            print "Failed to retrieve stats from server"
+            print("Failed to retrieve stats from server")
             return

         xtree = etree.XML(raw_data)
@@ -175,7 +175,7 @@ class MonitorExecutor(Thread):
         try:
             soc.connect((self.gmond_svr_ip, self.gmond_port))
         except socket.error as exp:
-            print "Connection failure host: %s [%s]" % (self.gmond_svr_ip, exp)
+            print("Connection failure host: %s [%s]" % (self.gmond_svr_ip, exp))
             return None

         data = ""
@@ -183,7 +183,7 @@ class MonitorExecutor(Thread):
             try:
                 rbytes = soc.recv(4096)
             except socket.error as exp:
-                print "Read failed for host: ", str(exp)
+                print("Read failed for host: ", str(exp))
                 return None

             if len(rbytes) == 0:
@@ -249,7 +249,7 @@ class Monitor(object):
         the telnet command on the port to retrieve the xml raw data.
         '''
         cmd = "telnet " + self.gmond_svr_ip + " " + str(self.gmond_port)
-        print "cmd: ", cmd
+        print("cmd: ", cmd)
         port = str(self.gmond_port)

         proc = subprocess.Popen(["telnet", self.gmond_svr_ip, port],
@@ -369,23 +369,23 @@ class Monitor(object):
         Print the CPU stats
         '''
         hl_len = 80
-        print "-" * hl_len
-        print "CPU Statistics: ",
+        print("-" * hl_len)
+        print("CPU Statistics: ", end=' ')

         for parsed_node in self.gmond_parsed_tree_list:
             hosts = self.get_host_list(parsed_node)

-            print self.get_formatted_datetime(parsed_node)
-            print self.get_formatted_host_row(hosts)
-            print "-" * hl_len
-            print self.get_formatted_metric_row(parsed_node, "cpu_user", 18)
-            print self.get_formatted_metric_row(parsed_node, "cpu_system", 18)
+            print(self.get_formatted_datetime(parsed_node))
+            print(self.get_formatted_host_row(hosts))
+            print("-" * hl_len)
+            print(self.get_formatted_metric_row(parsed_node, "cpu_user", 18))
+            print(self.get_formatted_metric_row(parsed_node, "cpu_system", 18))

-            print "Aggregate ",
+            print("Aggregate ", end=' ')
             for host in hosts:
-                print str(self.get_aggregate_cpu_usage(parsed_node,
-                          host['NAME'])).ljust(16),
-            print "\n"
+                print(str(self.get_aggregate_cpu_usage(parsed_node,
+                          host['NAME'])).ljust(16), end=' ')
+            print("\n")

     def dump_gmond_parsed_tree(self):
         '''
@@ -394,18 +394,18 @@ class Monitor(object):
         hl_len = 60

         for parsed_node in self.gmond_parsed_tree_list:
-            print "%-20s (%s) URL: %s " % \
+            print("%-20s (%s) URL: %s " %
                 (parsed_node['CLUSTER-NAME'],
                  parsed_node['LOCALTIME'],
-                 parsed_node['URL'])
-            print "-" * hl_len
+                 parsed_node['URL']))
+            print("-" * hl_len)

             row_str = " ".ljust(9)
             for host in parsed_node['hosts']:
                 row_str += host['NAME'].ljust(15)
             row_str += "\n"
-            print row_str
-            print "-" * hl_len
+            print(row_str)
+            print("-" * hl_len)
             metric_count = len(parsed_node['hosts'][0]['metrics'])
             for count in range(0, metric_count):
                 row_str = ""
@@ -418,7 +418,7 @@ class Monitor(object):

                 row_str += str(parsed_node['hosts'][0]).ljust(5)

-                print row_str
+                print(row_str)


 ##################################################
@@ -426,17 +426,17 @@ class Monitor(object):
 # invoked from pns script.
 ##################################################
 def main():
-    print "main: monitor"
+    print("main: monitor")
     gmon = Monitor("172.22.191.151", 8649)
     gmon.start_monitoring_thread(freq=5, count=20)
-    print "wait for 15 seconds"
+    print("wait for 15 seconds")
     time.sleep(20)
-    print "Now force the thread to stop"
+    print("Now force the thread to stop")
     gmon.stop_monitoring_thread()
     gmon.dump_cpu_stats()

     cpu_metric = gmon.build_cpu_metrics()
-    print "cpu_metric: ", cpu_metric
+    print("cpu_metric: ", cpu_metric)


 if __name__ == "__main__":
@@ -15,13 +15,13 @@

 import time

-from log import LOG
+from .log import LOG
 # Module containing a helper class for operating on OpenStack networks
 from neutronclient.common.exceptions import IpAddressInUseClient
 from neutronclient.common.exceptions import NetworkInUseClient
 from neutronclient.common.exceptions import NeutronException
 from neutronclient.common.exceptions import PortInUseClient
-import vmtp
+from . import vmtp

 class Network(object):

@@ -13,10 +13,10 @@
 # under the License.
 #

-from log import LOG
-from perf_tool import PerfTool
+from .log import LOG
+from .perf_tool import PerfTool
 import re
-import sshutils
+from . import sshutils


 class NuttcpTool(PerfTool):
@@ -58,7 +58,7 @@ class NuttcpTool(PerfTool):
         self.instance.display('Measuring TCP Throughput (packet size=%d)...',
                               pkt_size)
         loop_count = self.instance.config.tcp_tp_loop_count
-        for _ in xrange(loop_count):
+        for _ in range(loop_count):
             res = self.run_client_dir(target_ip, mss,
                                       reverse_dir=reverse_dir,
                                       bandwidth_kbps=bandwidth,
@@ -13,8 +13,8 @@
 # under the License.
 #

-from instance import Instance as Instance
-from perf_tool import PingTool
+from .instance import Instance as Instance
+from .perf_tool import PingTool

 class PerfInstance(Instance):
     '''An openstack instance to run performance tools
@@ -17,7 +17,7 @@ import abc
 import re
 import six

-from log import LOG
+from .log import LOG
 from pkg_resources import resource_filename

 # where to copy the tool on the target, must end with slash
@@ -32,8 +32,8 @@ def connect_to_mongod(mongod_ip, mongod_port):
     try:
         client = pymongo.MongoClient(mongod_ip, mongod_port)
     except pymongo.errors.ConnectionFailure:
-        print "ERROR: pymongo. Connection Failure (%s) (%d)" % \
-            (mongod_ip, mongod_port)
+        print("ERROR: pymongo. Connection Failure (%s) (%d)" %
+              (mongod_ip, mongod_port))
         return None

     return client
@@ -45,7 +45,7 @@ def get_mongod_collection(db_client, database_name, collection_name):
     '''
     mongo_db = db_client[database_name]
     if mongo_db is None:
-        print "Invalid database name"
+        print("Invalid database name")
         return None

     collection = mongo_db[collection_name]
@@ -63,11 +63,11 @@ def is_type_dict(var):

 def add_new_document_to_collection(collection, document):
     if collection is None:
-        print "collection cannot be none"
+        print("collection cannot be none")
         return None

     if not is_type_dict(document):
-        print "Document type should be a dictionary"
+        print("Document type should be a dictionary")
         return None

     post_id = collection.insert(document)
@@ -77,20 +77,20 @@ def add_new_document_to_collection(collection, document):

 def search_documents_in_collection(collection, pattern):
     if collection is None:
-        print "collection cannot be None"
+        print("collection cannot be None")
         return None

     if pattern is None:
         pattern = {}

     if not is_type_dict(pattern):
-        print "pattern type should be a dictionary"
+        print("pattern type should be a dictionary")
         return None

     try:
         output = collection.find(pattern)
     except TypeError:
-        print "A TypeError occurred. Invalid pattern: ", pattern
+        print("A TypeError occurred. Invalid pattern: ", pattern)
         return None

     return output
@@ -104,14 +104,14 @@ def pns_add_test_result_to_mongod(mongod_ip,
     '''
     client = connect_to_mongod(mongod_ip, mongod_port)
     if client is None:
-        print "ERROR: Failed to connect to mongod (%s) (%d)" % \
-            (mongod_ip, mongod_port)
+        print("ERROR: Failed to connect to mongod (%s) (%d)" %
+              (mongod_ip, mongod_port))
         return None

     collection = get_mongod_collection(client, pns_database, pns_collection)
     if collection is None:
-        print "ERROR: Failed to get collection DB: %s, %s" % \
-            (pns_database, pns_collection)
+        print("ERROR: Failed to get collection DB: %s, %s" %
+              (pns_database, pns_collection))
         return None

     post_id = add_new_document_to_collection(collection, document)
@@ -127,14 +127,14 @@ def pns_search_results_from_mongod(mongod_ip, mongod_port,
     '''
     client = connect_to_mongod(mongod_ip, mongod_port)
     if client is None:
-        print "ERROR: Failed to connect to mongod (%s) (%d)" % \
-            (mongod_ip, mongod_port)
+        print("ERROR: Failed to connect to mongod (%s) (%d)" %
+              (mongod_ip, mongod_port))
         return

     collection = get_mongod_collection(client, pns_database, pns_collection)
     if collection is None:
-        print "ERROR: Failed to get collection DB: %s, %s" % \
-            (pns_database, pns_collection)
+        print("ERROR: Failed to get collection DB: %s, %s" %
+              (pns_database, pns_collection))
         return

     docs = search_documents_in_collection(collection, pattern)
@@ -19,7 +19,7 @@ import argparse
 import re
 import sys

-import pns_mongo
+from . import pns_mongo
 import tabulate

 ###########################################
@@ -126,8 +126,8 @@ def show_pnsdb_summary(db_server, db_port, db_name, db_collection):
                                                     db_collection,
                                                     pattern)
     record_list = get_tcp_flow_data(data)
-    print tabulate.tabulate(record_list, headers="keys", tablefmt="grid")
-    print data.count()
+    print(tabulate.tabulate(record_list, headers="keys", tablefmt="grid"))
+    print(data.count())

     data = pns_mongo.pns_search_results_from_mongod(db_server,
                                                     db_port,
@@ -135,8 +135,8 @@ def show_pnsdb_summary(db_server, db_port, db_name, db_collection):
                                                     db_collection,
                                                     pattern)
     record_list = get_udp_flow_data(data)
-    print "UDP:"
-    print tabulate.tabulate(record_list, headers="keys", tablefmt="grid")
+    print("UDP:")
+    print(tabulate.tabulate(record_list, headers="keys", tablefmt="grid"))


 def get_results_info(results, cols, protocol=None):
@@ -148,7 +148,7 @@ def get_results_info(results, cols, protocol=None):
             if result['protocol'] != protocol:
                 continue
         for col in cols:
-            if col in result.keys():
+            if col in list(result.keys()):
                 show_result[col] = result[col]

         result_list.append(show_result)
@@ -185,17 +185,17 @@ def get_record_info(record, cols):


 def print_record_header(record):
-    print "#" * 60
-    print "RUN: %s" % (record['date'])
+    print("#" * 60)
+    print("RUN: %s" % (record['date']))
     cols = ['date', 'distro', 'openstack_version', 'encapsulation']
     record_list = get_record_info(record, cols)
-    print tabulate.tabulate(record_list)
+    print(tabulate.tabulate(record_list))


 def print_flow_header(flow):
     cols = ['desc']
     flow_list = get_flow_info(flow, cols)
-    print tabulate.tabulate(flow_list, tablefmt="simple")
+    print(tabulate.tabulate(flow_list, tablefmt="simple"))


 def show_tcp_summary_encap_vlan(db_server, db_port, db_name, db_collection):
@@ -213,10 +213,10 @@ def show_tcp_summary_encap_vlan(db_server, db_port, db_name, db_collection):
         cols = ['throughput_kbps', 'protocol', 'tool', 'rtt_ms']
         result_list = get_results_info(flow['results'], cols,
                                        protocol="TCP")
-        print tabulate.tabulate(result_list,
-                                headers="keys", tablefmt="grid")
+        print(tabulate.tabulate(result_list,
+                                headers="keys", tablefmt="grid"))

-        print "\n"
+        print("\n")


 def show_udp_summary_encap_vlan(db_server, db_port, db_name, db_collection):
@@ -234,16 +234,16 @@ def show_udp_summary_encap_vlan(db_server, db_port, db_name, db_collection):
         cols = ['throughput_kbps', 'protocol', 'loss_rate', 'pkt_size']
         result_list = get_results_info(flow['results'], cols,
                                        protocol="UDP")
-        print tabulate.tabulate(result_list,
-                                headers="keys", tablefmt="grid")
+        print(tabulate.tabulate(result_list,
+                                headers="keys", tablefmt="grid"))


 def show_summary_all(db_server, db_port, db_name, db_collection):
     pattern = {}

-    print "-" * 60
-    print "Summary Data: "
-    print "-" * 60
+    print("-" * 60)
+    print("Summary Data: ")
+    print("-" * 60)

     data = pns_mongo.pns_search_results_from_mongod(db_server,
                                                     db_port,
@@ -260,10 +260,10 @@ def show_summary_all(db_server, db_port, db_name, db_collection):
                 'rtt_ms', 'loss_rate', 'pkt_size',
                 'rtt_avg_ms']
         result_list = get_results_info(flow['results'], cols)
-        print tabulate.tabulate(result_list,
-                                headers="keys", tablefmt="grid")
+        print(tabulate.tabulate(result_list,
+                                headers="keys", tablefmt="grid"))

-        print "\n"
+        print("\n")


 def main():
@@ -287,7 +287,7 @@ def main():
     (opts, _) = parser.parse_known_args()

     if not opts.server_ip:
-        print "Provide the pns db server ip address"
+        print("Provide the pns db server ip address")
         sys.exit()

     db_server = opts.server_ip
@@ -300,24 +300,24 @@ def main():
     db_name = "pnsdb"

     if opts.official:
-        print "Use db collection officialdata"
+        print("Use db collection officialdata")
         db_collection = "officialdata"
     else:
         db_collection = "testdata"

-    print "-" * 40
-    print "Reports Menu:"
-    print "-" * 40
+    print("-" * 40)
+    print("Reports Menu:")
+    print("-" * 40)
     count = 0
     for option in pnsdb_results_list:
-        print "%d: %s" % (count, option[0])
+        print("%d: %s" % (count, option[0]))
         count += 1
-    print "\n"
+    print("\n")

     try:
-        user_opt = int(raw_input("Choose a report [no] : "))
+        user_opt = int(input("Choose a report [no] : "))
     except ValueError:
-        print "Invalid option"
+        print("Invalid option")
         sys.exit()

     globals()[pnsdb_results_list[user_opt][1]](db_server,
@@ -60,11 +60,11 @@ Eventlet:
 import re
 import select
 import socket
-import StringIO
+import io
 import sys
 import time

-from log import LOG
+from .log import LOG
 import paramiko
 import scp

@@ -176,8 +176,8 @@ class SSH(object):
         '''Get the binary form of the private key
         from the text form
         '''
-        if isinstance(key, basestring):
-            key = StringIO.StringIO(key)
+        if isinstance(key, str):
+            key = io.StringIO(key)
         errors = []
         for key_class in (paramiko.rsakey.RSAKey, paramiko.dsskey.DSSKey):
             try:
@@ -233,8 +233,8 @@ class SSH(object):

         client = self._get_client()

-        if isinstance(stdin, basestring):
-            stdin = StringIO.StringIO(stdin)
+        if isinstance(stdin, str):
+            stdin = io.StringIO(stdin)

         return self._run(client, cmd, stdin=stdin, stdout=stdout,
                          stderr=stderr, raise_on_error=raise_on_error,
@@ -245,7 +245,7 @@ class SSH(object):

         transport = client.get_transport()
         session = transport.open_session()
-        session.exec_command(cmd)
+        session.exec_command(cmd.encode())
         start_time = time.time()

         data_to_send = ''
@@ -265,13 +265,13 @@ class SSH(object):
             if session.recv_ready():
                 data = session.recv(4096)
                 if stdout is not None:
-                    stdout.write(data)
+                    stdout.write(data.decode())
                 continue

             if session.recv_stderr_ready():
                 stderr_data = session.recv_stderr(4096)
                 if stderr is not None:
-                    stderr.write(stderr_data)
+                    stderr.write(stderr_data.decode())
                 continue

             if session.send_ready():
@@ -283,7 +283,7 @@ class SSH(object):
                         session.shutdown_write()
                         writes = []
                         continue
-                sent_bytes = session.send(data_to_send)
+                sent_bytes = session.send(data_to_send.encode())
                 data_to_send = data_to_send[sent_bytes:]

             if session.exit_status_ready():
@@ -315,8 +315,8 @@ class SSH(object):
         Return tuple (exit_status, stdout, stderr)

         """
-        stdout = StringIO.StringIO()
-        stderr = StringIO.StringIO()
+        stdout = io.StringIO()
+        stderr = io.StringIO()

         exit_status = self.run(cmd, stderr=stderr,
                                stdout=stdout, stdin=stdin,
@@ -669,16 +669,16 @@ def main():
     # As argument pass the SSH access string, e.g. "localadmin@1.1.1.1:secret"
     test_ssh = SSH(SSHAccess(sys.argv[1]))

-    print 'ID=' + test_ssh.distro_id
-    print 'ID_LIKE=' + test_ssh.distro_id_like
-    print 'VERSION_ID=' + test_ssh.distro_version
+    print('ID=' + test_ssh.distro_id)
+    print('ID_LIKE=' + test_ssh.distro_id_like)
+    print('VERSION_ID=' + test_ssh.distro_version)

     # ssh.wait()
     # print ssh.pidof('bash')
     # print ssh.stat('/tmp')
-    print test_ssh.check_openstack_version()
-    print test_ssh.get_cpu_info()
-    print test_ssh.get_l2agent_version("Open vSwitch agent")
+    print(test_ssh.check_openstack_version())
+    print(test_ssh.get_cpu_info())
+    print(test_ssh.get_l2agent_version("Open vSwitch agent"))

 if __name__ == "__main__":
     main()
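A note on the SSH helper changes above: the trickiest part of the port is that paramiko channels exchange bytes while the rest of the module now handles str, so commands and outgoing data are encoded before send() and received data is decoded before being written to the local io.StringIO buffers. A minimal, self-contained sketch of that pattern follows; FakeChannel and run_command are illustrative stand-ins and are not part of vmtp or paramiko.

# Illustrative sketch of the bytes-on-the-wire, str-in-the-program pattern
# (hypothetical names; a real run would use a paramiko channel instead).
import io


class FakeChannel(object):
    """Stands in for a paramiko channel: it only accepts and returns bytes."""

    def __init__(self):
        self._buf = b""

    def send(self, data):
        self._buf += data
        return len(data)

    def recv(self, size):
        chunk, self._buf = self._buf[:size], self._buf[size:]
        return chunk


def run_command(chan, cmd):
    stdout = io.StringIO()           # io.StringIO replaces StringIO.StringIO
    chan.send(cmd.encode())          # str -> bytes before hitting the wire
    data = chan.recv(4096)
    if data:
        stdout.write(data.decode())  # bytes -> str before local buffering
    return stdout.getvalue()


if __name__ == "__main__":
    print(run_command(FakeChannel(), "uname -a"))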
vmtp/vmtp.py (46 lines changed)
@@ -18,35 +18,35 @@ import argparse
 import datetime
 import hashlib
 import json
-import log
+from . import log
 import os
 import pprint
 import re
 import sys
 import traceback

-from __init__ import __version__
-import compute
-from config import config_load
-from config import config_loads
-import credentials
-from fluentd import FluentLogHandler
+from .__init__ import __version__
+from . import compute
+from .config import config_load
+from .config import config_loads
+from . import credentials
+from .fluentd import FluentLogHandler
 from glanceclient.v2 import client as glanceclient
-import iperf_tool
+from . import iperf_tool
 from keystoneclient import client as keystoneclient
-from log import CONLOG
-from log import FILELOG
-from log import LOG
-import network
+from .log import CONLOG
+from .log import FILELOG
+from .log import LOG
+from . import network
 from neutronclient.neutron import client as neutronclient
 from novaclient import client as novaclient
 from novaclient.exceptions import ClientException
-import nuttcp_tool
-from perf_instance import PerfInstance as PerfInstance
+from . import nuttcp_tool
+from .perf_instance import PerfInstance as PerfInstance
 from pkg_resources import resource_string
-import pns_mongo
+from . import pns_mongo
 from prettytable import PrettyTable
-import sshutils
+from . import sshutils

 flow_num = 0
 return_code = 0
@@ -99,7 +99,7 @@ class ResultsCollector(object):

     def generate_runid(self):
         key = self.results['args'] + self.results['date'] + self.results['version']
-        self.results['run_id'] = hashlib.md5(key).hexdigest()[:7]
+        self.results['run_id'] = hashlib.md5(key.encode()).hexdigest()[:7]

     def save(self, cfg):
         '''Save results in json format file.'''
@@ -589,7 +589,7 @@ def gen_report_data(proto, result):
         retval = {'tp_kbps': 0, 'rtt_ms': 0}
     elif proto == 'UDP' or proto == 'Multicast':
         pkt_size_list = [x['pkt_size'] for x in result]
-        retval = dict(zip(pkt_size_list, [{}, {}, {}]))
+        retval = dict(list(zip(pkt_size_list, [{}, {}, {}])))

     for item in result:
         if proto in ['TCP', 'Upload', 'Download']:
@@ -617,7 +617,7 @@ def gen_report_data(proto, result):
         retval['rtt avg/min/max/stddev msec'] = pkt_size_results

     if proto in ['TCP', 'Upload', 'Download']:
-        for key in retval.keys():
+        for key in list(retval.keys()):
             if retval[key]:
                 retval[key] = '{0:n}'.format(retval[key] / tcp_test_count)
             else:
@@ -696,7 +696,7 @@ def print_report(results):
     table.append(['8.1', 'VM to Host Uploading', run_status[2][0][0][0], run_data[2][0][0][0]])
     table.append(['8.2', 'VM to Host Downloading', run_status[2][0][0][1], run_data[2][0][0][1]])

-    ptable = zip(*table[1:])[2]
+    ptable = list(zip(*table[1:]))[2]
     cnt_passed = ptable.count(SPASS)
     cnt_failed = ptable.count(SFAIL)
     cnt_skipped = ptable.count("SKIPPED")
@@ -975,7 +975,7 @@ def parse_opts_from_cli():
 def decode_size_list(argname, size_list):
     try:
         pkt_sizes = size_list.split(',')
-        for i in xrange(len(pkt_sizes)):
+        for i in range(len(pkt_sizes)):
             pkt_sizes[i] = int(pkt_sizes[i])
     except ValueError:
         LOG.error('Invalid %s parameter. A valid input must be '
@@ -1000,7 +1000,7 @@ def merge_opts_to_configs(opts):
     config = config_load(opts.config, config)

     if opts.show_config:
-        print(default_cfg_file)
+        print(default_cfg_file.decode())
        sys.exit(0)

     if opts.version:
@@ -1167,7 +1167,7 @@ def run_vmtp(opts):
     if (sys.argv == ['']):
         # Running from a Python call
         def_opts = parse_opts_from_cli()
-        for key, value in vars(def_opts).iteritems():
+        for key, value in list(vars(def_opts).items()):
             if key not in opts:
                 opts.__setattr__(key, value)

@@ -110,7 +110,7 @@ class GoogleChartsBarChart(object):
         sub = 'inter-node'
         for key in subtitle_match:
             if key in res:
-                sub += ' ' + res[key].encode('ascii')
+                sub += ' ' + res[key]
         return sub

     def _get_categories(self, flow):
@@ -184,7 +184,7 @@ class GoogleChartsBarChart(object):
         rows = [['Property', 'Value']]
         for key in prop_match:
             if key in res:
-                rows.append([prop_match[key], res[key].encode('ascii', 'ignore')])
+                rows.append([prop_match[key], res[key]])
         return self._get_js_chart('google.visualization.Table', rows, 'table', id)

     def _get_js(self, res, id):