Fixed the code formatting to fit into 80 character lines

Anton Beloglazov 2012-09-25 12:20:30 +10:00
parent 1f3cb3bb16
commit 13cb1d6650
30 changed files with 734 additions and 512 deletions
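Most of the hunks below apply one of two PEP 8 line-wrapping idioms: implicit continuation inside parentheses or brackets with a hanging indent, or an explicit backslash continuation for long expressions. A minimal, self-contained sketch of both follows; the helper functions mirror names from the first hunk but are placeholder stubs, not the project's real implementations.

# Sketch of the two wrapping idioms used throughout this commit.
# The stubs below are illustrative only, not the project code.
def physical_cpu_count(connection):
    return 4          # placeholder: number of physical cores

def physical_cpu_mhz(connection):
    return 2400       # placeholder: frequency of one core in MHz

connection = None     # stands in for a libvirt virConnect object

# Idiom 1: implicit continuation inside brackets, hanging indent.
config_paths = ['/etc/neat/neat.conf',
                './neat.conf']

# Idiom 2: explicit backslash continuation for a long expression.
total_mhz = physical_cpu_count(connection) * \
    physical_cpu_mhz(connection)

print(total_mhz)      # 9600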

View File

@ -28,7 +28,8 @@ from neat.db_utils import *
def start(init_state, execute, config, time_interval, iterations):
""" Start the processing loop.
:param init_state: A function accepting a config and returning a state dictionary.
:param init_state: A function accepting a config and
returning a state dictionary.
:type init_state: function
:param execute: A function performing the processing at each iteration.
@ -109,7 +110,8 @@ def physical_cpu_mhz_total(vir_connection):
:return: The total CPU frequency in MHz.
:rtype: int
"""
return physical_cpu_count(vir_connection) * physical_cpu_mhz(vir_connection)
return physical_cpu_count(vir_connection) * \
physical_cpu_mhz(vir_connection)
@contract

View File

@ -24,12 +24,16 @@ import ConfigParser
# This is the default config, which should not be modified
#DEFAILT_CONFIG_PATH = "/etc/neat/neat.conf"
# The following value is used for testing purposes
DEFAILT_CONFIG_PATH = os.path.join(os.path.dirname(__file__), '..', 'neat.conf')
DEFAILT_CONFIG_PATH = os.path.join(os.path.dirname(__file__),
'..',
'neat.conf')
# This is the custom config, which can override the defaults
#CONFIG_PATH = "/etc/neat/neat.conf"
# The following value is used for testing purposes
CONFIG_PATH = os.path.join(os.path.dirname(__file__), '..', 'neat.conf')
CONFIG_PATH = os.path.join(os.path.dirname(__file__),
'..',
'neat.conf')
# These fields must be present in the configuration file
REQUIRED_FIELDS = [
@ -109,5 +113,6 @@ def read_and_validate_config(paths, required_fields):
"""
config = read_config(paths)
if not validate_config(config, required_fields):
raise KeyError("The config dictionary does not contain all the required fields")
raise KeyError('The config dictionary does not contain ' +
'all the required fields')
return config
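A side note on the string split above: the new code joins the two literals with '+'; Python's implicit adjacent-string-literal concatenation produces the same message without the operator. A small sketch, using the message text from this hunk:

# Two equivalent ways to break a long message across lines.
msg_plus = ('The config dictionary does not contain ' +
            'all the required fields')
msg_implicit = ('The config dictionary does not contain '
                'all the required fields')
assert msg_plus == msg_implicit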

View File

@ -92,8 +92,9 @@ class Database(object):
"""
sel = select([self.vms.c.id]).where(self.vms.c.uuid == uuid)
row = self.connection.execute(sel).fetchone()
if row == None:
return self.vms.insert().execute(uuid=uuid).inserted_primary_key[0]
if row is None:
return self.vms.insert().execute(uuid=uuid). \
inserted_primary_key[0]
else:
return row['id']
@ -128,12 +129,14 @@ class Database(object):
:return: The ID of the host.
:rtype: int
"""
sel = select([self.hosts.c.id]).where(self.hosts.c.hostname == hostname)
sel = select([self.hosts.c.id]). \
where(self.hosts.c.hostname == hostname)
row = self.connection.execute(sel).fetchone()
if row == None:
return self.hosts.insert().execute(hostname=hostname,
cpu_mhz=cpu_mhz,
ram=ram).inserted_primary_key[0]
if row is None:
return self.hosts.insert().execute(
hostname=hostname,
cpu_mhz=cpu_mhz,
ram=ram).inserted_primary_key[0]
else:
self.connection.execute(self.hosts.update().
where(self.hosts.c.id == row['id']).

View File

@ -35,10 +35,10 @@ def init_db(sql_connection):
metadata.bind = engine
hosts = Table('hosts', metadata,
Column('id', Integer, primary_key=True),
Column('hostname', String(255), nullable=False),
Column('cpu_mhz', Integer, nullable=False),
Column('ram', Integer, nullable=False))
Column('id', Integer, primary_key=True),
Column('hostname', String(255), nullable=False),
Column('cpu_mhz', Integer, nullable=False),
Column('ram', Integer, nullable=False))
vms = Table('vms', metadata,
Column('id', Integer, primary_key=True),
@ -48,7 +48,8 @@ def init_db(sql_connection):
Table('vm_resource_usage', metadata,
Column('id', Integer, primary_key=True),
Column('vm_id', Integer, ForeignKey('vms.id'), nullable=False),
Column('timestamp', DateTime, server_default=text('CURRENT_TIMESTAMP')),
Column('timestamp', DateTime,
server_default=text('CURRENT_TIMESTAMP')),
Column('cpu_mhz', Integer, nullable=False))
metadata.create_all()

View File

@ -120,12 +120,12 @@ def validate_params(config, params):
raise_error(401)
return False
if 'reason' not in params or \
params['reason'] == 1 and 'vm_uuids' not in params or \
params['reason'] == 0 and 'host' not in params:
params['reason'] == 1 and 'vm_uuids' not in params or \
params['reason'] == 0 and 'host' not in params:
raise_error(400)
return False
if sha1(params['username']).hexdigest() != config['admin_user'] or \
sha1(params['password']).hexdigest() != config['admin_password']:
sha1(params['password']).hexdigest() != config['admin_password']:
raise_error(403)
return False
return True
@ -134,12 +134,14 @@ def validate_params(config, params):
def start():
""" Start the global manager web service.
"""
config = read_and_validate_config([DEFAILT_CONFIG_PATH, CONFIG_PATH], REQUIRED_FIELDS)
config = read_and_validate_config([DEFAILT_CONFIG_PATH, CONFIG_PATH],
REQUIRED_FIELDS)
bottle.debug(True)
bottle.app().state = {
'config': config,
'state': init_state(config)}
bottle.run(host=config['global_manager_host'], port=config['global_manager_port'])
bottle.run(host=config['global_manager_host'],
port=config['global_manager_port'])
@contract
@ -174,9 +176,10 @@ def service():
@bottle.route('/', method='ANY')
def error():
raise bottle.HTTPResponse('Method not allowed: the request has been made' +
'with a method other than the only supported PUT',
405)
raise bottle.HTTPResponse(
'Method not allowed: the request has been made' +
'with a method other than the only supported PUT',
405)
@contract
@ -186,7 +189,7 @@ def init_state(config):
:param config: A config dictionary.
:type config: dict(str: *)
:return: A dictionary containing the initial state of the global manager.
:return: A dict containing the initial state of the global manager.
:rtype: dict
"""
return {'previous_time': 0,
@ -274,7 +277,8 @@ def execute_underload(config, state, host):
float(config.get('network_migration_bandwidth')))
if 'vm_placement' not in state:
vm_placement_params = json.loads(config.get('algorithm_vm_placement_params'))
vm_placement_params = json.loads(
config.get('algorithm_vm_placement_params'))
vm_placement_state = None
vm_placement = config.get('algorithm_vm_placement_factory')(
time_step,
@ -451,7 +455,8 @@ def execute_overload(config, state, vm_uuids):
float(config.get('network_migration_bandwidth')))
if 'vm_placement' not in state:
vm_placement_params = json.loads(config.get('algorithm_vm_placement_params'))
vm_placement_params = json.loads(
config.get('algorithm_vm_placement_params'))
vm_placement_state = None
vm_placement = config.get('algorithm_vm_placement_factory')(
time_step,

View File

@ -39,12 +39,13 @@ def best_fit_decreasing_factory(time_step, migration_time, params):
hosts_ram_usage, hosts_ram_total, \
inactive_hosts_cpu, inactive_hosts_ram, \
vms_cpu, vms_ram, state=None: \
(best_fit_decreasing(get_available_resources(params['cpu_threshold'],
hosts_cpu_usage, hosts_cpu_total),
get_available_resources(params['ram_threshold'],
hosts_ram_usage, hosts_ram_total),
inactive_hosts_cpu, inactive_hosts_ram,
vms_cpu, vms_ram), {})
(best_fit_decreasing(get_available_resources(
params['cpu_threshold'],
hosts_cpu_usage, hosts_cpu_total),
get_available_resources(params['ram_threshold'],
hosts_ram_usage, hosts_ram_total),
inactive_hosts_cpu, inactive_hosts_ram,
vms_cpu, vms_ram), {})
@contract
@ -79,10 +80,10 @@ def best_fit_decreasing(hosts_cpu, hosts_ram,
:param hosts_ram: A map of host names and their available RAM in MB.
:type hosts_ram: dict(str: int)
:param inactive_hosts_cpu: A map of inactive hosts and available CPU in MHz.
:param inactive_hosts_cpu: A map of inactive hosts and available CPU MHz.
:type inactive_hosts_cpu: dict(str: int)
:param inactive_hosts_ram: A map of inactive hosts and available RAM in MB.
:param inactive_hosts_ram: A map of inactive hosts and available RAM MB.
:type inactive_hosts_ram: dict(str: int)
:param vms_cpu: A map of VM UUID and their CPU utilization in MHz.
@ -94,22 +95,24 @@ def best_fit_decreasing(hosts_cpu, hosts_ram,
:return: A map of VM UUIDs to host names, or {} if cannot be solved.
:rtype: dict(str: str)
"""
vms = sorted(((v, vms_ram[k], k) for k, v in vms_cpu.items()), reverse=True)
hosts = sorted(((v, hosts_ram[k], k) for k, v in hosts_cpu.items()))
inactive_hosts = sorted(((v, inactive_hosts_ram[k], k) for k, v
in inactive_hosts_cpu.items()))
vms = sorted(((v, vms_ram[k], k)
for k, v in vms_cpu.items()), reverse=True)
hosts = sorted(((v, hosts_ram[k], k)
for k, v in hosts_cpu.items()))
inactive_hosts = sorted(((v, inactive_hosts_ram[k], k)
for k, v in inactive_hosts_cpu.items()))
mapping = {}
for vm_cpu, vm_ram, vm_uuid in vms:
mapped = False
while not mapped:
for _, _, host in hosts:
if hosts_cpu[host] >= vm_cpu and \
hosts_ram[host] >= vm_ram:
mapping[vm_uuid] = host
hosts_cpu[host] -= vm_cpu
hosts_ram[host] -= vm_ram
mapped = True
break
hosts_ram[host] >= vm_ram:
mapping[vm_uuid] = host
hosts_cpu[host] -= vm_cpu
hosts_ram[host] -= vm_ram
mapped = True
break
else:
if inactive_hosts:
activated_host = inactive_hosts.pop(0)

View File

@ -108,7 +108,8 @@ def start():
:return: The final state.
:rtype: dict(str: *)
"""
config = read_and_validate_config([DEFAILT_CONFIG_PATH, CONFIG_PATH], REQUIRED_FIELDS)
config = read_and_validate_config([DEFAILT_CONFIG_PATH, CONFIG_PATH],
REQUIRED_FIELDS)
return common.start(
init_state,
execute,
@ -123,7 +124,7 @@ def init_state(config):
:param config: A config dictionary.
:type config: dict(str: *)
:return: A dictionary containing the initial state of the data collector.
:return: A dict containing the initial state of the data collector.
:rtype: dict
"""
vir_connection = libvirt.openReadOnly(None)
@ -248,7 +249,7 @@ def get_added_vms(previous_vms, current_vms):
:param current_vms: A list of VMs at the current time frame.
:type current_vms: list(str)
:return: A list of VM UUIDs that have been added since the last time frame.
:return: A list of VM UUIDs added since the last time frame.
:rtype: list(str)
"""
return substract_lists(current_vms, previous_vms)
@ -264,7 +265,7 @@ def get_removed_vms(previous_vms, current_vms):
:param current_vms: A list of VMs at the current time frame.
:type current_vms: list(str)
:return: A list of VM UUIDs that have been removed since the last time frame.
:return: A list of VM UUIDs removed since the last time frame.
:rtype: list(str)
"""
return substract_lists(previous_vms, current_vms)
@ -338,7 +339,8 @@ def write_data_locally(path, data, data_length):
for uuid, values in data.items():
with open(os.path.join(path, uuid), 'w') as f:
if data_length > 0:
f.write('\n'.join([str(x) for x in values[-data_length:]]) + '\n')
f.write('\n'.join([str(x)
for x in values[-data_length:]]) + '\n')
@contract
@ -387,7 +389,7 @@ def get_cpu_mhz(vir_connection, physical_cpus, previous_cpu_time,
:param physical_cpus: The number of physical CPUs.
:type physical_cpus: int
:param previous_cpu_time: A dictionary of previous CPU times for the VMs.
:param previous_cpu_time: A dict of previous CPU times for the VMs.
:type previous_cpu_time: dict(str : int)
:param previous_time: The previous timestamp.
@ -399,7 +401,7 @@ def get_cpu_mhz(vir_connection, physical_cpus, previous_cpu_time,
:param current_vms: A list of VM UUIDs.
:type current_vms: list(str)
:param added_vm_data: A dictionary of VM UUIDs and the corresponding data.
:param added_vm_data: A dict of VM UUIDs and the corresponding data.
:type added_vm_data: dict(str : list(int))
:return: The updated CPU times and average CPU utilization in MHz.

View File

@ -118,7 +118,8 @@ def start():
:return: The final state.
:rtype: dict(str: *)
"""
config = read_and_validate_config([DEFAILT_CONFIG_PATH, CONFIG_PATH], REQUIRED_FIELDS)
config = read_and_validate_config([DEFAILT_CONFIG_PATH, CONFIG_PATH],
REQUIRED_FIELDS)
return common.start(
init_state,
execute,
@ -140,10 +141,11 @@ def init_state(config):
if vir_connection is None:
print 'Failed to open connection to the hypervisor'
sys.exit(1)
physical_cpu_mhz_total = common.physical_cpu_mhz_total(vir_connection)
return {'previous_time': 0,
'vir_connect': vir_connection,
'db': init_db(config.get('sql_connection')),
'physical_cpu_mhz_total': common.physical_cpu_mhz_total(vir_connection)}
'physical_cpu_mhz_total': physical_cpu_mhz_total}
@contract
@ -195,33 +197,41 @@ def execute(config, state):
return
physical_cpu_mhz_total = int(config.get('physical_cpu_mhz_total'))
host_cpu_utilization = vm_mhz_to_percentage(vm_cpu_mhz, physical_cpu_mhz_total)
host_cpu_utilization = vm_mhz_to_percentage(
vm_cpu_mhz, physical_cpu_mhz_total)
time_step = int(config.get('data_collector_interval'))
migration_time = calculate_migration_time(vm_ram, float(config.get('network_migration_bandwidth')))
migration_time = calculate_migration_time(
vm_ram, float(config.get('network_migration_bandwidth')))
if 'underload_detection' not in state:
underload_detection_params = json.loads(config.get('algorithm_underload_detection_params'))
underload_detection_params = json.loads(
config.get('algorithm_underload_detection_params'))
underload_detection_state = None
underload_detection = config.get('algorithm_underload_detection_factory')(
time_step,
migration_time,
underload_detection_params)
underload_detection = config.get(
'algorithm_underload_detection_factory')(
time_step,
migration_time,
underload_detection_params)
state['underload_detection'] = underload_detection
overload_detection_params = json.loads(config.get('algorithm_overload_detection_params'))
overload_detection_params = json.loads(
config.get('algorithm_overload_detection_params'))
overload_detection_state = None
overload_detection = config.get('algorithm_overload_detection_factory')(
time_step,
migration_time,
overload_detection_params)
overload_detection = config.get(
'algorithm_overload_detection_factory')(
time_step,
migration_time,
overload_detection_params)
state['overload_detection'] = overload_detection
vm_selection_params = json.loads(config.get('algorithm_vm_selection_params'))
vm_selection_params = json.loads(
config.get('algorithm_vm_selection_params'))
vm_selection_state = None
vm_selection = config.get('algorithm_vm_selection_factory')(
time_step,
migration_time,
vm_selection_params)
vm_selection = config.get(
'algorithm_vm_selection_factory')(
time_step,
migration_time,
vm_selection_params)
state['vm_selection'] = vm_selection
else:
underload_detection = state['underload_detection']
@ -231,18 +241,22 @@ def execute(config, state):
vm_selection = state['vm_selection']
vm_selection_state = state['vm_selection_state']
underload, underload_detection_state = underload_detection(host_cpu_utilization, underload_detection_state)
underload, underload_detection_state = underload_detection(
host_cpu_utilization, underload_detection_state)
state['underload_detection_state'] = underload_detection_state
if underload:
# Send a request to the global manager with the host name
pass
else:
overload, overload_detection_state = overload_detection(host_cpu_utilization, overload_detection_state)
overload, overload_detection_state = overload_detection(
host_cpu_utilization, overload_detection_state)
state['overload_detection_state'] = overload_detection_state
if overload:
vms = vm_selection(host_cpu_utilization, vm_ram, vm_selection_state)
# send a request to the global manager with the selected VMs to migrate
vms = vm_selection(
host_cpu_utilization, vm_ram, vm_selection_state)
# send a request to the global manager
# with the selected VMs to migrate
return state
@ -307,7 +321,7 @@ def get_ram(vir_connection, vms):
@contract
def get_max_ram(vir_connection, uuid):
""" Get the maximum RAM allocated to a VM specified by the UUID using libvirt.
""" Get the max RAM allocated to a VM UUID using libvirt.
:param vir_connection: A libvirt connection object.
:type vir_connection: virConnect

View File

@ -48,9 +48,9 @@ def solve2(objective, constraint, step, limit):
try:
res = objective(x, y)
if res > res_best and \
constraint[1](constraint[0](x, y), constraint[2]):
res_best = res
solution = [x, y]
constraint[1](constraint[0](x, y), constraint[2]):
res_best = res
solution = [x, y]
except ZeroDivisionError:
pass
return solution

View File

@ -38,17 +38,21 @@ def init_state(window_sizes, number_of_states):
"""
state = {}
state['previous_state'] = 0
state['request_windows'] = estimation.init_request_windows(number_of_states)
state['estimate_windows'] = estimation.init_deque_structure(window_sizes, number_of_states)
state['variances'] = estimation.init_variances(window_sizes, number_of_states)
state['acceptable_variances'] = estimation.init_variances(window_sizes, number_of_states)
state['request_windows'] = estimation.init_request_windows(
number_of_states)
state['estimate_windows'] = estimation.init_deque_structure(
window_sizes, number_of_states)
state['variances'] = estimation.init_variances(
window_sizes, number_of_states)
state['acceptable_variances'] = estimation.init_variances(
window_sizes, number_of_states)
return state
@contract
def execute(state_config, otf, window_sizes, bruteforce_step,
time_step, migration_time, utilization, state):
""" The MHOD algorithm returning a decision of whether the host is overloaded.
""" The MHOD algorithm returning whether the host is overloaded.
:param state_config: The state configuration.
:type state_config: list(float)
@ -81,25 +85,31 @@ def execute(state_config, otf, window_sizes, bruteforce_step,
max_window_size = max(window_sizes)
state_vector = build_state_vector(state_config, utilization)
state = current_state(state_vector)
selected_windows = estimation.select_window(state['variances'],
state['acceptable_variances'],
window_sizes)
p = estimation.select_best_estimates(state['estimate_windows'],
selected_windows)
selected_windows = estimation.select_window(
state['variances'],
state['acceptable_variances'],
window_sizes)
p = estimation.select_best_estimates(
state['estimate_windows'],
selected_windows)
state['request_windows'] = estimation.update_request_windows(state['request_windows'],
max_window_size,
state['previous_state'],
state)
state['estimate_windows'] = estimation.update_estimate_windows(state['estimate_windows'],
state['request_windows'],
state['previous_state'])
state['variances'] = estimation.update_variances(state['variances'],
state['estimate_windows'],
state['previous_state'])
state['acceptable_variances'] = estimation.update_acceptable_variances(state['acceptable_variances'],
state['estimate_windows'],
state['previous_state'])
state['request_windows'] = estimation.update_request_windows(
state['request_windows'],
max_window_size,
state['previous_state'],
state)
state['estimate_windows'] = estimation.update_estimate_windows(
state['estimate_windows'],
state['request_windows'],
state['previous_state'])
state['variances'] = estimation.update_variances(
state['variances'],
state['estimate_windows'],
state['previous_state'])
state['acceptable_variances'] = estimation.update_acceptable_variances(
state['acceptable_variances'],
state['estimate_windows'],
state['previous_state'])
state['previous_state'] = state
if len(utilization) >= 30:
@ -108,15 +118,17 @@ def execute(state_config, otf, window_sizes, bruteforce_step,
time_in_state_n = get_time_in_state_n(state_config, state_history)
tmp = set(p[state])
if len(tmp) != 1 or tmp[0] != 0:
policy = bruteforce.optimize(step, 1.0, otf, (migration_time / time_step), ls,
p, state_vector, time_in_states, time_in_state_n)
policy = bruteforce.optimize(
step, 1.0, otf, (migration_time / time_step), ls,
p, state_vector, time_in_states, time_in_state_n)
return issue_command_deterministic(policy)
return False
@contract
def build_state_vector(state_config, utilization):
""" Build the current state PMF corresponding to the utilization history and state config.
""" Build the current state PMF corresponding to the utilization
history and state config.
:param state_config: The state configuration.
:type state_config: list(float)

View File

@ -111,7 +111,8 @@ def update_request_windows(request_windows, previous_state, current_state):
@contract
def update_estimate_windows(estimate_windows, request_windows, previous_state):
def update_estimate_windows(estimate_windows, request_windows,
previous_state):
""" Update and return the updated estimate windows.
:param estimate_windows: The previous estimate windows.
@ -127,7 +128,8 @@ def update_estimate_windows(estimate_windows, request_windows, previous_state):
:rtype: list(list(dict))
"""
request_window = request_windows[previous_state]
for state, estimate_window in enumerate(estimate_windows[previous_state]):
state_estimate_windows = estimate_windows[previous_state]
for state, estimate_window in enumerate(state_estimate_windows):
for window_size, estimates in estimate_window.items():
slice_from = len(request_window) - window_size
if slice_from < 0:
@ -162,7 +164,8 @@ def update_variances(variances, estimate_windows, previous_state):
if len(estimates) < window_size:
variance_map[window_size] = 1.0
else:
variance_map[window_size] = variance(list(estimates), window_size)
variance_map[window_size] = variance(
list(estimates), window_size)
return variances
@ -183,10 +186,12 @@ def update_acceptable_variances(acceptable_variances, estimate_windows, previous
:rtype: list(list(dict))
"""
estimate_window = estimate_windows[previous_state]
for state, acceptable_variance_map in enumerate(acceptable_variances[previous_state]):
state_acc_variances = acceptable_variances[previous_state]
for state, acceptable_variance_map in enumerate(state_acc_variances):
for window_size in acceptable_variance_map:
estimates = estimate_window[state][window_size]
acceptable_variance_map[window_size] = acceptable_variance(estimates[-1], window_size)
acceptable_variance_map[window_size] = acceptable_variance(
estimates[-1], window_size)
return acceptable_variances
@ -213,8 +218,9 @@ def select_window(variances, acceptable_variances, window_sizes):
for j in range(n):
selected_size = window_sizes[0]
for window_size in window_sizes:
if variances[i][j][window_size] > acceptable_variances[i][j][window_size]:
break
if variances[i][j][window_size] > \
acceptable_variances[i][j][window_size]:
break
selected_size = window_size
selected_windows[i].append(selected_size)
return selected_windows

View File

@ -43,8 +43,9 @@ def build_objective(ls, state_vector, p):
@contract
def build_constraint(otf, migration_time, ls, state_vector, p, time_in_states, time_in_state_n):
""" Creates a constraint for the optimization problem from the L functions.
def build_constraint(otf, migration_time, ls, state_vector,
p, time_in_states, time_in_state_n):
""" Creates an optimization constraint from the L functions.
:param otf: The OTF parameter.
:type otf: float
@ -71,6 +72,10 @@ def build_constraint(otf, migration_time, ls, state_vector, p, time_in_states, t
:rtype: tuple(function, function, number)
"""
def constraint(*m):
return float(migration_time + time_in_state_n + ls[-1](state_vector, p, m)) / \
(migration_time + time_in_states + sum(l(state_vector, p, m) for l in ls))
return float(migration_time +
time_in_state_n +
ls[-1](state_vector, p, m)) / \
(migration_time +
time_in_states +
sum(l(state_vector, p, m) for l in ls))
return (constraint, operator.le, otf)
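For readability, the nested expression above can be read as the following constraint; this is a sketch inferred directly from the code, where t_mig is migration_time, t_n is time_in_state_n, t_total is time_in_states, L_i denotes ls[i], L_N is the last of the L functions, and m is the vector of decision variables:

\frac{t_{\mathrm{mig}} + t_{n} + L_{N}(m)}{t_{\mathrm{mig}} + t_{\mathrm{total}} + \sum_{i} L_{i}(m)} \;\le\; \mathrm{OTF}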

View File

@ -35,9 +35,10 @@ def otf_factory(time_step, migration_time, params):
:return: A function implementing the OTF algorithm.
:rtype: function
"""
return lambda utilization, state=None: (otf(params['threshold'],
utilization),
{})
return lambda utilization, state=None: \
(otf(params['threshold'],
utilization),
{})
@contract
@ -56,10 +57,11 @@ def otf_limit_factory(time_step, migration_time, params):
:return: A function implementing the OTF algorithm with limiting.
:rtype: function
"""
return lambda utilization, state=None: (otf_limit(params['threshold'],
params['limit'],
utilization),
{})
return lambda utilization, state=None: \
(otf_limit(params['threshold'],
params['limit'],
utilization),
{})
@contract
@ -79,10 +81,11 @@ def otf_migration_time_factory(time_step, migration_time, params):
:rtype: function
"""
migration_time_normalized = float(migration_time) / time_step
return lambda utilization, state=None: (otf_migration_time(params['threshold'],
migration_time_normalized,
utilization),
{})
return lambda utilization, state=None: \
(otf_migration_time(params['threshold'],
migration_time_normalized,
utilization),
{})
@contract
@ -102,11 +105,12 @@ def otf_limit_migration_time_factory(time_step, migration_time, params):
:rtype: function
"""
migration_time_normalized = float(migration_time) / time_step
return lambda utilization, state=None: (otf_limit_migration_time(params['threshold'],
params['limit'],
migration_time_normalized,
utilization),
{})
return lambda utilization, state=None: \
(otf_limit_migration_time(params['threshold'],
params['limit'],
migration_time_normalized,
utilization),
{})
@contract
@ -122,12 +126,13 @@ def otf(threshold, utilization):
:return: The decision of the algorithm.
:rtype: bool
"""
return float(overloading_steps(utilization)) / len(utilization) > threshold
return float(overloading_steps(utilization)) / len(utilization) > \
threshold
@contract
def otf_limit(threshold, limit, utilization):
""" The OTF threshold algorithm with limiting the minimum utilization values.
""" The OTF threshold algorithm with limiting the utilization history.
:param threshold: The threshold on the OTF value.
:type threshold: float,>=0

View File

@ -40,11 +40,12 @@ def loess_factory(time_step, migration_time, params):
:rtype: function
"""
migration_time_normalized = float(migration_time) / time_step
return lambda utilization, state=None: (loess(params['param'],
params['limit'],
migration_time_normalized,
utilization),
{})
return lambda utilization, state=None: \
(loess(params['param'],
params['limit'],
migration_time_normalized,
utilization),
{})
@contract
@ -64,11 +65,12 @@ def loess_robust_factory(time_step, migration_time, params):
:rtype: function
"""
migration_time_normalized = float(migration_time) / time_step
return lambda utilization, state=None: (loess_robust(params['param'],
params['limit'],
migration_time_normalized,
utilization),
{})
return lambda utilization, state=None: \
(loess_robust(params['param'],
params['limit'],
migration_time_normalized,
utilization),
{})
@contract
@ -87,10 +89,11 @@ def mad_threshold_factory(time_step, migration_time, params):
:return: A function implementing the static threshold algorithm.
:rtype: function
"""
return lambda utilization, state=None: (mad_threshold(params['threshold'],
params['limit'],
utilization),
{})
return lambda utilization, state=None: \
(mad_threshold(params['threshold'],
params['limit'],
utilization),
{})
@contract
@ -109,10 +112,11 @@ def iqr_threshold_factory(time_step, migration_time, params):
:return: A function implementing the static threshold algorithm.
:rtype: function
"""
return lambda utilization, state=None: (iqr_threshold(params['threshold'],
params['limit'],
utilization),
{})
return lambda utilization, state=None: \
(iqr_threshold(params['threshold'],
params['limit'],
utilization),
{})
@contract
@ -360,7 +364,7 @@ def tricube_weights(n):
@contract
def tricube_bisquare_weights(data):
""" Generates a list of weights according to the tricube bisquare function.
""" Generates a weights according to the tricube bisquare function.
:param data: The input data.
:type data: list(float)

View File

@ -54,10 +54,11 @@ def minimum_utilization_factory(time_step, migration_time, params):
:param params: A dictionary containing the algorithm's parameters.
:type params: dict(str: *)
:return: A function implementing the minimum utilization VM selection algorithm.
:return: A function implementing the minimum utilization VM selection.
:rtype: function
"""
return lambda vms_cpu, vms_ram, state=None: (minimum_utilization(vms_cpu), {})
return lambda vms_cpu, vms_ram, state=None: \
(minimum_utilization(vms_cpu), {})
@contract
@ -73,10 +74,11 @@ def minimum_migration_time_factory(time_step, migration_time, params):
:param params: A dictionary containing the algorithm's parameters.
:type params: dict(str: *)
:return: A function implementing the minimum migration time VM selection algorithm.
:return: A function implementing the minimum migration time VM selection.
:rtype: function
"""
return lambda vms_cpu, vms_ram, state=None: (minimum_migration_time(vms_ram), {})
return lambda vms_cpu, vms_ram, state=None: \
(minimum_migration_time(vms_ram), {})
@contract
@ -89,7 +91,8 @@ def minimum_migration_time(vms_ram):
:return: A VM to migrate from the host.
:rtype: str
"""
min_index, min_value = min(enumerate(vms_ram.values()), key=operator.itemgetter(1))
min_index, min_value = min(enumerate(vms_ram.values()),
key=operator.itemgetter(1))
return vms_ram.keys()[min_index]
@ -104,7 +107,8 @@ def minimum_utilization(vms_cpu):
:rtype: str
"""
last_utilization = [x[-1] for x in vms_cpu.values()]
min_index, min_value = min(enumerate(last_utilization), key=operator.itemgetter(1))
min_index, min_value = min(enumerate(last_utilization),
key=operator.itemgetter(1))
return vms_cpu.keys()[min_index]
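The min-with-itemgetter idiom wrapped in the two hunks above picks the dictionary key whose value is smallest. A self-contained sketch of the same pattern, with made-up sample data:

import operator

# Find the VM with the least RAM by taking the min over
# (index, value) pairs; keys() and values() are in matching order.
vms_ram = {'vm1': 2048, 'vm2': 512, 'vm3': 1024}
min_index, min_value = min(enumerate(vms_ram.values()),
                           key=operator.itemgetter(1))
selected_vm = list(vms_ram.keys())[min_index]
print(selected_vm, min_value)  # vm2 512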

View File

@ -56,7 +56,8 @@ class GlobalManager(TestCase):
with MockTransaction:
expect(manager).raise_error(400).exactly(5).times()
manager.validate_params({}, {'username': 'test', 'password': 'test'})
manager.validate_params({}, {'username': 'test',
'password': 'test'})
manager.validate_params({}, {'username': 'test',
'password': 'test',
'reason': 1})
@ -74,18 +75,20 @@ class GlobalManager(TestCase):
with MockTransaction:
expect(manager).raise_error(403).exactly(2).times()
manager.validate_params({'admin_user': sha1('test').hexdigest(),
'admin_password': sha1('test2').hexdigest()},
{'username': 'test1',
'password': 'test2',
'reason': 0,
'host': 'test'})
manager.validate_params({'admin_user': sha1('test1').hexdigest(),
'admin_password': sha1('test').hexdigest()},
{'username': 'test1',
'password': 'test2',
'reason': 0,
'host': 'test'})
manager.validate_params(
{'admin_user': sha1('test').hexdigest(),
'admin_password': sha1('test2').hexdigest()},
{'username': 'test1',
'password': 'test2',
'reason': 0,
'host': 'test'})
manager.validate_params(
{'admin_user': sha1('test1').hexdigest(),
'admin_password': sha1('test').hexdigest()},
{'username': 'test1',
'password': 'test2',
'reason': 0,
'host': 'test'})
assert manager.validate_params(
{'admin_user': sha1('test1').hexdigest(),
@ -93,7 +96,7 @@ class GlobalManager(TestCase):
{'username': 'test1',
'password': 'test2',
'reason': 1,
'vm_uuids': ['qwe', 'asd']}) == True
'vm_uuids': ['qwe', 'asd']})
assert manager.validate_params(
{'admin_user': sha1('test1').hexdigest(),
@ -101,7 +104,7 @@ class GlobalManager(TestCase):
{'username': 'test1',
'password': 'test2',
'reason': 0,
'host': 'test'}) == True
'host': 'test'})
def test_start(self):
with MockTransaction:
@ -112,9 +115,9 @@ class GlobalManager(TestCase):
paths = [manager.DEFAILT_CONFIG_PATH, manager.CONFIG_PATH]
fields = manager.REQUIRED_FIELDS
expect(manager).read_and_validate_config(paths, fields). \
and_return(config).once()
and_return(config).once()
expect(manager).init_state(config). \
and_return(state).once()
and_return(state).once()
expect(bottle).app().and_return(app).once()
expect(bottle).run(host='localhost', port=8080).once()
manager.start()
@ -131,12 +134,12 @@ class GlobalManager(TestCase):
'os_auth_url': 'url',
'compute_hosts': 'host1, host2'}
expect(manager).init_db('db').and_return(db).once()
expect(client).Client('user',
'password',
'tenant',
'url',
service_type='compute').and_return(nova).once()
expect(manager).parse_compute_hosts('host1, host2').and_return(hosts).once()
expect(client).Client(
'user', 'password', 'tenant', 'url',
service_type='compute'). \
and_return(nova).once()
expect(manager).parse_compute_hosts('host1, host2'). \
and_return(hosts).once()
state = manager.init_state(config)
assert state['previous_time'] == 0
assert state['db'] == db
@ -145,8 +148,10 @@ class GlobalManager(TestCase):
def test_parse_compute_hosts(self):
assert manager.parse_compute_hosts('') == []
assert manager.parse_compute_hosts('test1, test2') == ['test1', 'test2']
assert manager.parse_compute_hosts('t1,, t2 , t3') == ['t1', 't2', 't3']
assert manager.parse_compute_hosts('test1, test2') == \
['test1', 'test2']
assert manager.parse_compute_hosts('t1,, t2 , t3') == \
['t1', 't2', 't3']
def test_service(self):
app = mock('app')
@ -161,7 +166,8 @@ class GlobalManager(TestCase):
'host': 'host'}
expect(manager).get_params(Any).and_return(params).once()
expect(bottle).app().and_return(app).once()
expect(manager).validate_params(config, params).and_return(True).once()
expect(manager).validate_params(config, params). \
and_return(True).once()
expect(manager).execute_underload(config, state, 'host').once()
manager.service()
@ -170,8 +176,10 @@ class GlobalManager(TestCase):
'vm_uuids': 'vm_uuids'}
expect(manager).get_params(Any).and_return(params).once()
expect(bottle).app().and_return(app).once()
expect(manager).validate_params(config, params).and_return(True).once()
expect(manager).execute_overload(config, state, 'vm_uuids').once()
expect(manager).validate_params(config, params). \
and_return(True).once()
expect(manager).execute_overload(config, state, 'vm_uuids'). \
once()
manager.service()
@qc(20)
@ -243,7 +251,8 @@ class GlobalManager(TestCase):
host1.memory_mb = 4000
host2 = mock('host2')
host2.memory_mb = 3000
expect(nova.hosts).get(hostname).and_return([host1, host2]).once()
expect(nova.hosts).get(hostname). \
and_return([host1, host2]).once()
assert manager.host_used_ram(nova, hostname) == 3000
def test_flavors_ram(self):
@ -264,7 +273,8 @@ class GlobalManager(TestCase):
nova = mock('nova')
nova.servers = mock('servers')
flavors_to_ram = {'1': 512, '2': 1024}
expect(manager).flavors_ram(nova).and_return(flavors_to_ram).once()
expect(manager).flavors_ram(nova). \
and_return(flavors_to_ram).once()
vm1 = mock('vm1')
vm1.flavor = {'id': '1'}

View File

@ -59,15 +59,16 @@ class BinPacking(TestCase):
'vm3': 2048}
self.assertEqual(alg(hosts_cpu_usage, hosts_cpu_total,
hosts_ram_usage, hosts_ram_total,
inactive_hosts_cpu, inactive_hosts_ram,
vms_cpu, vms_ram), ({
'vm1': 'host6',
'vm2': 'host1',
'vm3': 'host3'}, {}))
hosts_ram_usage, hosts_ram_total,
inactive_hosts_cpu, inactive_hosts_ram,
vms_cpu, vms_ram), ({
'vm1': 'host6',
'vm2': 'host1',
'vm3': 'host3'}, {}))
def test_get_available_resources(self):
self.assertEqual(packing.get_available_resources(0.8,
self.assertEqual(packing.get_available_resources(
0.8,
{'host1': 700, 'host2': 200}, {'host1': 1000, 'host2': 2000}),
{'host1': 100, 'host2': 1400})
@ -94,9 +95,9 @@ class BinPacking(TestCase):
assert packing.best_fit_decreasing(
hosts_cpu, hosts_ram, inactive_hosts_cpu, inactive_hosts_ram,
vms_cpu, vms_ram) == {
'vm1': 'host3',
'vm2': 'host2',
'vm3': 'host1'}
'vm1': 'host3',
'vm2': 'host2',
'vm3': 'host1'}
hosts_cpu = {
'host1': 3000,
@ -120,9 +121,9 @@ class BinPacking(TestCase):
assert packing.best_fit_decreasing(
hosts_cpu, hosts_ram, inactive_hosts_cpu, inactive_hosts_ram,
vms_cpu, vms_ram) == {
'vm1': 'host1',
'vm2': 'host2',
'vm3': 'host3'}
'vm1': 'host1',
'vm2': 'host2',
'vm3': 'host3'}
hosts_cpu = {
'host1': 3000,
@ -146,9 +147,9 @@ class BinPacking(TestCase):
assert packing.best_fit_decreasing(
hosts_cpu, hosts_ram, inactive_hosts_cpu, inactive_hosts_ram,
vms_cpu, vms_ram) == {
'vm1': 'host1',
'vm2': 'host1',
'vm3': 'host3'}
'vm1': 'host1',
'vm2': 'host1',
'vm3': 'host3'}
hosts_cpu = {
'host1': 3000,
@ -201,9 +202,9 @@ class BinPacking(TestCase):
assert packing.best_fit_decreasing(
hosts_cpu, hosts_ram, inactive_hosts_cpu, inactive_hosts_ram,
vms_cpu, vms_ram) == {
'vm1': 'host6',
'vm2': 'host1',
'vm3': 'host3'}
'vm1': 'host6',
'vm2': 'host1',
'vm3': 'host3'}
hosts_cpu = {
'host1': 3000,

View File

@ -36,15 +36,20 @@ class Bruteforce(TestCase):
def fn4(x, y):
return x / y
self.assertEqual([round(x, 1) for x in b.solve2(fn1, (fn1, le, 10), 0.1, 1.0)],
self.assertEqual([round(x, 1)
for x in b.solve2(fn1, (fn1, le, 10), 0.1, 1.0)],
[1.0, 1.0])
self.assertEqual([round(x, 1) for x in b.solve2(fn1, (fn1, le, 0.5), 0.1, 1.0)],
self.assertEqual([round(x, 1)
for x in b.solve2(fn1, (fn1, le, 0.5), 0.1, 1.0)],
[0.0, 0.5])
self.assertEqual([round(x, 1) for x in b.solve2(fn2, (fn1, le, 0.5), 0.1, 1.0)],
self.assertEqual([round(x, 1)
for x in b.solve2(fn2, (fn1, le, 0.5), 0.1, 1.0)],
[0.5, 0.0])
self.assertEqual([round(x, 1) for x in b.solve2(fn3, (fn3, le, 10), 0.1, 1.0)],
self.assertEqual([round(x, 1)
for x in b.solve2(fn3, (fn3, le, 10), 0.1, 1.0)],
[1.0, 0.0])
self.assertEqual([round(x, 1) for x in b.solve2(fn4, (fn4, le, 10), 0.1, 1.0)],
self.assertEqual([round(x, 1)
for x in b.solve2(fn4, (fn4, le, 10), 0.1, 1.0)],
[1.0, 0.1])
def test_optimize(self):
@ -61,11 +66,15 @@ class Bruteforce(TestCase):
objective = mock('objective')
constraint = mock('constraint')
solution = [1, 2, 3]
expect(nlp).build_objective(ls, state_vector, p).and_return(objective).once()
expect(nlp).build_constraint(otf, migration_time, ls, state_vector,
p, time_in_states, time_in_state_n). \
and_return(constraint).once()
expect(b).solve2(objective, constraint, step, limit).and_return(solution).once()
self.assertEqual(b.optimize(step, limit, otf, migration_time, ls,
p, state_vector, time_in_states, time_in_state_n),
expect(nlp).build_objective(ls, state_vector, p). \
and_return(objective).once()
expect(nlp).build_constraint(
otf, migration_time, ls, state_vector,
p, time_in_states, time_in_state_n). \
and_return(constraint).once()
expect(b).solve2(objective, constraint, step, limit). \
and_return(solution).once()
self.assertEqual(
b.optimize(step, limit, otf, migration_time, ls,
p, state_vector, time_in_states, time_in_state_n),
solution)

View File

@ -58,16 +58,26 @@ class Core(TestCase):
def test_build_state_vector(self):
state_config = [0.4, 0.7]
self.assertEqual(c.build_state_vector(state_config, [0.0, 0.1]), [1, 0, 0])
self.assertEqual(c.build_state_vector(state_config, [0.0, 0.2]), [1, 0, 0])
self.assertEqual(c.build_state_vector(state_config, [0.0, 0.3]), [1, 0, 0])
self.assertEqual(c.build_state_vector(state_config, [0.0, 0.4]), [0, 1, 0])
self.assertEqual(c.build_state_vector(state_config, [0.0, 0.5]), [0, 1, 0])
self.assertEqual(c.build_state_vector(state_config, [0.0, 0.6]), [0, 1, 0])
self.assertEqual(c.build_state_vector(state_config, [0.0, 0.7]), [0, 0, 1])
self.assertEqual(c.build_state_vector(state_config, [0.0, 0.8]), [0, 0, 1])
self.assertEqual(c.build_state_vector(state_config, [0.0, 0.9]), [0, 0, 1])
self.assertEqual(c.build_state_vector(state_config, [0.0, 1.0]), [0, 0, 1])
self.assertEqual(c.build_state_vector(state_config, [0.0, 0.1]),
[1, 0, 0])
self.assertEqual(c.build_state_vector(state_config, [0.0, 0.2]),
[1, 0, 0])
self.assertEqual(c.build_state_vector(state_config, [0.0, 0.3]),
[1, 0, 0])
self.assertEqual(c.build_state_vector(state_config, [0.0, 0.4]),
[0, 1, 0])
self.assertEqual(c.build_state_vector(state_config, [0.0, 0.5]),
[0, 1, 0])
self.assertEqual(c.build_state_vector(state_config, [0.0, 0.6]),
[0, 1, 0])
self.assertEqual(c.build_state_vector(state_config, [0.0, 0.7]),
[0, 0, 1])
self.assertEqual(c.build_state_vector(state_config, [0.0, 0.8]),
[0, 0, 1])
self.assertEqual(c.build_state_vector(state_config, [0.0, 0.9]),
[0, 0, 1])
self.assertEqual(c.build_state_vector(state_config, [0.0, 1.0]),
[0, 0, 1])
def test_current_state(self):
self.assertEqual(c.current_state([1, 0, 0]), 0)
@ -76,9 +86,11 @@ class Core(TestCase):
def test_utilization_to_states(self):
state_config = [0.4, 0.7]
data = [0.25, 0.30, 0.62, 0.59, 0.67, 0.73, 0.85, 0.97, 0.73, 0.68, 0.69,
0.52, 0.51, 0.25, 0.38, 0.46, 0.52, 0.55, 0.58, 0.65, 0.70]
states = [0, 0, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 2]
data = [0.25, 0.30, 0.62, 0.59, 0.67, 0.73, 0.85, 0.97, 0.73,
0.68, 0.69, 0.52, 0.51, 0.25, 0.38, 0.46, 0.52, 0.55,
0.58, 0.65, 0.70]
states = [0, 0, 1, 1, 1, 2, 2, 2, 2, 1, 1,
1, 1, 0, 0, 1, 1, 1, 1, 1, 2]
self.assertEqual(c.utilization_to_states(state_config, data), states)
state_config = [1.0]
@ -88,7 +100,8 @@ class Core(TestCase):
def test_get_time_in_state_n(self):
state_config = [0.4, 0.7]
states = [0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 2]
states = [0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 1,
1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 2]
self.assertEqual(c.get_time_in_state_n(state_config, states), 5)
def test_issue_command_deterministic(self):

View File

@ -41,7 +41,7 @@ class Multisize(TestCase):
self.assertEqual(m.variance([0, 0], 100), 0.0)
self.assertAlmostEqual(m.variance([1, 1], 100), 0.0194020202)
self.assertAlmostEqual(m.variance([0, 1], 100), 0.0099010101)
self.assertAlmostEqual(m.variance([1, 2, 3, 4, 5], 100), 0.5112373737)
self.assertAlmostEqual(m.variance([1, 2, 3, 4, 5], 100), 0.511237373)
self.assertAlmostEqual(m.variance([0, 0, 0, 1], 100), 0.0099030303)
def test_acceptable_variance(self):
@ -50,106 +50,136 @@ class Multisize(TestCase):
def test_estimate_probability(self):
self.assertEqual(
m.estimate_probability([0, 0, 1, 1, 0, 0, 0, 0, 0, 0], 100, 0), 0.08)
m.estimate_probability([0, 0, 1, 1, 0, 0, 0, 0, 0, 0], 100, 0),
0.08)
self.assertEqual(
m.estimate_probability([0, 0, 1, 1, 0, 0, 0, 0, 0, 0], 100, 1), 0.02)
m.estimate_probability([0, 0, 1, 1, 0, 0, 0, 0, 0, 0], 100, 1),
0.02)
self.assertEqual(
m.estimate_probability([1, 1, 0, 0, 1, 1, 1, 1, 1, 1], 200, 0), 0.01)
m.estimate_probability([1, 1, 0, 0, 1, 1, 1, 1, 1, 1], 200, 0),
0.01)
self.assertEqual(
m.estimate_probability([1, 1, 0, 0, 1, 1, 1, 1, 1, 1], 200, 1), 0.04)
m.estimate_probability([1, 1, 0, 0, 1, 1, 1, 1, 1, 1], 200, 1),
0.04)
def test_update_request_windows(self):
max_window_size = 4
windows = [deque([0, 0], max_window_size),
deque([1, 1], max_window_size)]
self.assertEqual(m.update_request_windows(c(windows), 0, 0), [deque([0, 0, 0]),
deque([1, 1])])
self.assertEqual(m.update_request_windows(c(windows), 0, 1), [deque([0, 0, 1]),
deque([1, 1])])
self.assertEqual(m.update_request_windows(c(windows), 1, 0), [deque([0, 0]),
deque([1, 1, 0])])
self.assertEqual(m.update_request_windows(c(windows), 1, 1), [deque([0, 0]),
deque([1, 1, 1])])
self.assertEqual(m.update_request_windows(c(windows), 0, 0),
[deque([0, 0, 0]),
deque([1, 1])])
self.assertEqual(m.update_request_windows(c(windows), 0, 1),
[deque([0, 0, 1]),
deque([1, 1])])
self.assertEqual(m.update_request_windows(c(windows), 1, 0),
[deque([0, 0]),
deque([1, 1, 0])])
self.assertEqual(m.update_request_windows(c(windows), 1, 1),
[deque([0, 0]),
deque([1, 1, 1])])
max_window_size = 2
windows = [deque([0, 0], max_window_size),
deque([1, 1], max_window_size)]
self.assertEqual(m.update_request_windows(c(windows), 0, 0), [deque([0, 0]),
deque([1, 1])])
self.assertEqual(m.update_request_windows(c(windows), 0, 1), [deque([0, 1]),
deque([1, 1])])
self.assertEqual(m.update_request_windows(c(windows), 1, 0), [deque([0, 0]),
deque([1, 0])])
self.assertEqual(m.update_request_windows(c(windows), 1, 1), [deque([0, 0]),
deque([1, 1])])
self.assertEqual(m.update_request_windows(c(windows), 0, 0),
[deque([0, 0]),
deque([1, 1])])
self.assertEqual(m.update_request_windows(c(windows), 0, 1),
[deque([0, 1]),
deque([1, 1])])
self.assertEqual(m.update_request_windows(c(windows), 1, 0),
[deque([0, 0]),
deque([1, 0])])
self.assertEqual(m.update_request_windows(c(windows), 1, 1),
[deque([0, 0]),
deque([1, 1])])
max_window_size = 4
windows = [deque([0, 0], max_window_size),
deque([1, 1], max_window_size),
deque([2, 2], max_window_size)]
self.assertEqual(m.update_request_windows(c(windows), 0, 0), [deque([0, 0, 0]),
deque([1, 1]),
deque([2, 2])])
self.assertEqual(m.update_request_windows(c(windows), 0, 1), [deque([0, 0, 1]),
deque([1, 1]),
deque([2, 2])])
self.assertEqual(m.update_request_windows(c(windows), 0, 2), [deque([0, 0, 2]),
deque([1, 1]),
deque([2, 2])])
self.assertEqual(m.update_request_windows(c(windows), 1, 0), [deque([0, 0]),
deque([1, 1, 0]),
deque([2, 2])])
self.assertEqual(m.update_request_windows(c(windows), 1, 1), [deque([0, 0]),
deque([1, 1, 1]),
deque([2, 2])])
self.assertEqual(m.update_request_windows(c(windows), 1, 2), [deque([0, 0]),
deque([1, 1, 2]),
deque([2, 2])])
self.assertEqual(m.update_request_windows(c(windows), 2, 0), [deque([0, 0]),
deque([1, 1]),
deque([2, 2, 0])])
self.assertEqual(m.update_request_windows(c(windows), 2, 1), [deque([0, 0]),
deque([1, 1]),
deque([2, 2, 1])])
self.assertEqual(m.update_request_windows(c(windows), 2, 2), [deque([0, 0]),
deque([1, 1]),
deque([2, 2, 2])])
self.assertEqual(m.update_request_windows(c(windows), 0, 0),
[deque([0, 0, 0]),
deque([1, 1]),
deque([2, 2])])
self.assertEqual(m.update_request_windows(c(windows), 0, 1),
[deque([0, 0, 1]),
deque([1, 1]),
deque([2, 2])])
self.assertEqual(m.update_request_windows(c(windows), 0, 2),
[deque([0, 0, 2]),
deque([1, 1]),
deque([2, 2])])
self.assertEqual(m.update_request_windows(c(windows), 1, 0),
[deque([0, 0]),
deque([1, 1, 0]),
deque([2, 2])])
self.assertEqual(m.update_request_windows(c(windows), 1, 1),
[deque([0, 0]),
deque([1, 1, 1]),
deque([2, 2])])
self.assertEqual(m.update_request_windows(c(windows), 1, 2),
[deque([0, 0]),
deque([1, 1, 2]),
deque([2, 2])])
self.assertEqual(m.update_request_windows(c(windows), 2, 0),
[deque([0, 0]),
deque([1, 1]),
deque([2, 2, 0])])
self.assertEqual(m.update_request_windows(c(windows), 2, 1),
[deque([0, 0]),
deque([1, 1]),
deque([2, 2, 1])])
self.assertEqual(m.update_request_windows(c(windows), 2, 2),
[deque([0, 0]),
deque([1, 1]),
deque([2, 2, 2])])
max_window_size = 2
windows = [deque([0, 0], max_window_size),
deque([1, 1], max_window_size),
deque([2, 2], max_window_size)]
self.assertEqual(m.update_request_windows(c(windows), 0, 0), [deque([0, 0]),
deque([1, 1]),
deque([2, 2])])
self.assertEqual(m.update_request_windows(c(windows), 0, 1), [deque([0, 1]),
deque([1, 1]),
deque([2, 2])])
self.assertEqual(m.update_request_windows(c(windows), 0, 2), [deque([0, 2]),
deque([1, 1]),
deque([2, 2])])
self.assertEqual(m.update_request_windows(c(windows), 1, 0), [deque([0, 0]),
deque([1, 0]),
deque([2, 2])])
self.assertEqual(m.update_request_windows(c(windows), 1, 1), [deque([0, 0]),
deque([1, 1]),
deque([2, 2])])
self.assertEqual(m.update_request_windows(c(windows), 1, 2), [deque([0, 0]),
deque([1, 2]),
deque([2, 2])])
self.assertEqual(m.update_request_windows(c(windows), 2, 0), [deque([0, 0]),
deque([1, 1]),
deque([2, 0])])
self.assertEqual(m.update_request_windows(c(windows), 2, 1), [deque([0, 0]),
deque([1, 1]),
deque([2, 1])])
self.assertEqual(m.update_request_windows(c(windows), 2, 2), [deque([0, 0]),
deque([1, 1]),
deque([2, 2])])
self.assertEqual(m.update_request_windows(c(windows), 0, 0),
[deque([0, 0]),
deque([1, 1]),
deque([2, 2])])
self.assertEqual(m.update_request_windows(c(windows), 0, 1),
[deque([0, 1]),
deque([1, 1]),
deque([2, 2])])
self.assertEqual(m.update_request_windows(c(windows), 0, 2),
[deque([0, 2]),
deque([1, 1]),
deque([2, 2])])
self.assertEqual(m.update_request_windows(c(windows), 1, 0),
[deque([0, 0]),
deque([1, 0]),
deque([2, 2])])
self.assertEqual(m.update_request_windows(c(windows), 1, 1),
[deque([0, 0]),
deque([1, 1]),
deque([2, 2])])
self.assertEqual(m.update_request_windows(c(windows), 1, 2),
[deque([0, 0]),
deque([1, 2]),
deque([2, 2])])
self.assertEqual(m.update_request_windows(c(windows), 2, 0),
[deque([0, 0]),
deque([1, 1]),
deque([2, 0])])
self.assertEqual(m.update_request_windows(c(windows), 2, 1),
[deque([0, 0]),
deque([1, 1]),
deque([2, 1])])
self.assertEqual(m.update_request_windows(c(windows), 2, 2),
[deque([0, 0]),
deque([1, 1]),
deque([2, 2])])
def test_update_estimate_windows(self):
req_win = [deque([1, 0, 0, 0]),
@ -163,24 +193,26 @@ class Multisize(TestCase):
{2: deque([0, 0], 2),
4: deque([0, 0], 4)}]]
self.assertEqual(m.update_estimate_windows(c(est_win), c(req_win), 0),
[[{2: deque([0, 1.0]),
4: deque([0, 0, 0.75])},
{2: deque([0, 0.0]),
4: deque([0, 0, 0.25])}],
[{2: deque([0, 0]),
4: deque([0, 0])},
{2: deque([0, 0]),
4: deque([0, 0])}]])
self.assertEqual(m.update_estimate_windows(c(est_win), c(req_win), 1),
[[{2: deque([0, 0]),
4: deque([0, 0])},
{2: deque([0, 0]),
4: deque([0, 0])}],
[{2: deque([0, 0.5]),
4: deque([0, 0, 0.5])},
{2: deque([0, 0.5]),
4: deque([0, 0, 0.5])}]])
self.assertEqual(
m.update_estimate_windows(c(est_win), c(req_win), 0),
[[{2: deque([0, 1.0]),
4: deque([0, 0, 0.75])},
{2: deque([0, 0.0]),
4: deque([0, 0, 0.25])}],
[{2: deque([0, 0]),
4: deque([0, 0])},
{2: deque([0, 0]),
4: deque([0, 0])}]])
self.assertEqual(
m.update_estimate_windows(c(est_win), c(req_win), 1),
[[{2: deque([0, 0]),
4: deque([0, 0])},
{2: deque([0, 0]),
4: deque([0, 0])}],
[{2: deque([0, 0.5]),
4: deque([0, 0, 0.5])},
{2: deque([0, 0.5]),
4: deque([0, 0, 0.5])}]])
req_win = [deque([1, 0, 2, 0]),
deque([1, 0, 1, 0]),
@ -204,63 +236,66 @@ class Multisize(TestCase):
{2: deque([0, 0], 2),
4: deque([0, 0], 4)}]]
self.assertEqual(m.update_estimate_windows(c(est_win), c(req_win), 0),
[[{2: deque([0, 0.5]),
4: deque([0, 0, 0.5])},
{2: deque([0, 0.0]),
4: deque([0, 0, 0.25])},
{2: deque([0, 0.5]),
4: deque([0, 0, 0.25])}],
[{2: deque([0, 0]),
4: deque([0, 0])},
{2: deque([0, 0]),
4: deque([0, 0])},
{2: deque([0, 0]),
4: deque([0, 0])}],
[{2: deque([0, 0]),
4: deque([0, 0])},
{2: deque([0, 0]),
4: deque([0, 0])},
{2: deque([0, 0]),
4: deque([0, 0])}]])
self.assertEqual(m.update_estimate_windows(c(est_win), c(req_win), 1),
[[{2: deque([0, 0]),
4: deque([0, 0])},
{2: deque([0, 0]),
4: deque([0, 0])},
{2: deque([0, 0]),
4: deque([0, 0])}],
[{2: deque([0, 0.5]),
4: deque([0, 0, 0.5])},
{2: deque([0, 0.5]),
4: deque([0, 0, 0.5])},
{2: deque([0, 0.0]),
4: deque([0, 0, 0.0])}],
[{2: deque([0, 0]),
4: deque([0, 0])},
{2: deque([0, 0]),
4: deque([0, 0])},
{2: deque([0, 0]),
4: deque([0, 0])}]])
self.assertEqual(m.update_estimate_windows(c(est_win), c(req_win), 2),
[[{2: deque([0, 0]),
4: deque([0, 0])},
{2: deque([0, 0]),
4: deque([0, 0])},
{2: deque([0, 0]),
4: deque([0, 0])}],
[{2: deque([0, 0]),
4: deque([0, 0])},
{2: deque([0, 0]),
4: deque([0, 0])},
{2: deque([0, 0]),
4: deque([0, 0])}],
[{2: deque([0, 0.5]),
4: deque([0, 0, 0.25])},
{2: deque([0, 0.5]),
4: deque([0, 0, 0.25])},
{2: deque([0, 0.0]),
4: deque([0, 0, 0.5])}]])
self.assertEqual(
m.update_estimate_windows(c(est_win), c(req_win), 0),
[[{2: deque([0, 0.5]),
4: deque([0, 0, 0.5])},
{2: deque([0, 0.0]),
4: deque([0, 0, 0.25])},
{2: deque([0, 0.5]),
4: deque([0, 0, 0.25])}],
[{2: deque([0, 0]),
4: deque([0, 0])},
{2: deque([0, 0]),
4: deque([0, 0])},
{2: deque([0, 0]),
4: deque([0, 0])}],
[{2: deque([0, 0]),
4: deque([0, 0])},
{2: deque([0, 0]),
4: deque([0, 0])},
{2: deque([0, 0]),
4: deque([0, 0])}]])
self.assertEqual(
m.update_estimate_windows(c(est_win), c(req_win), 1),
[[{2: deque([0, 0]),
4: deque([0, 0])},
{2: deque([0, 0]),
4: deque([0, 0])},
{2: deque([0, 0]),
4: deque([0, 0])}],
[{2: deque([0, 0.5]),
4: deque([0, 0, 0.5])},
{2: deque([0, 0.5]),
4: deque([0, 0, 0.5])},
{2: deque([0, 0.0]),
4: deque([0, 0, 0.0])}],
[{2: deque([0, 0]),
4: deque([0, 0])},
{2: deque([0, 0]),
4: deque([0, 0])},
{2: deque([0, 0]),
4: deque([0, 0])}]])
self.assertEqual(
m.update_estimate_windows(c(est_win), c(req_win), 2),
[[{2: deque([0, 0]),
4: deque([0, 0])},
{2: deque([0, 0]),
4: deque([0, 0])},
{2: deque([0, 0]),
4: deque([0, 0])}],
[{2: deque([0, 0]),
4: deque([0, 0])},
{2: deque([0, 0]),
4: deque([0, 0])},
{2: deque([0, 0]),
4: deque([0, 0])}],
[{2: deque([0, 0.5]),
4: deque([0, 0, 0.25])},
{2: deque([0, 0.5]),
4: deque([0, 0, 0.25])},
{2: deque([0, 0.0]),
4: deque([0, 0, 0.5])}]])
def test_update_variances(self):
est_win = [[{2: deque([0, 0.5], 2),
@ -443,8 +478,8 @@ class Multisize(TestCase):
{2: 0.09375,
4: 0.0625}]])
self.assertEqual(m.update_acceptable_variances(
m.update_acceptable_variances(c(acc_variances), c(est_win), 0),
c(est_win), 0),
m.update_acceptable_variances(
c(acc_variances), c(est_win), 0), c(est_win), 0),
[[{2: 0.125,
4: 0.0},
{2: 0.125,
@ -571,7 +606,8 @@ class Multisize(TestCase):
4: 0.5}]]
window_sizes = [2, 4]
self.assertEqual(m.select_window(variances, acc_variances, window_sizes),
self.assertEqual(
m.select_window(variances, acc_variances, window_sizes),
[[2, 2],
[4, 2]])
@ -593,7 +629,8 @@ class Multisize(TestCase):
4: 0.5}]]
window_sizes = [2, 4]
self.assertEqual(m.select_window(variances, acc_variances, window_sizes),
self.assertEqual(
m.select_window(variances, acc_variances, window_sizes),
[[2, 4],
[4, 2]])
@ -635,7 +672,8 @@ class Multisize(TestCase):
4: 0.9}]]
window_sizes = [2, 4]
self.assertEqual(m.select_window(variances, acc_variances, window_sizes),
self.assertEqual(
m.select_window(variances, acc_variances, window_sizes),
[[4, 4, 2],
[4, 4, 4],
[4, 2, 2]])
@ -666,12 +704,14 @@ class Multisize(TestCase):
[2, 2, 2],
[2, 4, 2]]
self.assertEqual(m.select_best_estimates(c(est_win), selected_windows1),
self.assertEqual(
m.select_best_estimates(c(est_win), selected_windows1),
[[0, 1, 0],
[0.25, 0.5, 0.15],
[0, 1, 0]])
self.assertEqual(m.select_best_estimates(c(est_win), selected_windows2),
self.assertEqual(
m.select_best_estimates(c(est_win), selected_windows2),
[[0, 1, 0],
[0.25, 0.5, 0.25],
[0, 0.2, 0]])
@ -685,11 +725,13 @@ class Multisize(TestCase):
{2: deque(),
4: deque()}]]
self.assertEqual(m.select_best_estimates(c(est_win), [[2, 4], [4, 2]]),
self.assertEqual(
m.select_best_estimates(c(est_win), [[2, 4], [4, 2]]),
[[0.0, 0.0],
[0.0, 0.0]])
self.assertEqual(m.select_best_estimates(c(est_win), [[2, 2], [4, 4]]),
self.assertEqual(
m.select_best_estimates(c(est_win), [[2, 2], [4, 4]]),
[[0.0, 0.0],
[0.0, 0.0]])
@ -795,9 +837,12 @@ class Multisize(TestCase):
self.assertEqual(structure[2][2][4].maxlen, 4)
def test_init_selected_window_sizes(self):
self.assertEqual(m.init_selected_window_sizes([2, 4], 1), [[2]])
self.assertEqual(m.init_selected_window_sizes([2, 4], 2), [[2, 2],
[2, 2]])
self.assertEqual(m.init_selected_window_sizes([2, 4], 3), [[2, 2, 2],
[2, 2, 2],
[2, 2, 2]])
self.assertEqual(
m.init_selected_window_sizes([2, 4], 1), [[2]])
self.assertEqual(
m.init_selected_window_sizes([2, 4], 2), [[2, 2],
[2, 2]])
self.assertEqual(
m.init_selected_window_sizes([2, 4], 3), [[2, 2, 2],
[2, 2, 2],
[2, 2, 2]])

View File

@ -52,13 +52,16 @@ class Nlp(TestCase):
m = (m1, m2)
container = mock('function container')
expect(container).l0(state_vector, p, m).and_return(2).once()
expect(container).l1(state_vector, p, m).and_return(3).exactly(2).times()
expect(container).l1(state_vector, p, m). \
and_return(3).exactly(2).times()
ls = [container.l0, container.l1]
constraint = nlp.build_constraint(otf, migration_time, ls, state_vector, p, 0, 0)
constraint = nlp.build_constraint(otf, migration_time,
ls, state_vector, p, 0, 0)
self.assertTrue(hasattr(constraint[0], '__call__'))
self.assertIs(constraint[1], operator.le)
self.assertEqual(constraint[2], otf)
self.assertEqual(constraint[0](m1, m2),
float(migration_time + 3) / (migration_time + 5))
float(migration_time + 3) /
(migration_time + 5))

View File

@ -40,57 +40,78 @@ class Otf(TestCase):
self.assertFalse(otf.otf_limit(0.5, 5, [0.9, 0.8, 1.1, 1.2, 0.3]))
def test_otf_migration_time(self):
self.assertTrue(otf.otf_migration_time(0.5, 100, [0.9, 0.8, 1.1, 1.2, 1.3]))
self.assertTrue(otf.otf_migration_time(0.5, 100, [0.9, 0.8, 1.1, 1.2, 0.3]))
self.assertTrue(otf.otf_migration_time(0.5, 1, [0.9, 0.8, 1.1, 1.2, 1.3]))
self.assertFalse(otf.otf_migration_time(0.5, 1, [0.9, 0.8, 1.1, 1.2, 0.3]))
self.assertTrue(otf.otf_migration_time(
0.5, 100, [0.9, 0.8, 1.1, 1.2, 1.3]))
self.assertTrue(otf.otf_migration_time(
0.5, 100, [0.9, 0.8, 1.1, 1.2, 0.3]))
self.assertTrue(otf.otf_migration_time(
0.5, 1, [0.9, 0.8, 1.1, 1.2, 1.3]))
self.assertFalse(otf.otf_migration_time(
0.5, 1, [0.9, 0.8, 1.1, 1.2, 0.3]))
def test_otf_limit_migration_time(self):
self.assertFalse(otf.otf_limit_migration_time(0.5, 10, 100, [0.9, 0.8, 1.1, 1.2, 1.3]))
self.assertFalse(otf.otf_limit_migration_time(0.5, 10, 100, [0.9, 0.8, 1.1, 1.2, 0.3]))
self.assertFalse(otf.otf_limit_migration_time(0.5, 10, 1, [0.9, 0.8, 1.1, 1.2, 1.3]))
self.assertFalse(otf.otf_limit_migration_time(0.5, 10, 1, [0.9, 0.8, 1.1, 1.2, 0.3]))
self.assertTrue(otf.otf_limit_migration_time(0.5, 5, 100, [0.9, 0.8, 1.1, 1.2, 1.3]))
self.assertTrue(otf.otf_limit_migration_time(0.5, 5, 100, [0.9, 0.8, 1.1, 1.2, 0.3]))
self.assertTrue(otf.otf_limit_migration_time(0.5, 5, 1, [0.9, 0.8, 1.1, 1.2, 1.3]))
self.assertFalse(otf.otf_limit_migration_time(0.5, 5, 1, [0.9, 0.8, 1.1, 1.2, 0.3]))
self.assertFalse(otf.otf_limit_migration_time(
0.5, 10, 100, [0.9, 0.8, 1.1, 1.2, 1.3]))
self.assertFalse(otf.otf_limit_migration_time(
0.5, 10, 100, [0.9, 0.8, 1.1, 1.2, 0.3]))
self.assertFalse(otf.otf_limit_migration_time(
0.5, 10, 1, [0.9, 0.8, 1.1, 1.2, 1.3]))
self.assertFalse(otf.otf_limit_migration_time(
0.5, 10, 1, [0.9, 0.8, 1.1, 1.2, 0.3]))
self.assertTrue(otf.otf_limit_migration_time(
0.5, 5, 100, [0.9, 0.8, 1.1, 1.2, 1.3]))
self.assertTrue(otf.otf_limit_migration_time(
0.5, 5, 100, [0.9, 0.8, 1.1, 1.2, 0.3]))
self.assertTrue(otf.otf_limit_migration_time(
0.5, 5, 1, [0.9, 0.8, 1.1, 1.2, 1.3]))
self.assertFalse(otf.otf_limit_migration_time(
0.5, 5, 1, [0.9, 0.8, 1.1, 1.2, 0.3]))
def test_otf_factory(self):
alg = otf.otf_factory(300, 20, {'threshold': 0.5})
alg = otf.otf_factory(
300, 20, {'threshold': 0.5})
self.assertEqual(alg([0.9, 0.8, 1.1, 1.2, 1.3]), (True, {}))
self.assertEqual(alg([0.9, 0.8, 1.1, 1.2, 0.3]), (False, {}))
def test_otf_limit_factory(self):
alg = otf.otf_limit_factory(300, 20, {'threshold': 0.5, 'limit': 10})
alg = otf.otf_limit_factory(
300, 20, {'threshold': 0.5, 'limit': 10})
self.assertEqual(alg([0.9, 0.8, 1.1, 1.2, 1.3]), (False, {}))
self.assertEqual(alg([0.9, 0.8, 1.1, 1.2, 0.3]), (False, {}))
alg = otf.otf_limit_factory(300, 20, {'threshold': 0.5, 'limit': 5})
alg = otf.otf_limit_factory(
300, 20, {'threshold': 0.5, 'limit': 5})
self.assertEqual(alg([0.9, 0.8, 1.1, 1.2, 1.3]), (True, {}))
self.assertEqual(alg([0.9, 0.8, 1.1, 1.2, 0.3]), (False, {}))
def test_otf_migration_time_factory(self):
alg = otf.otf_migration_time_factory(30, 3000, {'threshold': 0.5})
alg = otf.otf_migration_time_factory(
30, 3000, {'threshold': 0.5})
self.assertEqual(alg([0.9, 0.8, 1.1, 1.2, 1.3]), (True, {}))
self.assertEqual(alg([0.9, 0.8, 1.1, 1.2, 0.3]), (True, {}))
alg = otf.otf_migration_time_factory(300, 1, {'threshold': 0.5})
alg = otf.otf_migration_time_factory(
300, 1, {'threshold': 0.5})
self.assertEqual(alg([0.9, 0.8, 1.1, 1.2, 1.3]), (True, {}))
self.assertEqual(alg([0.9, 0.8, 1.1, 1.2, 0.3]), (False, {}))
def test_otf_limit_migration_time_factory(self):
alg = otf.otf_limit_migration_time_factory(30, 3000, {'threshold': 0.5, 'limit': 10})
alg = otf.otf_limit_migration_time_factory(
30, 3000, {'threshold': 0.5, 'limit': 10})
self.assertEqual(alg([0.9, 0.8, 1.1, 1.2, 1.3]), (False, {}))
self.assertEqual(alg([0.9, 0.8, 1.1, 1.2, 0.3]), (False, {}))
alg = otf.otf_limit_migration_time_factory(300, 1, {'threshold': 0.5, 'limit': 10})
alg = otf.otf_limit_migration_time_factory(
300, 1, {'threshold': 0.5, 'limit': 10})
self.assertEqual(alg([0.9, 0.8, 1.1, 1.2, 1.3]), (False, {}))
self.assertEqual(alg([0.9, 0.8, 1.1, 1.2, 0.3]), (False, {}))
alg = otf.otf_limit_migration_time_factory(30, 3000, {'threshold': 0.5, 'limit': 5})
alg = otf.otf_limit_migration_time_factory(
30, 3000, {'threshold': 0.5, 'limit': 5})
self.assertEqual(alg([0.9, 0.8, 1.1, 1.2, 1.3]), (True, {}))
self.assertEqual(alg([0.9, 0.8, 1.1, 1.2, 0.3]), (True, {}))
alg = otf.otf_limit_migration_time_factory(300, 1, {'threshold': 0.5, 'limit': 5})
alg = otf.otf_limit_migration_time_factory(
300, 1, {'threshold': 0.5, 'limit': 5})
self.assertEqual(alg([0.9, 0.8, 1.1, 1.2, 1.3]), (True, {}))
self.assertEqual(alg([0.9, 0.8, 1.1, 1.2, 0.3]), (False, {}))

View File

@ -21,31 +21,38 @@ import neat.locals.overload.statistics as stats
class Statistics(TestCase):
def test_loess_factory(self):
alg = stats.loess_factory(300, 20, {'param': 1.2, 'limit': 3})
alg = stats.loess_factory(
300, 20, {'param': 1.2, 'limit': 3})
self.assertEqual(alg([]), (False, {}))
data = [1.05, 1.09, 1.07, 1.12, 1.02, 1.18, 1.15, 1.04, 1.10, 1.16, 1.08]
data = [1.05, 1.09, 1.07, 1.12, 1.02, 1.18,
1.15, 1.04, 1.10, 1.16, 1.08]
self.assertEqual(alg(data), (True, {}))
data = [0.55, 0.60, 0.62, 0.59, 0.67, 0.73, 0.85, 0.97, 0.73, 0.68, 0.69,
0.52, 0.51, 0.55, 0.48, 0.46, 0.52, 0.55, 0.58, 0.65, 0.70]
data = [0.55, 0.60, 0.62, 0.59, 0.67, 0.73, 0.85, 0.97, 0.73,
0.68, 0.69, 0.52, 0.51, 0.55, 0.48, 0.46, 0.52, 0.55,
0.58, 0.65, 0.70]
self.assertEqual(alg(data), (False, {}))
def test_loess_robust_factory(self):
alg = stats.loess_robust_factory(300, 20, {'param': 1.2, 'limit': 3})
alg = stats.loess_robust_factory(
300, 20, {'param': 1.2, 'limit': 3})
self.assertEqual(alg([]), (False, {}))
data = [1.05, 1.09, 1.07, 1.12, 1.02, 1.18, 1.15, 1.04, 1.10, 1.16, 1.08]
data = [1.05, 1.09, 1.07, 1.12, 1.02, 1.18,
1.15, 1.04, 1.10, 1.16, 1.08]
self.assertEqual(alg(data), (True, {}))
data = [0.55, 0.60, 0.62, 0.59, 0.67, 0.73, 0.85, 0.97, 0.73, 0.68, 0.69,
0.52, 0.51, 0.55, 0.48, 0.46, 0.52, 0.55, 0.58, 0.65, 0.70]
data = [0.55, 0.60, 0.62, 0.59, 0.67, 0.73, 0.85, 0.97, 0.73,
0.68, 0.69, 0.52, 0.51, 0.55, 0.48, 0.46, 0.52, 0.55,
0.58, 0.65, 0.70]
self.assertEqual(alg(data), (False, {}))
def test_mad_threshold_factory(self):
with MockTransaction:
expect(stats).mad.and_return(0.125).exactly(6).times()
alg = stats.mad_threshold_factory(300, 20, {'threshold': 1.6, 'limit': 3})
alg = stats.mad_threshold_factory(
300, 20, {'threshold': 1.6, 'limit': 3})
self.assertEqual(alg([]), (False, {}))
self.assertEqual(alg([0., 0., 0.0]), (False, {}))
self.assertEqual(alg([0., 0., 0.5]), (False, {}))
@ -57,7 +64,8 @@ class Statistics(TestCase):
def test_iqr_threshold_factory(self):
with MockTransaction:
expect(stats).iqr.and_return(0.125).exactly(6).times()
alg = stats.iqr_threshold_factory(300, 20, {'threshold': 1.6, 'limit': 3})
alg = stats.iqr_threshold_factory(
300, 20, {'threshold': 1.6, 'limit': 3})
self.assertEqual(alg([]), (False, {}))
self.assertEqual(alg([0., 0., 0.0]), (False, {}))
self.assertEqual(alg([0., 0., 0.5]), (False, {}))
@ -67,58 +75,65 @@ class Statistics(TestCase):
self.assertEqual(alg([0., 0., 1.0]), (True, {}))
def test_loess(self):
assert stats.loess(1.2, 3, 0.5, []) == False
assert not stats.loess(1.2, 3, 0.5, [])
data = [1.05, 1.09, 1.07, 1.12, 1.02, 1.18, 1.15, 1.04, 1.10, 1.16, 1.08]
assert stats.loess(1.2, 3, 0.5, data) == True
data = [1.05, 1.09, 1.07, 1.12, 1.02, 1.18,
1.15, 1.04, 1.10, 1.16, 1.08]
assert stats.loess(1.2, 3, 0.5, data)
data = [0.55, 0.60, 0.62, 0.59, 0.67, 0.73, 0.85, 0.97, 0.73, 0.68, 0.69,
0.52, 0.51, 0.55, 0.48, 0.46, 0.52, 0.55, 0.58, 0.65, 0.70]
assert stats.loess(1.2, 3, 0.5, data) == False
data = [0.55, 0.60, 0.62, 0.59, 0.67, 0.73, 0.85, 0.97, 0.73,
0.68, 0.69, 0.52, 0.51, 0.55, 0.48, 0.46, 0.52, 0.55,
0.58, 0.65, 0.70]
assert not stats.loess(1.2, 3, 0.5, data)
def test_loess_robust(self):
assert stats.loess_robust(1.2, 3, 0.5, []) == False
assert not stats.loess_robust(1.2, 3, 0.5, [])
data = [1.05, 1.09, 1.07, 1.12, 1.02, 1.18, 1.15, 1.04, 1.10, 1.16, 1.08]
assert stats.loess_robust(1.2, 3, 0.5, data) == True
data = [1.05, 1.09, 1.07, 1.12, 1.02, 1.18,
1.15, 1.04, 1.10, 1.16, 1.08]
assert stats.loess_robust(1.2, 3, 0.5, data)
data = [0.55, 0.60, 0.62, 0.59, 0.67, 0.73, 0.85, 0.97, 0.73, 0.68, 0.69,
0.52, 0.51, 0.55, 0.48, 0.46, 0.52, 0.55, 0.58, 0.65, 0.70]
assert stats.loess_robust(1.2, 3, 0.5, data) == False
data = [0.55, 0.60, 0.62, 0.59, 0.67, 0.73, 0.85, 0.97, 0.73,
0.68, 0.69, 0.52, 0.51, 0.55, 0.48, 0.46, 0.52, 0.55,
0.58, 0.65, 0.70]
assert not stats.loess_robust(1.2, 3, 0.5, data)
def test_mad_threshold(self):
with MockTransaction:
expect(stats).mad.and_return(0.125).exactly(6).times()
assert stats.mad_threshold(1., 3, []) == False
assert stats.mad_threshold(1., 3, [0., 0., 0.]) == False
assert stats.mad_threshold(1.6, 3, [0., 0., 0.5]) == False
assert stats.mad_threshold(1.6, 3, [0., 0., 0.6]) == False
assert stats.mad_threshold(1.6, 3, [0., 0., 0.8]) == True
assert stats.mad_threshold(1.6, 3, [0., 0., 0.9]) == True
assert stats.mad_threshold(1.6, 3, [0., 0., 1.0]) == True
assert not stats.mad_threshold(1., 3, [])
assert not stats.mad_threshold(1., 3, [0., 0., 0.])
assert not stats.mad_threshold(1.6, 3, [0., 0., 0.5])
assert not stats.mad_threshold(1.6, 3, [0., 0., 0.6])
assert stats.mad_threshold(1.6, 3, [0., 0., 0.8])
assert stats.mad_threshold(1.6, 3, [0., 0., 0.9])
assert stats.mad_threshold(1.6, 3, [0., 0., 1.0])
def test_iqr_threshold(self):
with MockTransaction:
expect(stats).iqr.and_return(0.125).exactly(6).times()
assert stats.iqr_threshold(1., 3, []) == False
assert stats.iqr_threshold(1., 3, [0., 0., 0.]) == False
assert stats.iqr_threshold(1.6, 3, [0., 0., 0.5]) == False
assert stats.iqr_threshold(1.6, 3, [0., 0., 0.6]) == False
assert stats.iqr_threshold(1.6, 3, [0., 0., 0.8]) == True
assert stats.iqr_threshold(1.6, 3, [0., 0., 0.9]) == True
assert stats.iqr_threshold(1.6, 3, [0., 0., 1.0]) == True
assert not stats.iqr_threshold(1., 3, [])
assert not stats.iqr_threshold(1., 3, [0., 0., 0.])
assert not stats.iqr_threshold(1.6, 3, [0., 0., 0.5])
assert not stats.iqr_threshold(1.6, 3, [0., 0., 0.6])
assert stats.iqr_threshold(1.6, 3, [0., 0., 0.8])
assert stats.iqr_threshold(1.6, 3, [0., 0., 0.9])
assert stats.iqr_threshold(1.6, 3, [0., 0., 1.0])
def test_utilization_threshold_abstract(self):
f = lambda x: 0.8
assert stats.utilization_threshold_abstract(f, 3, []) == False
assert stats.utilization_threshold_abstract(f, 3, [0., 0., 0.]) == False
assert stats.utilization_threshold_abstract(f, 3, [0., 0., 1.0]) == True
assert stats.utilization_threshold_abstract(f, 3, [0., 0., 0., 0.]) == False
assert stats.utilization_threshold_abstract(f, 3, [0., 0., 0., 0.5]) == False
assert stats.utilization_threshold_abstract(f, 3, [0., 0., 0., 0.7]) == False
assert stats.utilization_threshold_abstract(f, 3, [0., 0., 0., 0.8]) == True
assert stats.utilization_threshold_abstract(f, 3, [0., 0., 0., 0.9]) == True
assert stats.utilization_threshold_abstract(f, 3, [0., 0., 0., 1.0]) == True
assert not stats.utilization_threshold_abstract(f, 3, [])
assert not stats.utilization_threshold_abstract(f, 3, [0., 0., 0.])
assert stats.utilization_threshold_abstract(f, 3, [0., 0., 1.0])
assert not stats.utilization_threshold_abstract(
f, 3, [0., 0., 0., 0.])
assert not stats.utilization_threshold_abstract(
f, 3, [0., 0., 0., 0.5])
assert not stats.utilization_threshold_abstract(
f, 3, [0., 0., 0., 0.7])
assert stats.utilization_threshold_abstract(f, 3, [0., 0., 0., 0.8])
assert stats.utilization_threshold_abstract(f, 3, [0., 0., 0., 0.9])
assert stats.utilization_threshold_abstract(f, 3, [0., 0., 0., 1.0])
def test_mad(self):
data = [1, 1, 2, 2, 4, 6, 9]
@ -154,7 +169,8 @@ class Statistics(TestCase):
for actual, expected in zip(
stats.tricube_weights(10),
[0.148, 0.148, 0.148, 0.348, 0.568, 0.759, 0.892, 0.967, 0.995, 1.0]):
[0.148, 0.148, 0.148, 0.348, 0.568, 0.759,
0.892, 0.967, 0.995, 1.0]):
self.assertAlmostEqual(actual, expected, 2)
def test_tricube_bisquare_weights(self):

View File
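
Besides line wrapping, the Statistics hunks above also replace explicit comparisons against booleans (`== True`, `== False`) with bare assertions, the form PEP 8 recommends. A small self-contained sketch of the before/after idiom; the `overloaded` helper is a placeholder for illustration, not part of the patch:

    def overloaded(values, threshold=1.0):
        # Placeholder predicate: True when the mean load exceeds the
        # threshold.
        return sum(values) / len(values) > threshold

    # Before: comparing a boolean result against True/False.
    assert overloaded([0.9, 1.2, 1.3]) == True
    assert overloaded([0.2, 0.3, 0.4]) == False

    # After: assert the value (or its negation) directly.
    assert overloaded([0.9, 1.2, 1.3])
    assert not overloaded([0.2, 0.3, 0.4])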

@ -26,7 +26,8 @@ class Trivial(TestCase):
migration_time=int_(min=0, max=10),
utilization=list_(of=float)
):
alg = trivial.no_migrations_factory(time_step, migration_time, {'threshold': 0.5})
alg = trivial.no_migrations_factory(time_step, migration_time,
{'threshold': 0.5})
assert alg(utilization) == (False, {})
def test_threshold(self):

View File

@ -48,7 +48,8 @@ class Collector(TestCase):
def init_state():
with MockTransaction:
vir_connection = mock('virConnect')
expect(libvirt).openReadOnly(None).and_return(vir_connection).once()
expect(libvirt).openReadOnly(None). \
and_return(vir_connection).once()
physical_cpus = 13
expect(common).physical_cpu_count(vir_connection). \
and_return(physical_cpus).once()
@ -130,14 +131,14 @@ class Collector(TestCase):
min_length=0, max_length=5
)
):
previous_vms = list(x)
prev_vms = list(x)
removed = []
if x:
to_remove = random.randrange(len(x))
for _ in xrange(to_remove):
removed.append(x.pop(random.randrange(len(x))))
x.extend(y)
assert set(collector.get_removed_vms(previous_vms, x)) == set(removed)
assert set(collector.get_removed_vms(prev_vms, x)) == set(removed)
@qc
def substract_lists(
@ -167,7 +168,8 @@ class Collector(TestCase):
local_data_directory_tmp)
assert len(os.listdir(local_data_directory_tmp)) == 3
collector.cleanup_local_data(local_data_directory_tmp, [vm1, vm2, vm3])
collector.cleanup_local_data(local_data_directory_tmp,
[vm1, vm2, vm3])
assert len(os.listdir(local_data_directory_tmp)) == 0
os.rmdir(local_data_directory_tmp)
@ -176,7 +178,8 @@ class Collector(TestCase):
def fetch_remote_data(
x=dict_(
keys=str_(of='abc123-', min_length=36, max_length=36),
values=list_(of=int_(min=0, max=3000), min_length=0, max_length=10),
values=list_(of=int_(min=0, max=3000),
min_length=0, max_length=10),
min_length=0, max_length=3
),
data_length=int_(min=1, max=10)
@ -197,12 +200,14 @@ class Collector(TestCase):
def write_data_locally(
x=dict_(
keys=str_(of='abc123-', min_length=36, max_length=36),
values=list_(of=int_(min=0, max=3000), min_length=0, max_length=10),
values=list_(of=int_(min=0, max=3000),
min_length=0, max_length=10),
min_length=0, max_length=3
),
data_length=int_(min=0, max=10)
):
path = os.path.join(os.path.dirname(__file__), '..', 'resources', 'vms', 'tmp')
path = os.path.join(os.path.dirname(__file__),
'..', 'resources', 'vms', 'tmp')
shutil.rmtree(path, True)
os.mkdir(path)
collector.write_data_locally(path, x, data_length)
@ -212,7 +217,8 @@ class Collector(TestCase):
for uuid in x.keys():
file = os.path.join(path, uuid)
with open(file, 'r') as f:
result[uuid] = [int(a) for a in f.read().strip().splitlines()]
result[uuid] = [int(a)
for a in f.read().strip().splitlines()]
shutil.rmtree(path)
@ -234,7 +240,8 @@ class Collector(TestCase):
),
data_length=int_(min=0, max=10)
):
path = os.path.join(os.path.dirname(__file__), '..', 'resources', 'vms', 'tmp')
path = os.path.join(os.path.dirname(__file__),
'..', 'resources', 'vms', 'tmp')
shutil.rmtree(path, True)
os.mkdir(path)
original_data = {}
@ -259,7 +266,8 @@ class Collector(TestCase):
for uuid in x.keys():
file = os.path.join(path, uuid)
with open(file, 'r') as f:
result[uuid] = [int(a) for a in f.read().strip().splitlines()]
result[uuid] = [int(a)
for a in f.read().strip().splitlines()]
shutil.rmtree(path)
@ -319,7 +327,8 @@ class Collector(TestCase):
with MockTransaction:
def mock_get_cpu_time(vir_connection, uuid):
if uuid in original_vm_data:
return original_vm_data[uuid][0] + original_vm_data[uuid][1]
return original_vm_data[uuid][0] + \
original_vm_data[uuid][1]
else:
return added_vms[uuid][0]
@ -358,9 +367,10 @@ class Collector(TestCase):
vms.extend(added_vms.keys())
result = collector.get_cpu_mhz(connection, cpus, previous_cpu_time,
previous_time, current_time, vms,
added_vm_data)
result = collector.get_cpu_mhz(
connection, cpus, previous_cpu_time,
previous_time, current_time, vms,
added_vm_data)
assert result[0] == current_cpu_time
assert result[1] == cpu_mhz
@ -404,4 +414,5 @@ class Collector(TestCase):
connection = libvirt.virConnect()
expect(connection).getInfo().and_return(
['x86_64', ram, cores, mhz, 1, 1, 4, 2]).once()
assert collector.get_host_characteristics(connection) == (cores * mhz, ram)
assert collector.get_host_characteristics(connection) == \
(cores * mhz, ram)

View File
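
Several Collector hunks wrap a list comprehension that reads a per-VM data file back into integers. Roughly, the round trip being exercised looks like the sketch below; the write format (one integer per line) is inferred from the read side, and the path and values are illustrative rather than taken from the patch:

    import os
    import shutil
    import tempfile

    path = tempfile.mkdtemp()
    uuid = 'abc123-0000-0000-0000-000000000000'
    values = [1500, 1700, 1300]

    # Write one integer per line, matching what the read side expects.
    with open(os.path.join(path, uuid), 'w') as f:
        f.write('\n'.join(str(v) for v in values) + '\n')

    # Read it back with the wrapped comprehension shown in the hunk above.
    with open(os.path.join(path, uuid), 'r') as f:
        result = [int(a)
                  for a in f.read().strip().splitlines()]

    shutil.rmtree(path)
    assert result == values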

@ -71,7 +71,8 @@ class LocalManager(TestCase):
min_length=0, max_length=5
)
):
path = os.path.join(os.path.dirname(__file__), '..', 'resources', 'vms', 'tmp')
path = os.path.join(os.path.dirname(__file__),
'..', 'resources', 'vms', 'tmp')
shutil.rmtree(path, True)
os.mkdir(path)
collector.write_data_locally(path, data, 10)
@ -125,7 +126,8 @@ class LocalManager(TestCase):
with MockTransaction:
connection = libvirt.virConnect()
domain = mock('domain')
expect(connection).lookupByUUIDString(uuid).and_return(domain).once()
expect(connection).lookupByUUIDString(uuid). \
and_return(domain).once()
expect(domain).getMaxMemory().and_return(x).once()
assert manager.get_max_ram(connection, uuid) == int(x / 1024)
@ -135,15 +137,17 @@ class LocalManager(TestCase):
):
with MockTransaction:
connection = libvirt.virConnect()
expect(connection).lookupByUUIDString(uuid).and_return(None).once()
expect(connection).lookupByUUIDString(uuid). \
and_return(None).once()
assert manager.get_max_ram(connection, uuid) is None
def test_vm_mhz_to_percentage(self):
self.assertEqual(manager.vm_mhz_to_percentage({'a': [100, 200, 300],
'b': [100, 300, 200],
'c': [100, 100, 700]},
3000),
[0.1, 0.2, 0.4])
self.assertEqual(manager.vm_mhz_to_percentage(
{'a': [100, 200, 300],
'b': [100, 300, 200],
'c': [100, 100, 700]},
3000),
[0.1, 0.2, 0.4])
@qc(10)
def calculate_migration_time(
@ -156,4 +160,5 @@ class LocalManager(TestCase):
):
ram = data.values()
migration_time = float(sum(ram)) / len(ram) / bandwidth
assert manager.calculate_migration_time(data, bandwidth) == migration_time
assert manager.calculate_migration_time(data, bandwidth) == \
migration_time

View File

@ -38,7 +38,8 @@ class Selection(TestCase):
def minimum_utilization_factory(
x=dict_(
keys=str_(of='abc123-', min_length=36, max_length=36),
values=list_(of=int_(min=0, max=3000), min_length=1, max_length=10),
values=list_(of=int_(min=0, max=3000),
min_length=1, max_length=10),
min_length=1, max_length=5
)
):
@ -54,7 +55,8 @@ class Selection(TestCase):
def random_factory(
x=dict_(
keys=str_(of='abc123-', min_length=36, max_length=36),
values=list_(of=int_(min=0, max=3000), min_length=0, max_length=10),
values=list_(of=int_(min=0, max=3000),
min_length=0, max_length=10),
min_length=1, max_length=3
)
):
@ -81,7 +83,8 @@ class Selection(TestCase):
def minimum_utilization(
x=dict_(
keys=str_(of='abc123-', min_length=36, max_length=36),
values=list_(of=int_(min=0, max=3000), min_length=1, max_length=10),
values=list_(of=int_(min=0, max=3000),
min_length=1, max_length=10),
min_length=1, max_length=5
)
):
@ -96,7 +99,8 @@ class Selection(TestCase):
def random(
x=dict_(
keys=str_(of='abc123-', min_length=36, max_length=36),
values=list_(of=int_(min=0, max=3000), min_length=0, max_length=10),
values=list_(of=int_(min=0, max=3000),
min_length=0, max_length=10),
min_length=1, max_length=3
)
):

View File

@ -62,8 +62,10 @@ class Common(TestCase):
def physical_cpu_mhz_total(x=int_(min=0, max=8), y=int_(min=0, max=8)):
with MockTransaction:
connection = libvirt.virConnect()
expect(common).physical_cpu_count(connection).and_return(x).once()
expect(common).physical_cpu_mhz(connection).and_return(y).once()
expect(common).physical_cpu_count(connection). \
and_return(x).once()
expect(common).physical_cpu_mhz(connection). \
and_return(y).once()
assert common.physical_cpu_mhz_total(connection) == x * y
def test_frange(self):

View File

@ -34,40 +34,48 @@ class Config(TestCase):
@qc
def validate_valid_config(
x=list_(of=str_(of='abc123_', max_length=20), min_length=0, max_length=10)
x=list_(of=str_(of='abc123_', max_length=20),
min_length=0, max_length=10)
):
test_config = dict(zip(x, x))
assert config.validate_config(test_config, x)
@qc
def validate_invalid_config(
x=list_(of=str_(of='abc123_', max_length=20), min_length=0, max_length=5),
y=list_(of=str_(of='abc123_', max_length=20), min_length=6, max_length=10)
x=list_(of=str_(of='abc123_', max_length=20),
min_length=0, max_length=5),
y=list_(of=str_(of='abc123_', max_length=20),
min_length=6, max_length=10)
):
test_config = dict(zip(x, x))
assert not config.validate_config(test_config, y)
@qc(10)
def read_and_validate_valid_config(
x=list_(of=str_(of='abc123_', max_length=20), min_length=0, max_length=10)
x=list_(of=str_(of='abc123_', max_length=20),
min_length=0, max_length=10)
):
with MockTransaction:
test_config = dict(zip(x, x))
paths = ['path1', 'path2']
expect(config).read_config(paths).and_return(test_config).once()
expect(config).validate_config(test_config, x).and_return(True).once()
expect(config).validate_config(test_config, x). \
and_return(True).once()
assert config.read_and_validate_config(paths, x) == test_config
@qc(10)
def read_and_validate_invalid_config(
x=list_(of=str_(of='abc123_', max_length=20), min_length=0, max_length=5),
y=list_(of=str_(of='abc123_', max_length=20), min_length=6, max_length=10)
x=list_(of=str_(of='abc123_', max_length=20),
min_length=0, max_length=5),
y=list_(of=str_(of='abc123_', max_length=20),
min_length=6, max_length=10)
):
with MockTransaction:
test_config = dict(zip(x, x))
paths = [config.DEFAILT_CONFIG_PATH, config.CONFIG_PATH]
expect(config).read_config(paths).and_return(test_config).once()
expect(config).validate_config(test_config, y).and_return(False).once()
expect(config).validate_config(test_config, y). \
and_return(False).once()
try:
config.read_and_validate_config(paths, y)
except KeyError:

View File

@ -26,7 +26,8 @@ class Db(TestCase):
db.vms.insert().execute(uuid='test')
assert db.vms.select().execute().first()['uuid'] == 'test'
db.vm_resource_usage.insert().execute(vm_id=1, cpu_mhz=1000)
assert db.vm_resource_usage.select().execute().first()['cpu_mhz'] == 1000
assert db.vm_resource_usage.select(). \
execute().first()['cpu_mhz'] == 1000
@qc(10)
def select_cpu_mhz_for_vm(
@ -129,5 +130,6 @@ class Db(TestCase):
db.update_host('host1', 3000, 4000)
db.update_host('host2', 3500, 8000)
assert db.select_host_characteristics() == ({'host1': 3000, 'host2': 3500},
{'host1': 4000, 'host2': 8000})
assert db.select_host_characteristics() == \
({'host1': 3000, 'host2': 3500},
{'host1': 4000, 'host2': 8000})