fix tox
Change-Id: I3f77424c3d41fb21b9d562ee1bb82ea0e869b773
This commit is contained in:
parent
feaa33b8c4
commit
70fc4850d0
@ -2,8 +2,8 @@
test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \
             OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \
             OS_LOG_CAPTURE=${OS_LOG_CAPTURE:-1} \
             OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-160} \
             ${PYTHON:-python} -m subunit.run discover -t ./ ./terracotta/tests/unit $LISTOPT $IDOPTION
             ${PYTHON:-python} -m subunit.run discover -s ${OS_TEST_PATH:-./terracotta} -t . $LISTOPT $IDOPTION
test_id_option=--load-list $IDFILE
test_list_option=--list
group_regex=([^\.]+\.)+
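The hunk above widens test discovery from ./terracotta/tests/unit to the whole package via the OS_TEST_PATH default. A hedged sketch of how those ${VAR:-default} fallbacks resolve if the same command is rebuilt in Python — the paths and variable names are taken from the hunk, but the helper itself is illustrative and not part of the commit:

```python
# Illustrative only: rebuild the discover invocation with the same
# environment-variable defaults the .testr.conf test_command uses.
import os
import subprocess


def run_discovery():
    env = dict(os.environ)
    for var in ('OS_STDOUT_CAPTURE', 'OS_STDERR_CAPTURE', 'OS_LOG_CAPTURE'):
        env.setdefault(var, '1')
    env.setdefault('OS_TEST_TIMEOUT', '160')
    cmd = [os.environ.get('PYTHON', 'python'), '-m', 'subunit.run',
           'discover', '-s', os.environ.get('OS_TEST_PATH', './terracotta'),
           '-t', '.']
    return subprocess.call(cmd, env=env)
```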
209
doc/source/conf.py
Normal file
@ -0,0 +1,209 @@
# Copyright (c) 2016 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This file is execfile()'d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are presented in this
# autogenerated file.
#
# All configuration parameters have their own default values,
# which can be commented out to use.

import os

# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']

todo_include_todos = True

# Add any paths that contain templates here, relative to this directory.
templates_path = []
if os.getenv('HUDSON_PUBLISH_DOCS'):
    templates_path = ['_ga', '_templates']
else:
    templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Terracotta'
copyright = u'2011-present, OpenStack Foundation.'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# Version info
release = '0.1.0'
# The short X.Y version.
version = '0.1.0'

# The language for the content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value:
#today = ''
# Or, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of documents that shouldn't be included in the build.
#unused_docs = []

# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []

# The reST default role (for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['terracotta.']

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
#html_theme_path = ["."]
#html_theme = '_theme'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = ['_theme']

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
git_cmd = "git log --pretty=format:'%ad, commit %h' --date=local -n1"
html_last_updated_fmt = os.popen(git_cmd).read()

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, map document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_use_modindex = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''

# Output file base name for HTML help builder.
htmlhelp_basename = 'terracottadoc'


# -- Options for LaTeX output ------------------------------------------------

# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'

# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author,
# documentclass [howto/manual]).
latex_documents = [
    ('index', 'Terracotta.tex', u'Terracotta Documentation',
     u'Terracotta development team', 'manual'),
]

# The name of an image file (relative to this directory) used to
# place at the top of the title page.
#latex_logo = None

# If this is true for "manual" documents, top-level headings should be parts
# instead of chapters.
#latex_use_parts = False

# Additional stuff for the LaTeX preamble.
#latex_preamble = ''

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_use_modindex = True
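A hedged aside on the `html_last_updated_fmt` lines in the new conf.py: `os.popen` works, but the same git pipeline can be expressed with `subprocess`, which avoids spawning a shell. This snippet is only an illustration, not part of the commit:

```python
# Equivalent to the os.popen() call in conf.py, shown only for illustration.
import subprocess

html_last_updated_fmt = subprocess.check_output(
    ['git', 'log', "--pretty=format:%ad, commit %h", '--date=local', '-n1']
).decode('utf-8')
```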
20
doc/source/index.rst
Normal file
@ -0,0 +1,20 @@
..
   Copyright 2011-2016 OpenStack Foundation
   All Rights Reserved.

   Licensed under the Apache License, Version 2.0 (the "License"); you may
   not use this file except in compliance with the License. You may obtain
   a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
   License for the specific language governing permissions and limitations
   under the License.

Welcome to TerraCotta's developer documentation!
================================================

TerraCotta is an OpenStack DRS service
@ -23,6 +23,7 @@ keystonemiddleware>=1.5.0
libvirt-python>=1.2.5 # LGPLv2+
netaddr>=0.7.12
Mako>=0.4.0
numpy # This is not in global requirements
scipy # This is not in global requirements
#numpy # This is not in global requirements. These packages are not available now
#scipy # This is not in global requirements. These packages are not available now
netifaces>=0.10.4
# TODO fix numpy and scipy
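Because numpy and scipy are commented out above (they are not in OpenStack global requirements), code that imports them unconditionally will fail at import time in a default tox environment. A hedged, illustrative guard — not something this commit adds — that keeps a module importable until the TODO is resolved:

```python
# Illustrative only: degrade gracefully while numpy/scipy are not installable
# from requirements.txt.
try:
    import numpy
except ImportError:  # pragma: no cover
    numpy = None


def mean(values):
    """Average a list of numbers, using numpy when it is available."""
    if numpy is not None:
        return float(numpy.mean(values))
    return sum(values) / float(len(values))
```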
1
setup.py
@ -1,4 +1,3 @@
#!/usr/bin/env python
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
@ -1,3 +1,4 @@
# Copyright 2016 Huawei Tech inc.
# Copyright 2012 Anton Beloglazov
#
# Licensed under the Apache License, Version 2.0 (the "License");
@ -13,7 +14,9 @@
# limitations under the License.

"""
OpenStack Neat :: an add-on to OpenStack implementing energy and performance efficient dynamic consolidation of virtual machines
OpenStack Terracotta: an add-on to OpenStack implementing energy and
performance efficient dynamic consolidation of virtual machines
"""

__version__ = "0.1"
__author__ = "Anton Beloglazov"
__author__ = "Anton Beloglazov"
@ -17,12 +17,13 @@ import eventlet
import os
from oslo_config import cfg
from oslo_log import log as logging
from wsgiref import simple_server

import sys
from terracotta.api import app
from terracotta import config
from terracotta import rpc
from terracotta import version
from wsgiref import simple_server

eventlet.monkey_patch(
    os=True,
@ -103,4 +104,3 @@ def main():

if __name__ == '__main__':
    main()

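A hedged sketch of the launcher pattern these cmd/ hunks rearrange: eventlet.monkey_patch() has to run before anything that touches sockets, and the wsgiref server is started afterwards. The names below (make_app, port 8080) are placeholders, not values from the diff:

```python
# Sketch only -- the real entry point lives in the terracotta cmd modules.
import eventlet
eventlet.monkey_patch(os=True, select=True, socket=True, thread=True, time=True)

from wsgiref import simple_server  # noqa: E402  (imported after monkey_patch)


def serve(make_app, host='0.0.0.0', port=8080):
    """Run a WSGI app under the monkey-patched standard library."""
    server = simple_server.make_server(host, port, make_app())
    server.serve_forever()
```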
@ -14,14 +14,15 @@


import eventlet
import os
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
import sys
from terracotta import config
from terracotta import rpc
from terracotta.locals import collector
from terracotta.openstack.common import threadgroup
from terracotta import rpc
from terracotta import version

eventlet.monkey_patch(
@ -31,7 +32,6 @@ eventlet.monkey_patch(
    thread=False if '--use-debugger' in sys.argv else True,
    time=True)

import os

POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
                                                os.pardir,
@ -117,4 +117,3 @@ def main():

if __name__ == '__main__':
    main()

@ -20,8 +20,8 @@ from oslo_log import log as logging
import oslo_messaging as messaging
import sys
from terracotta import config
from terracotta import rpc
from terracotta.globals import manager as global_mgr
from terracotta import rpc
from terracotta import version


@ -17,17 +17,17 @@ import eventlet
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from wsgiref import simple_server

import sys
from terracotta.api import app
from terracotta import config
from terracotta import rpc
from terracotta.globals import manager as global_mgr
from terracotta.locals import collector
from terracotta.locals import manager as local_mgr
from terracotta.globals import manager as global_mgr
from terracotta.openstack.common import threadgroup
from terracotta import rpc
from terracotta import version

from wsgiref import simple_server

eventlet.monkey_patch(
    os=True,
@ -14,16 +14,18 @@


import eventlet
import os
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging

import sys
from terracotta import config
from terracotta import rpc
from terracotta.locals import manager as local_mgr
from terracotta.openstack.common import threadgroup
from terracotta import rpc
from terracotta import version
import os


eventlet.monkey_patch(
    os=True,
@ -40,11 +42,10 @@ if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'terracotta', '__init__.py')):
    sys.path.insert(0, POSSIBLE_TOPDIR)




CONF = cfg.CONF
LOG = logging.getLogger(__name__)


def launch_local_manager(transport):
    target = messaging.Target(
        topic=cfg.CONF.local_manager.topic,
@ -72,6 +73,7 @@ def launch_local_manager(transport):
    server.start()
    server.wait()


def launch_any(transport, options):
    thread = eventlet.spawn(launch_local_manager, transport)

@ -117,4 +119,3 @@ def main():

if __name__ == '__main__':
    main()

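The launchers above all follow the same oslo.messaging pattern: build a Target from the configured topic, wrap the manager in an RPC server, then start and wait. A hedged, self-contained sketch — the topic, server name and endpoint class are placeholders rather than values from the diff:

```python
# Sketch of the RPC-server wiring used by the cmd/* launchers (illustrative).
from oslo_config import cfg
import oslo_messaging as messaging


class PingEndpoint(object):
    def ping(self, ctxt):
        return 'pong'


def launch(topic='terracotta.local_manager', server='host-1'):
    transport = messaging.get_transport(cfg.CONF)
    target = messaging.Target(topic=topic, server=server)
    rpc_server = messaging.get_rpc_server(transport, target, [PingEndpoint()],
                                          executor='eventlet')
    rpc_server.start()
    rpc_server.wait()
```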
@ -1,5 +1,5 @@
|
||||
# Copyright 2012 Anton Beloglazov
|
||||
# Copyright 2015 Huawei Technologies Co. Ltd
|
||||
# Copyright 2012 Anton Beloglazov
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
@ -13,18 +13,19 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
""" The functions from this module are shared by other components.
|
||||
"""
|
||||
The functions from this module are shared by other components.
|
||||
"""
|
||||
|
||||
import json
|
||||
import numpy
|
||||
import os
|
||||
import re
|
||||
import subprocess
|
||||
import time
|
||||
|
||||
|
||||
def build_local_vm_path(local_data_directory):
|
||||
""" Build the path to the local VM data directory.
|
||||
"""Build the path to the local VM data directory.
|
||||
|
||||
:param local_data_directory: The base local data path.
|
||||
:return: The path to the local VM data directory.
|
||||
@ -33,7 +34,7 @@ def build_local_vm_path(local_data_directory):
|
||||
|
||||
|
||||
def build_local_host_path(local_data_directory):
|
||||
""" Build the path to the local host data file.
|
||||
"""Build the path to the local host data file.
|
||||
|
||||
:param local_data_directory: The base local data path.
|
||||
:return: The path to the local host data file.
|
||||
@ -42,7 +43,7 @@ def build_local_host_path(local_data_directory):
|
||||
|
||||
|
||||
def physical_cpu_count(vir_connection):
|
||||
""" Get the number of physical CPUs using libvirt.
|
||||
"""Get the number of physical CPUs using libvirt.
|
||||
|
||||
:param vir_connection: A libvirt connection object.
|
||||
:return: The number of physical CPUs.
|
||||
@ -51,7 +52,7 @@ def physical_cpu_count(vir_connection):
|
||||
|
||||
|
||||
def physical_cpu_mhz(vir_connection):
|
||||
""" Get the CPU frequency in MHz using libvirt.
|
||||
"""Get the CPU frequency in MHz using libvirt.
|
||||
|
||||
:param vir_connection: A libvirt connection object.
|
||||
:return: The CPU frequency in MHz.
|
||||
@ -60,7 +61,7 @@ def physical_cpu_mhz(vir_connection):
|
||||
|
||||
|
||||
def physical_cpu_mhz_total(vir_connection):
|
||||
""" Get the sum of the core CPU frequencies in MHz using libvirt.
|
||||
"""Get the sum of the core CPU frequencies in MHz using libvirt.
|
||||
|
||||
:param vir_connection: A libvirt connection object.
|
||||
:return: The total CPU frequency in MHz.
|
||||
@ -70,7 +71,7 @@ def physical_cpu_mhz_total(vir_connection):
|
||||
|
||||
|
||||
def frange(start, end, step):
|
||||
""" A range generator for floats.
|
||||
"""A range generator for floats.
|
||||
|
||||
:param start: The starting value.
|
||||
:param end: The end value.
|
||||
@ -82,7 +83,7 @@ def frange(start, end, step):
|
||||
|
||||
|
||||
def call_function_by_name(name, args):
|
||||
""" Call a function specified by a fully qualified name.
|
||||
"""Call a function specified by a fully qualified name.
|
||||
|
||||
:param name: A fully qualified name of a function.
|
||||
:param args: A list of positional arguments of the function.
|
||||
@ -97,7 +98,7 @@ def call_function_by_name(name, args):
|
||||
|
||||
|
||||
def parse_parameters(params):
|
||||
""" Parse algorithm parameters from the config file.
|
||||
"""Parse algorithm parameters from the config file.
|
||||
|
||||
:param params: JSON encoded parameters.
|
||||
:return: A dict of parameters.
|
||||
@ -107,7 +108,7 @@ def parse_parameters(params):
|
||||
|
||||
|
||||
def parse_compute_hosts(compute_hosts):
|
||||
""" Transform a coma-separated list of host names into a list.
|
||||
"""Transform a coma-separated list of host names into a list.
|
||||
|
||||
:param compute_hosts: A coma-separated list of host names.
|
||||
:return: A list of host names.
|
||||
@ -116,7 +117,7 @@ def parse_compute_hosts(compute_hosts):
|
||||
|
||||
|
||||
def calculate_migration_time(vms, bandwidth):
|
||||
""" Calculate the mean migration time from VM RAM usage data.
|
||||
"""Calculate the mean migration time from VM RAM usage data.
|
||||
|
||||
:param vms: A map of VM UUIDs to the corresponding maximum RAM in MB.
|
||||
:param bandwidth: The network bandwidth in MB/s.
|
||||
@ -126,7 +127,7 @@ def calculate_migration_time(vms, bandwidth):
|
||||
|
||||
|
||||
def execute_on_hosts(hosts, commands):
|
||||
""" Execute Shell command on hosts over SSH.
|
||||
"""Execute Shell command on hosts over SSH.
|
||||
|
||||
:param hosts: A list of host names.
|
||||
:param commands: A list of Shell commands.
|
||||
|
@ -212,6 +212,11 @@ database_opts = [
               'supported by SQLAlchemy')
]

db_cleaner_opts = [
    cfg.StrOpt('log_directory', default='/var/log/terracotta',
               help='db_cleaner log directory')
]

CONF = cfg.CONF

CONF.register_opts(pecan_opts, group='pecan')
@ -221,6 +226,7 @@ CONF.register_opts(global_manager_opts, group='global_manager')
CONF.register_opts(local_manager_opts, group='local_manager')
CONF.register_opts(collector_opts, group='collector')
CONF.register_opts(database_opts, group='database')
CONF.register_opts(db_cleaner_opts, group='db_cleaner')

CONF.register_cli_opt(use_debugger)
CONF.register_cli_opt(launch_opt)
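The new db_cleaner option group registered above is read back through oslo.config like any other group. A hedged usage sketch, assuming terracotta.config has been imported so the groups are registered:

```python
# Illustrative read of the option added in this hunk.
from oslo_config import cfg

from terracotta import config  # noqa: F401  (registers the option groups)

CONF = cfg.CONF
log_dir = CONF.db_cleaner.log_directory  # '/var/log/terracotta' by default
```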
|
||||
|
@ -1,19 +0,0 @@
# Copyright (c) 2015 - 2016 Huawei Technologies Co., Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
DB abstraction for Terracotta
"""

from terracotta.db.api import *
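One hedged note on the db-layer hunks that follow: several of them rewrite `where(x.c.id == None)` as `where(x.c.id is None)`. In SQLAlchemy the `==` form is overloaded to emit `IS NULL`, whereas Python's `is None` evaluates to a plain boolean before SQLAlchemy ever sees it, so the usual lint-friendly spelling is `.is_(None)` (or `isnot(None)`), as in this illustrative fragment:

```python
# Illustration (not from the commit): how a NULL check is normally spelled
# for a SQLAlchemy Core column so that flake8's E711 is also satisfied.
from sqlalchemy import Column, Integer, MetaData, Table, select

metadata = MetaData()
example = Table('example', metadata, Column('id', Integer))

stmt_overloaded = select([example.c.id]).where(example.c.id == None)  # noqa: E711
stmt_idiomatic = select([example.c.id]).where(example.c.id.is_(None))
# Both compile to: SELECT example.id FROM example WHERE example.id IS NULL
```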
|
@ -28,7 +28,7 @@ IMPL = db_concurrency.TpoolDbapiWrapper(CONF, backend_mapping=_BACKEND_MAPPING)
|
||||
|
||||
|
||||
def select_cpu_mhz_for_vm(uuid, limit):
|
||||
""" Select n last values of CPU MHz for a VM UUID.
|
||||
"""Select n last values of CPU MHz for a VM UUID.
|
||||
|
||||
:param uuid: The UUID of a VM.
|
||||
:param limit: The number of last values to select.
|
||||
@ -38,7 +38,7 @@ def select_cpu_mhz_for_vm(uuid, limit):
|
||||
|
||||
|
||||
def select_last_cpu_mhz_for_vms():
|
||||
""" Select the last value of CPU MHz for all the VMs.
|
||||
"""Select the last value of CPU MHz for all the VMs.
|
||||
|
||||
:return: A dict of VM UUIDs to the last CPU MHz values.
|
||||
"""
|
||||
@ -46,7 +46,7 @@ def select_last_cpu_mhz_for_vms():
|
||||
|
||||
|
||||
def select_vm_id(uuid):
|
||||
""" Select the ID of a VM by the VM UUID, or insert a new record.
|
||||
"""Select the ID of a VM by the VM UUID, or insert a new record.
|
||||
|
||||
:param uuid: The UUID of a VM.
|
||||
:return: The ID of the VM.
|
||||
@ -55,7 +55,7 @@ def select_vm_id(uuid):
|
||||
|
||||
|
||||
def insert_vm_cpu_mhz(data):
|
||||
""" Insert a set of CPU MHz values for a set of VMs.
|
||||
"""Insert a set of CPU MHz values for a set of VMs.
|
||||
|
||||
:param data: A dictionary of VM UUIDs and CPU MHz values.
|
||||
"""
|
||||
@ -63,7 +63,7 @@ def insert_vm_cpu_mhz(data):
|
||||
|
||||
|
||||
def update_host(hostname, cpu_mhz, cpu_cores, ram):
|
||||
""" Insert new or update the corresponding host record.
|
||||
"""Insert new or update the corresponding host record.
|
||||
|
||||
:param hostname: A host name.
|
||||
:param cpu_mhz: The total CPU frequency of the host in MHz.
|
||||
@ -75,7 +75,7 @@ def update_host(hostname, cpu_mhz, cpu_cores, ram):
|
||||
|
||||
|
||||
def insert_host_cpu_mhz(hostname, cpu_mhz):
|
||||
""" Insert a CPU MHz value for a host.
|
||||
"""Insert a CPU MHz value for a host.
|
||||
|
||||
:param hostname: A host name.
|
||||
:param cpu_mhz: The CPU usage of the host in MHz.
|
||||
@ -84,7 +84,7 @@ def insert_host_cpu_mhz(hostname, cpu_mhz):
|
||||
|
||||
|
||||
def select_cpu_mhz_for_host(hostname, limit):
|
||||
""" Select n last values of CPU MHz for a host.
|
||||
"""Select n last values of CPU MHz for a host.
|
||||
|
||||
:param hostname: A host name.
|
||||
:param limit: The number of last values to select.
|
||||
@ -94,7 +94,7 @@ def select_cpu_mhz_for_host(hostname, limit):
|
||||
|
||||
|
||||
def select_last_cpu_mhz_for_hosts():
|
||||
""" Select the last value of CPU MHz for all the hosts.
|
||||
"""Select the last value of CPU MHz for all the hosts.
|
||||
|
||||
:return: A dict of host names to the last CPU MHz values.
|
||||
"""
|
||||
@ -102,7 +102,7 @@ def select_last_cpu_mhz_for_hosts():
|
||||
|
||||
|
||||
def select_host_characteristics(self):
|
||||
""" Select the characteristics of all the hosts.
|
||||
"""Select the characteristics of all the hosts.
|
||||
|
||||
:return: Three dicts of hostnames to CPU MHz, cores, and RAM.
|
||||
"""
|
||||
@ -110,7 +110,7 @@ def select_host_characteristics(self):
|
||||
|
||||
|
||||
def select_host_id(hostname):
|
||||
""" Select the ID of a host.
|
||||
"""Select the ID of a host.
|
||||
|
||||
:param hostname: A host name.
|
||||
:return: The ID of the host.
|
||||
@ -119,7 +119,7 @@ def select_host_id(hostname):
|
||||
|
||||
|
||||
def select_host_ids():
|
||||
""" Select the IDs of all the hosts.
|
||||
"""Select the IDs of all the hosts.
|
||||
|
||||
:return: A dict of host names to IDs.
|
||||
"""
|
||||
@ -127,7 +127,7 @@ def select_host_ids():
|
||||
|
||||
|
||||
def cleanup_vm_resource_usage(datetime_threshold):
|
||||
""" Delete VM resource usage data older than the threshold.
|
||||
"""Delete VM resource usage data older than the threshold.
|
||||
|
||||
:param datetime_threshold: A datetime threshold.
|
||||
"""
|
||||
@ -135,7 +135,7 @@ def cleanup_vm_resource_usage(datetime_threshold):
|
||||
|
||||
|
||||
def cleanup_host_resource_usage(sdatetime_threshold):
|
||||
""" Delete host resource usage data older than the threshold.
|
||||
"""Delete host resource usage data older than the threshold.
|
||||
|
||||
:param datetime_threshold: A datetime threshold.
|
||||
"""
|
||||
@ -143,7 +143,7 @@ def cleanup_host_resource_usage(sdatetime_threshold):
|
||||
|
||||
|
||||
def insert_host_states(hosts):
|
||||
""" Insert host states for a set of hosts.
|
||||
"""Insert host states for a set of hosts.
|
||||
|
||||
:param hosts: A dict of hostnames to states (0, 1).
|
||||
"""
|
||||
@ -151,7 +151,7 @@ def insert_host_states(hosts):
|
||||
|
||||
|
||||
def select_host_states():
|
||||
""" Select the current states of all the hosts.
|
||||
"""Select the current states of all the hosts.
|
||||
|
||||
:return: A dict of host names to states.
|
||||
"""
|
||||
@ -159,7 +159,7 @@ def select_host_states():
|
||||
|
||||
|
||||
def select_active_hosts():
|
||||
""" Select the currently active hosts.
|
||||
"""Select the currently active hosts.
|
||||
|
||||
:return: A list of host names.
|
||||
"""
|
||||
@ -167,7 +167,7 @@ def select_active_hosts():
|
||||
|
||||
|
||||
def select_inactive_hosts():
|
||||
""" Select the currently inactive hosts.
|
||||
"""Select the currently inactive hosts.
|
||||
|
||||
:return: A list of host names.
|
||||
"""
|
||||
@ -175,7 +175,7 @@ def select_inactive_hosts():
|
||||
|
||||
|
||||
def insert_host_overload(hostname, overload):
|
||||
""" Insert whether a host is overloaded.
|
||||
"""Insert whether a host is overloaded.
|
||||
|
||||
:param hostname: A host name.
|
||||
:param overload: Whether the host is overloaded.
|
||||
@ -184,7 +184,7 @@ def insert_host_overload(hostname, overload):
|
||||
|
||||
|
||||
def insert_vm_migration(vm, hostname):
|
||||
""" Insert a VM migration.
|
||||
"""Insert a VM migration.
|
||||
|
||||
:param hostname: A VM UUID.
|
||||
:param hostname: A host name.
|
||||
|
@ -19,11 +19,11 @@ import sys
|
||||
import threading
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_db import exception as db_exc
|
||||
from oslo_db import options
|
||||
from oslo_db.sqlalchemy import session as db_session
|
||||
from oslo_log import log as logging
|
||||
|
||||
from sqlalchemy import and_
|
||||
from sqlalchemy.sql import select
|
||||
|
||||
CONF = cfg.CONF
|
||||
LOG = logging.getLogger(__name__)
|
||||
@ -68,7 +68,7 @@ def get_backend():
|
||||
|
||||
|
||||
def select_cpu_mhz_for_vm(self, uuid, n):
|
||||
""" Select n last values of CPU MHz for a VM UUID.
|
||||
"""Select n last values of CPU MHz for a VM UUID.
|
||||
|
||||
:param uuid: The UUID of a VM.
|
||||
:param n: The number of last values to select.
|
||||
@ -83,8 +83,9 @@ def select_cpu_mhz_for_vm(self, uuid, n):
|
||||
res = self.connection.execute(sel).fetchall()
|
||||
return list(reversed([int(x[0]) for x in res]))
|
||||
|
||||
|
||||
def select_last_cpu_mhz_for_vms(self):
|
||||
""" Select the last value of CPU MHz for all the VMs.
|
||||
"""Select the last value of CPU MHz for all the VMs.
|
||||
|
||||
:return: A dict of VM UUIDs to the last CPU MHz values.
|
||||
"""
|
||||
@ -94,7 +95,7 @@ def select_last_cpu_mhz_for_vms(self):
|
||||
vru1.outerjoin(vru2, and_(
|
||||
vru1.c.vm_id == vru2.c.vm_id,
|
||||
vru1.c.id < vru2.c.id))]). \
|
||||
where(vru2.c.id == None)
|
||||
where(vru2.c.id is None)
|
||||
vms_cpu_mhz = dict(self.connection.execute(sel).fetchall())
|
||||
vms_uuids = dict(self.vms.select().execute().fetchall())
|
||||
|
||||
@ -106,8 +107,9 @@ def select_last_cpu_mhz_for_vms(self):
|
||||
vms_last_mhz[str(uuid)] = 0
|
||||
return vms_last_mhz
|
||||
|
||||
|
||||
def select_vm_id(self, uuid):
|
||||
""" Select the ID of a VM by the VM UUID, or insert a new record.
|
||||
"""Select the ID of a VM by the VM UUID, or insert a new record.
|
||||
|
||||
:param uuid: The UUID of a VM.
|
||||
:return: The ID of the VM.
|
||||
@ -121,8 +123,9 @@ def select_vm_id(self, uuid):
|
||||
else:
|
||||
return int(row['id'])
|
||||
|
||||
|
||||
def insert_vm_cpu_mhz(self, data):
|
||||
""" Insert a set of CPU MHz values for a set of VMs.
|
||||
"""Insert a set of CPU MHz values for a set of VMs.
|
||||
|
||||
:param data: A dictionary of VM UUIDs and CPU MHz values.
|
||||
"""
|
||||
@ -134,8 +137,9 @@ def insert_vm_cpu_mhz(self, data):
|
||||
'cpu_mhz': cpu_mhz})
|
||||
self.vm_resource_usage.insert().execute(query)
|
||||
|
||||
|
||||
def update_host(self, hostname, cpu_mhz, cpu_cores, ram):
|
||||
""" Insert new or update the corresponding host record.
|
||||
"""Insert new or update the corresponding host record.
|
||||
|
||||
:param hostname: A host name.
|
||||
:param cpu_mhz: The total CPU frequency of the host in MHz.
|
||||
@ -163,8 +167,9 @@ def update_host(self, hostname, cpu_mhz, cpu_cores, ram):
|
||||
ram=ram))
|
||||
return int(row['id'])
|
||||
|
||||
|
||||
def insert_host_cpu_mhz(self, hostname, cpu_mhz):
|
||||
""" Insert a CPU MHz value for a host.
|
||||
"""Insert a CPU MHz value for a host.
|
||||
|
||||
:param hostname: A host name.
|
||||
:param cpu_mhz: The CPU usage of the host in MHz.
|
||||
@ -173,8 +178,9 @@ def insert_host_cpu_mhz(self, hostname, cpu_mhz):
|
||||
host_id=self.select_host_id(hostname),
|
||||
cpu_mhz=cpu_mhz)
|
||||
|
||||
|
||||
def select_cpu_mhz_for_host(self, hostname, n):
|
||||
""" Select n last values of CPU MHz for a host.
|
||||
"""Select n last values of CPU MHz for a host.
|
||||
|
||||
:param hostname: A host name.
|
||||
:param n: The number of last values to select.
|
||||
@ -189,8 +195,9 @@ def select_cpu_mhz_for_host(self, hostname, n):
|
||||
res = self.connection.execute(sel).fetchall()
|
||||
return list(reversed([int(x[0]) for x in res]))
|
||||
|
||||
|
||||
def select_last_cpu_mhz_for_hosts(self):
|
||||
""" Select the last value of CPU MHz for all the hosts.
|
||||
"""Select the last value of CPU MHz for all the hosts.
|
||||
|
||||
:return: A dict of host names to the last CPU MHz values.
|
||||
"""
|
||||
@ -200,7 +207,7 @@ def select_last_cpu_mhz_for_hosts(self):
|
||||
hru1.outerjoin(hru2, and_(
|
||||
hru1.c.host_id == hru2.c.host_id,
|
||||
hru1.c.id < hru2.c.id))]). \
|
||||
where(hru2.c.id == None)
|
||||
where(hru2.c.id is None)
|
||||
hosts_cpu_mhz = dict(self.connection.execute(sel).fetchall())
|
||||
|
||||
sel = select([self.hosts.c.id, self.hosts.c.hostname])
|
||||
@ -214,8 +221,9 @@ def select_last_cpu_mhz_for_hosts(self):
|
||||
hosts_last_mhz[str(hostname)] = 0
|
||||
return hosts_last_mhz
|
||||
|
||||
|
||||
def select_host_characteristics(self):
|
||||
""" Select the characteristics of all the hosts.
|
||||
"""Select the characteristics of all the hosts.
|
||||
|
||||
:return: Three dicts of hostnames to CPU MHz, cores, and RAM.
|
||||
"""
|
||||
@ -229,8 +237,9 @@ def select_host_characteristics(self):
|
||||
hosts_ram[hostname] = int(x[4])
|
||||
return hosts_cpu_mhz, hosts_cpu_cores, hosts_ram
|
||||
|
||||
|
||||
def select_host_id(self, hostname):
|
||||
""" Select the ID of a host.
|
||||
"""Select the ID of a host.
|
||||
|
||||
:param hostname: A host name.
|
||||
:return: The ID of the host.
|
||||
@ -242,16 +251,18 @@ def select_host_id(self, hostname):
|
||||
raise LookupError('No host found for hostname: %s', hostname)
|
||||
return int(row['id'])
|
||||
|
||||
|
||||
def select_host_ids(self):
|
||||
""" Select the IDs of all the hosts.
|
||||
"""Select the IDs of all the hosts.
|
||||
|
||||
:return: A dict of host names to IDs.
|
||||
"""
|
||||
return dict((str(x[1]), int(x[0]))
|
||||
for x in self.hosts.select().execute().fetchall())
|
||||
|
||||
|
||||
def cleanup_vm_resource_usage(self, datetime_threshold):
|
||||
""" Delete VM resource usage data older than the threshold.
|
||||
"""Delete VM resource usage data older than the threshold.
|
||||
|
||||
:param datetime_threshold: A datetime threshold.
|
||||
"""
|
||||
@ -259,8 +270,9 @@ def cleanup_vm_resource_usage(self, datetime_threshold):
|
||||
self.vm_resource_usage.delete().where(
|
||||
self.vm_resource_usage.c.timestamp < datetime_threshold))
|
||||
|
||||
|
||||
def cleanup_host_resource_usage(self, datetime_threshold):
|
||||
""" Delete host resource usage data older than the threshold.
|
||||
"""Delete host resource usage data older than the threshold.
|
||||
|
||||
:param datetime_threshold: A datetime threshold.
|
||||
"""
|
||||
@ -268,8 +280,9 @@ def cleanup_host_resource_usage(self, datetime_threshold):
|
||||
self.host_resource_usage.delete().where(
|
||||
self.host_resource_usage.c.timestamp < datetime_threshold))
|
||||
|
||||
|
||||
def insert_host_states(self, hosts):
|
||||
""" Insert host states for a set of hosts.
|
||||
"""Insert host states for a set of hosts.
|
||||
|
||||
:param hosts: A dict of hostnames to states (0, 1).
|
||||
"""
|
||||
@ -280,8 +293,9 @@ def insert_host_states(self, hosts):
|
||||
self.connection.execute(
|
||||
self.host_states.insert(), to_insert)
|
||||
|
||||
|
||||
def select_host_states(self):
|
||||
""" Select the current states of all the hosts.
|
||||
"""Select the current states of all the hosts.
|
||||
|
||||
:return: A dict of host names to states.
|
||||
"""
|
||||
@ -291,7 +305,7 @@ def select_host_states(self):
|
||||
hs1.outerjoin(hs2, and_(
|
||||
hs1.c.host_id == hs2.c.host_id,
|
||||
hs1.c.id < hs2.c.id))]). \
|
||||
where(hs2.c.id == None)
|
||||
where(hs2.c.id is None)
|
||||
data = dict(self.connection.execute(sel).fetchall())
|
||||
host_ids = self.select_host_ids()
|
||||
host_states = {}
|
||||
@ -302,8 +316,9 @@ def select_host_states(self):
|
||||
host_states[str(host)] = 1
|
||||
return host_states
|
||||
|
||||
|
||||
def select_active_hosts(self):
|
||||
""" Select the currently active hosts.
|
||||
"""Select the currently active hosts.
|
||||
|
||||
:return: A list of host names.
|
||||
"""
|
||||
@ -311,8 +326,9 @@ def select_active_hosts(self):
|
||||
for host, state in self.select_host_states().items()
|
||||
if state == 1]
|
||||
|
||||
|
||||
def select_inactive_hosts(self):
|
||||
""" Select the currently inactive hosts.
|
||||
"""Select the currently inactive hosts.
|
||||
|
||||
:return: A list of host names.
|
||||
"""
|
||||
@ -320,8 +336,9 @@ def select_inactive_hosts(self):
|
||||
for host, state in self.select_host_states().items()
|
||||
if state == 0]
|
||||
|
||||
|
||||
def insert_host_overload(self, hostname, overload):
|
||||
""" Insert whether a host is overloaded.
|
||||
"""Insert whether a host is overloaded.
|
||||
|
||||
:param hostname: A host name.
|
||||
:param overload: Whether the host is overloaded.
|
||||
@ -330,8 +347,9 @@ def insert_host_overload(self, hostname, overload):
|
||||
host_id=self.select_host_id(hostname),
|
||||
overload=int(overload))
|
||||
|
||||
|
||||
def insert_vm_migration(self, vm, hostname):
|
||||
""" Insert a VM migration.
|
||||
"""nsert a VM migration.
|
||||
|
||||
:param hostname: A VM UUID.
|
||||
:param hostname: A host name.
|
||||
|
@ -19,15 +19,16 @@ SQLAlchemy models for Terracotta data.
from oslo_config import cfg
from oslo_db.sqlalchemy import models
from oslo_utils import timeutils
from sqlalchemy import Column, Integer, String, Text, schema
from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import ForeignKey, DateTime, Boolean
from sqlalchemy.orm import relationship, backref, validates
from sqlalchemy import DateTime, Boolean
from sqlalchemy.orm import relationship


CONF = cfg.CONF
BASE = declarative_base()


class TerracottaBase(models.TimestampMixin,
                     models.ModelBase):
    """Base class for TerracottaBase Models."""
@ -68,6 +69,7 @@ class HostResourceUsage(BASE, TerracottaBase):
                        foreign_keys=host_id,
                        primaryjoin='HostResourceUsage.host_id == Host.id')


class VM(BASE, TerracottaBase):
    __tablename__ = 'vms'
    id = Column(Integer, primary_key=True)
@ -123,4 +125,4 @@ class HostOverload(BASE, TerracottaBase):

    host = relationship(Host, backref="host_overload",
                        foreign_keys=host_id,
                        primaryjoin='host_overload.host_id == Host.id')
                        primaryjoin='host_overload.host_id == Host.id')
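For orientation, a hedged, stripped-down version of the declarative pattern these model hunks touch — the class and columns here are illustrative, not one of the real Terracotta tables:

```python
# Minimal sketch of the oslo.db declarative-base pattern used above.
from oslo_db.sqlalchemy import models
from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base

BASE = declarative_base()


class ExampleHost(BASE, models.TimestampMixin, models.ModelBase):
    """Illustrative table only -- not a real Terracotta model."""
    __tablename__ = 'example_hosts'
    id = Column(Integer, primary_key=True)
    hostname = Column(String(255))
```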
|
||||
|
@ -13,23 +13,21 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import datetime
|
||||
from sqlalchemy import *
|
||||
from sqlalchemy.engine.base import Connection
|
||||
|
||||
from oslo_log import log as logging
|
||||
|
||||
from sqlalchemy import and_
|
||||
from sqlalchemy import select
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Database(object):
|
||||
""" A class representing the database, where fields are tables.
|
||||
"""A class representing the database, where fields are tables.
|
||||
"""
|
||||
|
||||
def __init__(self, connection, hosts, host_resource_usage, vms,
|
||||
vm_resource_usage, vm_migrations, host_states, host_overload):
|
||||
""" Initialize the database.
|
||||
"""Initialize the database.
|
||||
|
||||
:param connection: A database connection table.
|
||||
:param hosts: The hosts table.
|
||||
@ -51,7 +49,7 @@ class Database(object):
|
||||
LOG.debug('Instantiated a Database object')
|
||||
|
||||
def select_cpu_mhz_for_vm(self, uuid, n):
|
||||
""" Select n last values of CPU MHz for a VM UUID.
|
||||
"""Select n last values of CPU MHz for a VM UUID.
|
||||
|
||||
:param uuid: The UUID of a VM.
|
||||
:param n: The number of last values to select.
|
||||
@ -67,7 +65,7 @@ class Database(object):
|
||||
return list(reversed([int(x[0]) for x in res]))
|
||||
|
||||
def select_last_cpu_mhz_for_vms(self):
|
||||
""" Select the last value of CPU MHz for all the VMs.
|
||||
"""Select the last value of CPU MHz for all the VMs.
|
||||
|
||||
:return: A dict of VM UUIDs to the last CPU MHz values.
|
||||
"""
|
||||
@ -77,7 +75,7 @@ class Database(object):
|
||||
vru1.outerjoin(vru2, and_(
|
||||
vru1.c.vm_id == vru2.c.vm_id,
|
||||
vru1.c.id < vru2.c.id))]). \
|
||||
where(vru2.c.id == None)
|
||||
where(vru2.c.id is None)
|
||||
vms_cpu_mhz = dict(self.connection.execute(sel).fetchall())
|
||||
vms_uuids = dict(self.vms.select().execute().fetchall())
|
||||
|
||||
@ -90,7 +88,7 @@ class Database(object):
|
||||
return vms_last_mhz
|
||||
|
||||
def select_vm_id(self, uuid):
|
||||
""" Select the ID of a VM by the VM UUID, or insert a new record.
|
||||
"""Select the ID of a VM by the VM UUID, or insert a new record.
|
||||
|
||||
:param uuid: The UUID of a VM.
|
||||
:return: The ID of the VM.
|
||||
@ -105,7 +103,7 @@ class Database(object):
|
||||
return int(row['id'])
|
||||
|
||||
def insert_vm_cpu_mhz(self, data):
|
||||
""" Insert a set of CPU MHz values for a set of VMs.
|
||||
"""Insert a set of CPU MHz values for a set of VMs.
|
||||
|
||||
:param data: A dictionary of VM UUIDs and CPU MHz values.
|
||||
"""
|
||||
@ -118,7 +116,7 @@ class Database(object):
|
||||
self.vm_resource_usage.insert().execute(query)
|
||||
|
||||
def update_host(self, hostname, cpu_mhz, cpu_cores, ram):
|
||||
""" Insert new or update the corresponding host record.
|
||||
"""Insert new or update the corresponding host record.
|
||||
|
||||
:param hostname: A host name.
|
||||
:param cpu_mhz: The total CPU frequency of the host in MHz.
|
||||
@ -147,7 +145,7 @@ class Database(object):
|
||||
return int(row['id'])
|
||||
|
||||
def insert_host_cpu_mhz(self, hostname, cpu_mhz):
|
||||
""" Insert a CPU MHz value for a host.
|
||||
"""Insert a CPU MHz value for a host.
|
||||
|
||||
:param hostname: A host name.
|
||||
:param cpu_mhz: The CPU usage of the host in MHz.
|
||||
@ -157,7 +155,7 @@ class Database(object):
|
||||
cpu_mhz=cpu_mhz)
|
||||
|
||||
def select_cpu_mhz_for_host(self, hostname, n):
|
||||
""" Select n last values of CPU MHz for a host.
|
||||
"""Select n last values of CPU MHz for a host.
|
||||
|
||||
:param hostname: A host name.
|
||||
:param n: The number of last values to select.
|
||||
@ -173,7 +171,7 @@ class Database(object):
|
||||
return list(reversed([int(x[0]) for x in res]))
|
||||
|
||||
def select_last_cpu_mhz_for_hosts(self):
|
||||
""" Select the last value of CPU MHz for all the hosts.
|
||||
"""Select the last value of CPU MHz for all the hosts.
|
||||
|
||||
:return: A dict of host names to the last CPU MHz values.
|
||||
"""
|
||||
@ -183,7 +181,7 @@ class Database(object):
|
||||
hru1.outerjoin(hru2, and_(
|
||||
hru1.c.host_id == hru2.c.host_id,
|
||||
hru1.c.id < hru2.c.id))]). \
|
||||
where(hru2.c.id == None)
|
||||
where(hru2.c.id is None)
|
||||
hosts_cpu_mhz = dict(self.connection.execute(sel).fetchall())
|
||||
|
||||
sel = select([self.hosts.c.id, self.hosts.c.hostname])
|
||||
@ -198,7 +196,7 @@ class Database(object):
|
||||
return hosts_last_mhz
|
||||
|
||||
def select_host_characteristics(self):
|
||||
""" Select the characteristics of all the hosts.
|
||||
"""Select the characteristics of all the hosts.
|
||||
|
||||
:return: Three dicts of hostnames to CPU MHz, cores, and RAM.
|
||||
"""
|
||||
@ -213,7 +211,7 @@ class Database(object):
|
||||
return hosts_cpu_mhz, hosts_cpu_cores, hosts_ram
|
||||
|
||||
def select_host_id(self, hostname):
|
||||
""" Select the ID of a host.
|
||||
"""Select the ID of a host.
|
||||
|
||||
:param hostname: A host name.
|
||||
:return: The ID of the host.
|
||||
@ -226,7 +224,7 @@ class Database(object):
|
||||
return int(row['id'])
|
||||
|
||||
def select_host_ids(self):
|
||||
""" Select the IDs of all the hosts.
|
||||
"""Select the IDs of all the hosts.
|
||||
|
||||
:return: A dict of host names to IDs.
|
||||
"""
|
||||
@ -234,7 +232,7 @@ class Database(object):
|
||||
for x in self.hosts.select().execute().fetchall())
|
||||
|
||||
def cleanup_vm_resource_usage(self, datetime_threshold):
|
||||
""" Delete VM resource usage data older than the threshold.
|
||||
"""Delete VM resource usage data older than the threshold.
|
||||
|
||||
:param datetime_threshold: A datetime threshold.
|
||||
"""
|
||||
@ -243,7 +241,7 @@ class Database(object):
|
||||
self.vm_resource_usage.c.timestamp < datetime_threshold))
|
||||
|
||||
def cleanup_host_resource_usage(self, datetime_threshold):
|
||||
""" Delete host resource usage data older than the threshold.
|
||||
"""Delete host resource usage data older than the threshold.
|
||||
|
||||
:param datetime_threshold: A datetime threshold.
|
||||
"""
|
||||
@ -252,7 +250,7 @@ class Database(object):
|
||||
self.host_resource_usage.c.timestamp < datetime_threshold))
|
||||
|
||||
def insert_host_states(self, hosts):
|
||||
""" Insert host states for a set of hosts.
|
||||
"""Insert host states for a set of hosts.
|
||||
|
||||
:param hosts: A dict of hostnames to states (0, 1).
|
||||
"""
|
||||
@ -264,7 +262,7 @@ class Database(object):
|
||||
self.host_states.insert(), to_insert)
|
||||
|
||||
def select_host_states(self):
|
||||
""" Select the current states of all the hosts.
|
||||
"""Select the current states of all the hosts.
|
||||
|
||||
:return: A dict of host names to states.
|
||||
"""
|
||||
@ -274,7 +272,7 @@ class Database(object):
|
||||
hs1.outerjoin(hs2, and_(
|
||||
hs1.c.host_id == hs2.c.host_id,
|
||||
hs1.c.id < hs2.c.id))]). \
|
||||
where(hs2.c.id == None)
|
||||
where(hs2.c.id is None)
|
||||
data = dict(self.connection.execute(sel).fetchall())
|
||||
host_ids = self.select_host_ids()
|
||||
host_states = {}
|
||||
@ -286,7 +284,7 @@ class Database(object):
|
||||
return host_states
|
||||
|
||||
def select_active_hosts(self):
|
||||
""" Select the currently active hosts.
|
||||
"""Select the currently active hosts.
|
||||
|
||||
:return: A list of host names.
|
||||
"""
|
||||
@ -295,7 +293,7 @@ class Database(object):
|
||||
if state == 1]
|
||||
|
||||
def select_inactive_hosts(self):
|
||||
""" Select the currently inactive hosts.
|
||||
"""Select the currently inactive hosts.
|
||||
|
||||
:return: A list of host names.
|
||||
"""
|
||||
@ -304,7 +302,7 @@ class Database(object):
|
||||
if state == 0]
|
||||
|
||||
def insert_host_overload(self, hostname, overload):
|
||||
""" Insert whether a host is overloaded.
|
||||
"""Insert whether a host is overloaded.
|
||||
|
||||
:param hostname: A host name.
|
||||
:param overload: Whether the host is overloaded.
|
||||
@ -314,7 +312,7 @@ class Database(object):
|
||||
overload=int(overload))
|
||||
|
||||
def insert_vm_migration(self, vm, hostname):
|
||||
""" Insert a VM migration.
|
||||
"""Insert a VM migration.
|
||||
|
||||
:param hostname: A VM UUID.
|
||||
:param hostname: A host name.
|
||||
|
@ -13,7 +13,7 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
""" The database cleaner module.
|
||||
"""The database cleaner module.
|
||||
|
||||
The database cleaner periodically cleans up the data on resource usage
|
||||
by VMs stored in the database. This is required to avoid excess growth
|
||||
@ -22,24 +22,22 @@ of the database size.
|
||||
|
||||
import datetime
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
|
||||
import terracotta.common as common
|
||||
from terracotta.config import *
|
||||
from terracotta.utils.db_utils import *
|
||||
|
||||
from terracotta.config import cfg
|
||||
from terracotta.utils.db_utils import init_db
|
||||
|
||||
CONF = cfg.CONF
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def start():
|
||||
""" Start the database cleaner loop.
|
||||
"""Start the database cleaner loop.
|
||||
|
||||
:return: The final state.
|
||||
"""
|
||||
config = read_and_validate_config([DEFAULT_CONFIG_PATH, CONFIG_PATH],
|
||||
REQUIRED_FIELDS)
|
||||
config = CONF
|
||||
|
||||
common.init_logging(
|
||||
config['log_directory'],
|
||||
@ -57,7 +55,7 @@ def start():
|
||||
|
||||
|
||||
def init_state(config):
|
||||
""" Initialize a dict for storing the state of the database cleaner.
|
||||
"""Initialize a dict for storing the state of the database cleaner.
|
||||
|
||||
:param config: A config dictionary.
|
||||
:return: A dictionary containing the initial state of the database cleaner.
|
||||
@ -69,7 +67,7 @@ def init_state(config):
|
||||
|
||||
|
||||
def execute(config, state):
|
||||
""" Execute an iteration of the database cleaner.
|
||||
"""Execute an iteration of the database cleaner.
|
||||
|
||||
:param config: A config dictionary.
|
||||
:param state: A state dictionary.
|
||||
@ -84,7 +82,7 @@ def execute(config, state):
|
||||
|
||||
|
||||
def today():
|
||||
""" Return the today's datetime.
|
||||
"""Return the today's datetime.
|
||||
|
||||
:return: A datetime object representing current date and time.
|
||||
"""
|
||||
|
@ -12,7 +12,7 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
""" The main global manager module.
|
||||
"""The main global manager module.
|
||||
|
||||
The global manager is deployed on the management host and is
|
||||
responsible for making VM placement decisions and initiating VM
|
||||
@ -105,7 +105,7 @@ CONF.register_opts(global_mgr_ops)
|
||||
|
||||
|
||||
def host_mac(host):
|
||||
""" Get mac address of a host.
|
||||
"""Get mac address of a host.
|
||||
|
||||
:param host: A host name.
|
||||
:return: The mac address of the host.
|
||||
@ -123,7 +123,7 @@ def host_mac(host):
|
||||
|
||||
|
||||
def flavors_ram(nova):
|
||||
""" Get a dict of flavor IDs to the RAM limits.
|
||||
"""Get a dict of flavor IDs to the RAM limits.
|
||||
|
||||
:param nova: A Nova client.
|
||||
:return: A dict of flavor IDs to the RAM limits.
|
||||
@ -132,7 +132,7 @@ def flavors_ram(nova):
|
||||
|
||||
|
||||
def vms_ram_limit(nova, vms):
|
||||
""" Get the RAM limit from the flavors of the VMs.
|
||||
"""Get the RAM limit from the flavors of the VMs.
|
||||
|
||||
:param nova: A Nova client.
|
||||
:param vms: A list of VM UUIDs.
|
||||
@ -150,7 +150,7 @@ def vms_ram_limit(nova, vms):
|
||||
|
||||
|
||||
def host_used_ram(nova, host):
|
||||
""" Get the used RAM of the host using the Nova API.
|
||||
"""Get the used RAM of the host using the Nova API.
|
||||
|
||||
:param nova: A Nova client.
|
||||
:param host: A host name.
|
||||
@ -163,7 +163,7 @@ def host_used_ram(nova, host):
|
||||
|
||||
|
||||
def vms_by_hosts(nova, hosts):
|
||||
""" Get a map of host names to VMs using the Nova API.
|
||||
"""Get a map of host names to VMs using the Nova API.
|
||||
|
||||
:param nova: A Nova client.
|
||||
:param hosts: A list of host names.
|
||||
@ -176,28 +176,28 @@ def vms_by_hosts(nova, hosts):
|
||||
|
||||
|
||||
def vms_by_host(nova, host):
|
||||
""" Get VMs from the specified host using the Nova API.
|
||||
"""Get VMs from the specified host using the Nova API.
|
||||
|
||||
:param nova: A Nova client.
|
||||
:param host: A host name.
|
||||
:return: A list of VM UUIDs from the specified host.
|
||||
"""
|
||||
return [str(vm.id) for vm in nova.servers.list()
|
||||
if (vm_hostname(vm) == host and str(
|
||||
getattr(vm, 'OS-EXT-STS:vm_state')) == 'active')]
|
||||
if (vm_hostname(vm) == host and str(getattr(
|
||||
vm, 'OS-EXT-STS:vm_state')) == 'active')]
|
||||
|
||||
|
||||
def vm_hostname(vm):
|
||||
""" Get the name of the host where VM is running.
|
||||
"""Get the name of the host where VM is running.
|
||||
|
||||
:param vm: A Nova VM object.
|
||||
:return: The hostname.
|
||||
"""
|
||||
return str(getattr(vm, 'OS-EXT-SRV-ATTR:host'))
|
||||
return str(vm.get('OS-EXT-SRV-ATTR:host'))
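A hedged aside on the `vm_hostname` change in this hunk: `getattr(vm, 'OS-EXT-SRV-ATTR:host')` is the accessor for a python-novaclient Server object, while `.get(...)` assumes a dict-like payload. If both shapes can occur, something like the sketch below (purely illustrative, not the commit's code) keeps the helper working for either:

```python
# Illustration only -- not the code in the commit.
def vm_hostname(vm):
    """Return the host of a VM given either a Server object or a dict."""
    if isinstance(vm, dict):
        return str(vm.get('OS-EXT-SRV-ATTR:host'))
    return str(getattr(vm, 'OS-EXT-SRV-ATTR:host'))
```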
|
||||
|
||||
|
||||
def migrate_vms(db, nova, vm_instance_directory, placement, block_migration):
|
||||
""" Synchronously live migrate a set of VMs.
|
||||
"""Synchronously live migrate a set of VMs.
|
||||
|
||||
:param db: The database object.
|
||||
:param nova: A Nova client.
|
||||
@ -232,9 +232,8 @@ def migrate_vms(db, nova, vm_instance_directory, placement, block_migration):
|
||||
db.insert_vm_migration(vm_uuid, placement[vm_uuid])
|
||||
LOG.info('Completed migration of VM %s to %s',
|
||||
vm_uuid, placement[vm_uuid])
|
||||
elif time.time() - start_time > 300 and \
|
||||
vm_hostname(vm) != placement[vm_uuid] and \
|
||||
vm.status == u'ACTIVE':
|
||||
elif time.time() - start_time > 300 and vm_hostname(
|
||||
vm) != placement[vm_uuid] and vm.status == u'ACTIVE':
|
||||
vm_pair.remove(vm_uuid)
|
||||
retry_placement[vm_uuid] = placement[vm_uuid]
|
||||
LOG.warning('Time-out for migration of VM %s to %s, ' +
|
||||
@ -253,7 +252,7 @@ def migrate_vms(db, nova, vm_instance_directory, placement, block_migration):
|
||||
|
||||
|
||||
def migrate_vm(nova, vm_instance_directory, vm, host, block_migration):
|
||||
""" Live migrate a VM.
|
||||
"""Live migrate a VM.
|
||||
|
||||
:param nova: A Nova client.
|
||||
:param vm_instance_directory: The VM instance directory.
|
||||
@ -269,7 +268,7 @@ def migrate_vm(nova, vm_instance_directory, vm, host, block_migration):
|
||||
|
||||
|
||||
def switch_hosts_off(db, sleep_command, hosts):
|
||||
""" Switch hosts to a low-power mode.
|
||||
"""Switch hosts to a low-power mode.
|
||||
|
||||
:param db: The database object.
|
||||
:param sleep_command: A Shell command to switch off a host.
|
||||
@ -290,7 +289,7 @@ class GlobalManager(object):
|
||||
self.switch_hosts_on(self.state['compute_hosts'])
|
||||
|
||||
def init_state(self):
|
||||
""" Initialize a dict for storing the state of the global manager.
|
||||
"""Initialize a dict for storing the state of the global manager.
|
||||
"""
|
||||
return {'previous_time': 0,
|
||||
'db': db_utils.init_db(),
|
||||
@ -306,7 +305,7 @@ class GlobalManager(object):
|
||||
'host_macs': {}}
|
||||
|
||||
def switch_hosts_on(self, hosts):
|
||||
""" Switch hosts to the active mode.
|
||||
"""Switch hosts to the active mode.
|
||||
"""
|
||||
for host in hosts:
|
||||
if host not in self.state['host_macs']:
|
||||
@ -324,14 +323,14 @@ class GlobalManager(object):
|
||||
self.state['db'].insert_host_states(
|
||||
dict((x, 1) for x in hosts))
|
||||
|
||||
|
||||
def execute_underload(self, host):
|
||||
""" Process an underloaded host: migrate all VMs from the host.
|
||||
"""Process an underloaded host: migrate all VMs from the host.
|
||||
|
||||
1. Prepare the data about the current states of the hosts and VMs.
|
||||
|
||||
2. Call the function specified in the `algorithm_vm_placement_factory`
|
||||
configuration option and pass the data on the states of the hosts and VMs.
|
||||
configuration option and pass the data on the states of the hosts
|
||||
and VMs.
|
||||
|
||||
3. Call the Nova API to migrate the VMs according to the placement
|
||||
determined by the `algorithm_vm_placement_factory` algorithm.
|
||||
@ -355,7 +354,7 @@ class GlobalManager(object):
|
||||
# These VMs are new and no data have been collected from them
|
||||
for host, vms in hosts_to_vms.items():
|
||||
for i, vm in enumerate(vms):
|
||||
if not vm in vms_last_cpu:
|
||||
if vm not in vms_last_cpu:
|
||||
del hosts_to_vms[host][i]
|
||||
|
||||
LOG.debug('hosts_to_vms: %s', str(hosts_to_vms))
|
||||
@ -380,7 +379,8 @@ class GlobalManager(object):
|
||||
host_cpu_mhz += vms_last_cpu[vm]
|
||||
else:
|
||||
hosts_cpu_usage[host] = host_cpu_mhz
|
||||
hosts_ram_usage[host] = host_used_ram(state['nova'], host)
|
||||
hosts_ram_usage[host] = host_used_ram(
|
||||
self.state['nova'], host)
|
||||
else:
|
||||
# Exclude inactive hosts
|
||||
hosts_cpu_total.pop(host, None)
|
||||
@ -403,7 +403,8 @@ class GlobalManager(object):
|
||||
vms_cpu = {}
|
||||
for vm in vms_to_migrate:
|
||||
if vm not in vms_last_cpu:
|
||||
LOG.info('No data yet for VM: %s - dropping the request', vm)
|
||||
LOG.info('No data yet for VM: %s - dropping the request',
|
||||
vm)
|
||||
LOG.info('Skipped an underload request')
|
||||
return self.state
|
||||
vms_cpu[vm] = self.state['db'].select_cpu_mhz_for_vm(
|
||||
@ -414,7 +415,7 @@ class GlobalManager(object):
|
||||
# Remove VMs that are not in vms_ram
|
||||
# These instances might have been deleted
|
||||
for i, vm in enumerate(vms_to_migrate):
|
||||
if not vm in vms_ram:
|
||||
if vm not in vms_ram:
|
||||
del vms_to_migrate[i]
|
||||
|
||||
if not vms_to_migrate:
|
||||
@ -422,7 +423,7 @@ class GlobalManager(object):
|
||||
return self.state
|
||||
|
||||
for vm in vms_cpu.keys():
|
||||
if not vm in vms_ram:
|
||||
if vm not in vms_ram:
|
||||
del vms_cpu[vm]
|
||||
|
||||
time_step = CONF.data_collector_interval
|
||||
@ -460,9 +461,8 @@ class GlobalManager(object):
|
||||
active_hosts = hosts_cpu_total.keys()
|
||||
inactive_hosts = set(self.state['compute_hosts']) - set(active_hosts)
|
||||
prev_inactive_hosts = set(self.state['db'].select_inactive_hosts())
|
||||
hosts_to_deactivate = list(inactive_hosts
|
||||
- prev_inactive_hosts
|
||||
- hosts_to_keep_active)
|
||||
hosts_to_deactivate = list(
|
||||
inactive_hosts - prev_inactive_hosts - hosts_to_keep_active)
|
||||
|
||||
if not placement:
|
||||
LOG.info('Nothing to migrate')
|
||||
@ -486,12 +486,13 @@ class GlobalManager(object):
|
||||
return self.state
|
||||
|
||||
def execute_overload(self, host, vm_uuids):
|
||||
""" Process an overloaded host: migrate the selected VMs from it.
|
||||
"""Process an overloaded host: migrate the selected VMs from it.
|
||||
|
||||
1. Prepare the data about the current states of the hosts and VMs.
|
||||
|
||||
2. Call the function specified in the `algorithm_vm_placement_factory`
|
||||
configuration option and pass the data on the states of the hosts and VMs.
|
||||
configuration option and pass the data on the states of the hosts
|
||||
and VMs.
|
||||
|
||||
3. Call the Nova API to migrate the VMs according to the placement
|
||||
determined by the `algorithm_vm_placement_factory` algorithm.
|
||||
@ -503,7 +504,8 @@ class GlobalManager(object):
|
||||
overloaded_host = host
|
||||
hosts_cpu_total, _, hosts_ram_total = self.state[
|
||||
'db'].select_host_characteristics()
|
||||
hosts_to_vms = vms_by_hosts(state['nova'], self.state['compute_hosts'])
|
||||
hosts_to_vms = vms_by_hosts(self.state['nova'],
|
||||
self.state['compute_hosts'])
|
||||
vms_last_cpu = self.state['db'].select_last_cpu_mhz_for_vms()
|
||||
hosts_last_cpu = self.state['db'].select_last_cpu_mhz_for_hosts()
|
||||
|
||||
@ -511,7 +513,7 @@ class GlobalManager(object):
|
||||
# These VMs are new and no data have been collected from them
|
||||
for host, vms in hosts_to_vms.items():
|
||||
for i, vm in enumerate(vms):
|
||||
if not vm in vms_last_cpu:
|
||||
if vm not in vms_last_cpu:
|
||||
del hosts_to_vms[host][i]
|
||||
|
||||
hosts_cpu_usage = {}
|
||||
@ -523,9 +525,9 @@ class GlobalManager(object):
|
||||
host_cpu_mhz = hosts_last_cpu[host]
|
||||
for vm in vms:
|
||||
if vm not in vms_last_cpu:
|
||||
LOG.info('No data yet for VM: %s - skipping host %s',
|
||||
vm,
|
||||
host)
|
||||
LOG.info(
|
||||
'No data yet for VM: %s - skipping host %s',
|
||||
vm, host)
|
||||
hosts_cpu_total.pop(host, None)
|
||||
hosts_ram_total.pop(host, None)
|
||||
hosts_cpu_usage.pop(host, None)
|
||||
@ -555,7 +557,9 @@ class GlobalManager(object):
|
||||
vms_cpu = {}
|
||||
for vm in vms_to_migrate:
|
||||
if vm not in vms_last_cpu:
|
||||
LOG.info('No data yet for VM: %s - dropping the request', vm)
|
||||
LOG.info(
|
||||
'No data yet for VM: %s - dropping the request',
|
||||
vm)
|
||||
LOG.info('Skipped an underload request')
|
||||
return self.state
|
||||
vms_cpu[vm] = self.state['db'].select_cpu_mhz_for_vm(
|
||||
@ -566,15 +570,16 @@ class GlobalManager(object):
|
||||
# Remove VMs that are not in vms_ram
|
||||
# These instances might have been deleted
|
||||
for i, vm in enumerate(vms_to_migrate):
|
||||
if not vm in vms_ram:
|
||||
if vm not in vms_ram:
|
||||
del vms_to_migrate[i]
|
||||
|
||||
if not vms_to_migrate:
|
||||
LOG.info('No VMs to migrate - completed the overload request')
|
||||
LOG.info(
|
||||
'No VMs to migrate - completed the overload request')
|
||||
return self.state
|
||||
|
||||
for vm in vms_cpu.keys():
|
||||
if not vm in vms_ram:
|
||||
if vm not in vms_ram:
|
||||
del vms_cpu[vm]
|
||||
|
||||
time_step = CONF.data_collector_interval
|
||||
@ -582,7 +587,7 @@ class GlobalManager(object):
|
||||
vms_ram,
|
||||
CONF.network_migration_bandwidth)
|
||||
|
||||
if 'vm_placement' not in state:
|
||||
if 'vm_placement' not in self.state:
|
||||
vm_placement_params = common.parse_parameters(
|
||||
CONF.global_manager.algorithm_vm_placement_parameters)
|
||||
vm_placement_state = None
|
||||
@ -625,7 +630,7 @@ class GlobalManager(object):
|
||||
CONF.global_manager.block_migration)
|
||||
LOG.info('Completed overload VM migrations')
|
||||
LOG.info('Completed processing an overload request')
|
||||
return state
|
||||
return self.state
|
||||
|
||||
def service(self, reason, host, vm_uuids):
|
||||
try:
|
||||
@ -635,6 +640,6 @@ class GlobalManager(object):
|
||||
else:
|
||||
LOG.info('Processing an overload, VMs: %s', str(vm_uuids))
|
||||
self.execute_overload(host, vm_uuids)
|
||||
except:
|
||||
except Exception:
|
||||
LOG.exception('Exception during request processing:')
|
||||
raise
|
||||
|
@ -13,7 +13,7 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
""" Bin Packing based VM placement algorithms.
|
||||
"""Bin Packing based VM placement algorithms.
|
||||
"""
|
||||
|
||||
from oslo_log import log as logging
|
||||
@ -23,7 +23,7 @@ LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def best_fit_decreasing_factory(time_step, migration_time, params):
|
||||
""" Creates the Best Fit Decreasing (BFD) heuristic for VM placement.
|
||||
"""Creates the Best Fit Decreasing (BFD) heuristic for VM placement.
|
||||
|
||||
:param time_step: The length of the simulation time step in seconds.
|
||||
:param migration_time: The VM migration time in time seconds.
|
||||
@ -52,7 +52,7 @@ def best_fit_decreasing_factory(time_step, migration_time, params):
|
||||
|
||||
|
||||
def get_available_resources(threshold, usage, total):
|
||||
""" Get a map of the available resource capacity.
|
||||
"""Get a map of the available resource capacity.
|
||||
|
||||
:param threshold: A threshold on the maximum allowed resource usage.
|
||||
:param usage: A map of hosts to the resource usage.
|
||||
@ -66,7 +66,7 @@ def get_available_resources(threshold, usage, total):
|
||||
def best_fit_decreasing(last_n_vm_cpu, hosts_cpu, hosts_ram,
|
||||
inactive_hosts_cpu, inactive_hosts_ram,
|
||||
vms_cpu, vms_ram):
|
||||
""" The Best Fit Decreasing (BFD) heuristic for placing VMs on hosts.
|
||||
"""The Best Fit Decreasing (BFD) heuristic for placing VMs on hosts.
|
||||
|
||||
:param last_n_vm_cpu: The last n VM CPU usage values to average.
|
||||
:param hosts_cpu: A map of host names and their available CPU in MHz.
|
||||
@ -104,8 +104,7 @@ def best_fit_decreasing(last_n_vm_cpu, hosts_cpu, hosts_ram,
|
||||
mapped = False
|
||||
while not mapped:
|
||||
for _, _, host in hosts:
|
||||
if hosts_cpu[host] >= vm_cpu and \
|
||||
hosts_ram[host] >= vm_ram:
|
||||
if hosts_cpu[host] >= vm_cpu and hosts_ram[host] >= vm_ram:
|
||||
mapping[vm_uuid] = host
|
||||
hosts_cpu[host] -= vm_cpu
|
||||
hosts_ram[host] -= vm_ram
|
||||
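The hunk above is the core of the Best Fit Decreasing placement loop: VMs are taken in decreasing size order and assigned to the first host whose free CPU and RAM can hold them, after which that host's free capacity is reduced. A minimal sketch of that loop, assuming a simple (cpu, ram, uuid) tuple list and sorted host iteration (the exact sort keys used by Terracotta are not shown in this diff):

def bfd_sketch(vms, hosts_cpu, hosts_ram):
    """vms: list of (cpu_mhz, ram_mb, uuid); hosts_*: dicts of free capacity."""
    mapping = {}
    # Place the largest VMs first (decreasing order).
    for vm_cpu, vm_ram, vm_uuid in sorted(vms, reverse=True):
        for host in sorted(hosts_cpu):
            if hosts_cpu[host] >= vm_cpu and hosts_ram[host] >= vm_ram:
                mapping[vm_uuid] = host
                hosts_cpu[host] -= vm_cpu
                hosts_ram[host] -= vm_ram
                break
        # VMs that fit nowhere are simply left unmapped in this sketch;
        # the real algorithm can switch on inactive hosts instead.
    return mapping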
|
@ -13,7 +13,7 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
""" The main data collector module.
|
||||
"""The main data collector module.
|
||||
|
||||
The data collector is deployed on every compute host and is executed
|
||||
periodically to collect the CPU utilization data for each VM running
|
||||
@ -151,7 +151,7 @@ class Collector(periodic_task.PeriodicTasks):
|
||||
self.state = self.init_state()
|
||||
|
||||
def init_state(self):
|
||||
""" Initialize a dict for storing the state of the data collector."""
|
||||
"""Initialize a dict for storing the state of the data collector."""
|
||||
vir_connection = libvirt.openReadOnly(None)
|
||||
if vir_connection is None:
|
||||
message = 'Failed to open a connection to the hypervisor'
|
||||
@ -178,7 +178,7 @@ class Collector(periodic_task.PeriodicTasks):
|
||||
'vir_connection': vir_connection,
|
||||
'hostname': hostname,
|
||||
'host_cpu_overload_threshold':
|
||||
CONF.collector.host_cpu_overload_threshold * \
|
||||
CONF.collector.host_cpu_overload_threshold *
|
||||
host_cpu_usable_by_vms,
|
||||
'physical_cpus': physical_cpus,
|
||||
'physical_cpu_mhz': host_cpu_mhz,
|
||||
@ -187,7 +187,7 @@ class Collector(periodic_task.PeriodicTasks):
|
||||
|
||||
@periodic_task.periodic_task(spacing=10, run_immediately=True)
|
||||
def execute(self, ctx=None):
|
||||
""" Execute a data collection iteration.
|
||||
"""Execute a data collection iteration.
|
||||
|
||||
1. Read the names of the files from the <local_data_directory>/vm
|
||||
directory to determine the list of VMs running on the host at the
|
||||
@ -310,18 +310,16 @@ class Collector(periodic_task.PeriodicTasks):
|
||||
LOG.info('Completed an iteration')
|
||||
self.state = state
|
||||
|
||||
|
||||
def get_previous_vms(self, path):
|
||||
""" Get a list of VM UUIDs from the path.
|
||||
"""Get a list of VM UUIDs from the path.
|
||||
|
||||
:param path: A path to read VM UUIDs from.
|
||||
:return: The list of VM UUIDs from the path.
|
||||
"""
|
||||
return os.listdir(path)
|
||||
|
||||
|
||||
def get_current_vms(self, vir_connection):
|
||||
""" Get a dict of VM UUIDs to states from libvirt.
|
||||
"""Get a dict of VM UUIDs to states from libvirt.
|
||||
|
||||
:param vir_connection: A libvirt connection object.
|
||||
:return: The dict of VM UUIDs to states from libvirt.
|
||||
@ -335,9 +333,8 @@ class Collector(periodic_task.PeriodicTasks):
|
||||
pass
|
||||
return vm_uuids
|
||||
|
||||
|
||||
def get_added_vms(self, previous_vms, current_vms):
|
||||
""" Get a list of newly added VM UUIDs.
|
||||
"""Get a list of newly added VM UUIDs.
|
||||
|
||||
:param previous_vms: A list of VMs at the previous time frame.
|
||||
:param current_vms: A list of VM at the current time frame.
|
||||
@ -345,9 +342,8 @@ class Collector(periodic_task.PeriodicTasks):
|
||||
"""
|
||||
return self.substract_lists(current_vms, previous_vms)
|
||||
|
||||
|
||||
def get_removed_vms(self, previous_vms, current_vms):
|
||||
""" Get a list of VM UUIDs removed since the last time frame.
|
||||
"""Get a list of VM UUIDs removed since the last time frame.
|
||||
|
||||
:param previous_vms: A list of VMs at the previous time frame.
|
||||
:param current_vms: A list of VM at the current time frame.
|
||||
@ -356,7 +352,7 @@ class Collector(periodic_task.PeriodicTasks):
|
||||
return self.substract_lists(previous_vms, current_vms)
|
||||
|
||||
def substract_lists(self, list1, list2):
|
||||
""" Return the elements of list1 that are not in list2.
|
||||
"""Return the elements of list1 that are not in list2.
|
||||
|
||||
:param list1: The first list.
|
||||
:param list2: The second list.
|
||||
@ -364,9 +360,8 @@ class Collector(periodic_task.PeriodicTasks):
|
||||
"""
|
||||
return list(set(list1).difference(list2))
|
||||
|
||||
|
||||
def cleanup_local_vm_data(self, path, vms):
|
||||
""" Delete the local data related to the removed VMs.
|
||||
"""Delete the local data related to the removed VMs.
|
||||
|
||||
:param path: A path to remove VM data from.
|
||||
:param vms: A list of removed VM UUIDs.
|
||||
@ -374,9 +369,8 @@ class Collector(periodic_task.PeriodicTasks):
|
||||
for vm in vms:
|
||||
os.remove(os.path.join(path, vm))
|
||||
|
||||
|
||||
def cleanup_all_local_data(self, path):
|
||||
""" Delete all the local data about VMs.
|
||||
"""Delete all the local data about VMs.
|
||||
|
||||
:param path: A path to the local data directory.
|
||||
"""
|
||||
@ -386,9 +380,8 @@ class Collector(periodic_task.PeriodicTasks):
|
||||
if os.access(host_path, os.F_OK):
|
||||
os.remove(host_path)
|
||||
|
||||
|
||||
def fetch_remote_data(self, db, data_length, uuids):
|
||||
""" Fetch VM data from the central DB.
|
||||
"""Fetch VM data from the central DB.
|
||||
|
||||
:param db: The database object.
|
||||
:param data_length: The length of data to fetch.
|
||||
@ -400,9 +393,8 @@ class Collector(periodic_task.PeriodicTasks):
|
||||
result[uuid] = db.select_cpu_mhz_for_vm(uuid, data_length)
|
||||
return result
|
||||
|
||||
|
||||
def write_vm_data_locally(self, path, data, data_length):
|
||||
""" Write a set of CPU MHz values for a set of VMs.
|
||||
"""Write a set of CPU MHz values for a set of VMs.
|
||||
|
||||
:param path: A path to write the data to.
|
||||
:param data: A map of VM UUIDs onto the corresponing CPU MHz history.
|
||||
@ -414,9 +406,8 @@ class Collector(periodic_task.PeriodicTasks):
|
||||
f.write('\n'.join([str(x)
|
||||
for x in values[-data_length:]]) + '\n')
|
||||
|
||||
|
||||
def append_vm_data_locally(self, path, data, data_length):
|
||||
""" Write a CPU MHz value for each out of a set of VMs.
|
||||
"""Write a CPU MHz value for each out of a set of VMs.
|
||||
|
||||
:param path: A path to write the data to.
|
||||
:param data: A map of VM UUIDs onto the corresponing CPU MHz values.
|
||||
@ -435,18 +426,16 @@ class Collector(periodic_task.PeriodicTasks):
|
||||
f.seek(0)
|
||||
f.write('\n'.join([str(x) for x in values]) + '\n')
|
||||
|
||||
|
||||
def append_vm_data_remotely(self, db, data):
|
||||
""" Submit CPU MHz values to the central database.
|
||||
"""Submit CPU MHz values to the central database.
|
||||
|
||||
:param db: The database object.
|
||||
:param data: A map of VM UUIDs onto the corresponing CPU MHz values.
|
||||
"""
|
||||
db.insert_vm_cpu_mhz(data)
|
||||
|
||||
|
||||
def append_host_data_locally(self, path, cpu_mhz, data_length):
|
||||
""" Write a CPU MHz value for the host.
|
||||
"""Write a CPU MHz value for the host.
|
||||
|
||||
:param path: A path to write the data to.
|
||||
:param cpu_mhz: A CPU MHz value.
|
||||
@ -463,9 +452,8 @@ class Collector(periodic_task.PeriodicTasks):
|
||||
f.seek(0)
|
||||
f.write('\n'.join([str(x) for x in values]) + '\n')
|
||||
|
||||
|
||||
def append_host_data_remotely(self, db, hostname, host_cpu_mhz):
|
||||
""" Submit a host CPU MHz value to the central database.
|
||||
"""Submit a host CPU MHz value to the central database.
|
||||
|
||||
:param db: The database object.
|
||||
:param hostname: The host name.
|
||||
@ -473,14 +461,13 @@ class Collector(periodic_task.PeriodicTasks):
|
||||
"""
|
||||
db.insert_host_cpu_mhz(hostname, host_cpu_mhz)
|
||||
|
||||
|
||||
def get_cpu_mhz(self, vir_connection, physical_core_mhz, previous_cpu_time,
|
||||
previous_time, current_time, current_vms,
|
||||
previous_cpu_mhz, added_vm_data):
|
||||
""" Get the average CPU utilization in MHz for a set of VMs.
|
||||
def get_cpu_mhz(self, vir_connection, physical_core_mhz,
|
||||
previous_cpu_time, previous_time, current_time,
|
||||
current_vms, previous_cpu_mhz, added_vm_data):
|
||||
"""Get the average CPU utilization in MHz for a set of VMs.
|
||||
|
||||
:param vir_connection: A libvirt connection object.
|
||||
:param physical_core_mhz: The core frequency of the physical CPU in MHz.
|
||||
:param physical_core_mhz: The core freq of the physical CPU in MHz.
|
||||
:param previous_cpu_time: A dict of previous CPU times for the VMs.
|
||||
:param previous_time: The previous timestamp.
|
||||
:param current_time: The previous timestamp.
|
||||
@ -529,9 +516,8 @@ class Collector(periodic_task.PeriodicTasks):
|
||||
|
||||
return previous_cpu_time, cpu_mhz
|
||||
|
||||
|
||||
def get_cpu_time(self, vir_connection, uuid):
|
||||
""" Get the CPU time of a VM specified by the UUID using libvirt.
|
||||
"""Get the CPU time of a VM specified by the UUID using libvirt.
|
||||
|
||||
:param vir_connection: A libvirt connection object.
|
||||
:param uuid: The UUID of a VM.
|
||||
@ -543,10 +529,9 @@ class Collector(periodic_task.PeriodicTasks):
|
||||
except libvirt.libvirtError:
|
||||
return 0
|
||||
|
||||
|
||||
def calculate_cpu_mhz(self, cpu_mhz, previous_time, current_time,
|
||||
previous_cpu_time, current_cpu_time):
|
||||
""" Calculate the average CPU utilization in MHz for a period of time.
|
||||
"""Calculate the average CPU utilization in MHz for a period of time.
|
||||
|
||||
:param cpu_mhz: The frequency of a core of the physical CPU in MHz.
|
||||
:param previous_time: The previous time.
|
||||
@ -555,26 +540,27 @@ class Collector(periodic_task.PeriodicTasks):
|
||||
:param current_cpu_time: The current CPU time of the domain.
|
||||
:return: The average CPU utilization in MHz.
|
||||
"""
|
||||
return int(cpu_mhz * float(current_cpu_time - previous_cpu_time) / \
|
||||
((current_time - previous_time) * 1000000000))
|
||||
|
||||
return int(cpu_mhz * float(
|
||||
current_cpu_time - previous_cpu_time) / (
|
||||
(current_time - previous_time) * 1000000000))
|
||||
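A worked example of the expression reconstructed above, with illustrative numbers: libvirt reports CPU time in nanoseconds, so the CPU-time delta is normalized by the wall-clock interval in seconds times 1e9 before scaling by the core frequency.

cpu_mhz = 2000                                       # physical core frequency, MHz
previous_time, current_time = 100.0, 130.0           # seconds
previous_cpu_time, current_cpu_time = 4e9, 19e9      # nanoseconds of CPU time
average = int(cpu_mhz * float(current_cpu_time - previous_cpu_time) /
              ((current_time - previous_time) * 1000000000))
# (19e9 - 4e9) / (30 * 1e9) = 0.5 of a core -> average == 1000 MHz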
|
||||
def get_host_cpu_mhz(self, cpu_mhz, previous_cpu_time_total,
|
||||
previous_cpu_time_busy):
|
||||
""" Get the average CPU utilization in MHz for a set of VMs.
|
||||
"""Get the average CPU utilization in MHz for a set of VMs.
|
||||
|
||||
:param cpu_mhz: The total frequency of the physical CPU in MHz.
|
||||
:param previous_cpu_time_total: The previous total CPU time.
|
||||
:param previous_cpu_time_busy: The previous busy CPU time.
|
||||
:return: The current total and busy CPU time, and CPU utilization in MHz.
|
||||
:return: The total and busy CPU time and CPU utilization in MHz.
|
||||
"""
|
||||
cpu_time_total, cpu_time_busy = self.get_host_cpu_time()
|
||||
cpu_usage = int(cpu_mhz * (cpu_time_busy - previous_cpu_time_busy) / \
|
||||
(cpu_time_total - previous_cpu_time_total))
|
||||
cpu_usage = int(cpu_mhz * (
|
||||
cpu_time_busy - previous_cpu_time_busy) / (
|
||||
cpu_time_total - previous_cpu_time_total))
|
||||
if cpu_usage < 0:
|
||||
raise ValueError(
|
||||
'The host CPU usage in MHz must be >=0, but it is: ' + str(
|
||||
cpu_usage) +
|
||||
'The host CPU usage in MHz must be >=0, '
|
||||
'but it is: ' + str(cpu_usage) +
|
||||
'; cpu_mhz=' + str(cpu_mhz) +
|
||||
'; previous_cpu_time_total=' + str(previous_cpu_time_total) +
|
||||
'; cpu_time_total=' + str(cpu_time_total) +
|
||||
@ -582,9 +568,8 @@ class Collector(periodic_task.PeriodicTasks):
|
||||
'; cpu_time_busy=' + str(cpu_time_busy))
|
||||
return cpu_time_total, cpu_time_busy, cpu_usage
|
||||
|
||||
|
||||
def get_host_cpu_time(self):
|
||||
""" Get the total and busy CPU time of the host.
|
||||
"""Get the total and busy CPU time of the host.
|
||||
|
||||
:return: A tuple of the total and busy CPU time.
|
||||
"""
|
||||
@ -592,9 +577,8 @@ class Collector(periodic_task.PeriodicTasks):
|
||||
values = [float(x) for x in f.readline().split()[1:8]]
|
||||
return sum(values), sum(values[0:3])
|
||||
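The two host-side readings above fit together as follows. A standalone sketch, assuming the CPU times come from /proc/stat (user, nice, system, idle, iowait, irq, softirq); the file path is an assumption inferred from the [1:8] field slicing shown above, not stated in this diff.

def host_cpu_time_sketch():
    with open('/proc/stat') as f:
        values = [float(x) for x in f.readline().split()[1:8]]
    return sum(values), sum(values[0:3])   # total, busy (user + nice + system)

def host_cpu_usage_sketch(cpu_mhz, prev_total, prev_busy):
    total, busy = host_cpu_time_sketch()
    # Busy fraction of the sampling interval, scaled to the host's total MHz.
    return total, busy, int(cpu_mhz * (busy - prev_busy) / (total - prev_total))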
|
||||
|
||||
def get_host_characteristics(self, vir_connection):
|
||||
""" Get the total CPU MHz and RAM of the host.
|
||||
"""Get the total CPU MHz and RAM of the host.
|
||||
|
||||
:param vir_connection: A libvirt connection object.
|
||||
:return: A tuple of the total CPU MHz and RAM of the host.
|
||||
@ -602,11 +586,10 @@ class Collector(periodic_task.PeriodicTasks):
|
||||
info = vir_connection.getInfo()
|
||||
return info[2] * info[3], info[1]
|
||||
|
||||
|
||||
def log_host_overload(self, db, overload_threshold, hostname,
|
||||
previous_overload,
|
||||
host_total_mhz, host_utilization_mhz):
|
||||
""" Log to the DB whether the host is overloaded.
|
||||
"""Log to the DB whether the host is overloaded.
|
||||
|
||||
:param db: The database object.
|
||||
:param overload_threshold: The host overload threshold.
|
||||
|
@ -13,7 +13,7 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
""" The main local manager module.
|
||||
"""The main local manager module.
|
||||
|
||||
The local manager component is deployed on every compute host and is
|
||||
invoked periodically to determine when it necessary to reallocate VM
|
||||
@ -103,7 +103,6 @@ local manager performs the following steps:
|
||||
from hashlib import sha1
|
||||
import libvirt
|
||||
import os
|
||||
import time
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
@ -146,12 +145,12 @@ class LocalManager(periodic_task.PeriodicTasks):
|
||||
self.state = self.init_state()
|
||||
|
||||
def init_state(self):
|
||||
""" Initialize a dict for storing the state of the local manager.
|
||||
"""Initialize a dict for storing the state of the local manager.
|
||||
|
||||
:param config: A config dictionary.
|
||||
:type config: dict(str: *)
|
||||
|
||||
:return: A dictionary containing the initial state of the local manager.
|
||||
:return: A dictionary, initial state of the local manager.
|
||||
:rtype: dict
|
||||
"""
|
||||
vir_connection = libvirt.openReadOnly(None)
|
||||
@ -173,7 +172,7 @@ class LocalManager(periodic_task.PeriodicTasks):
|
||||
|
||||
@periodic_task.periodic_task(spacing=10, run_immediately=True)
|
||||
def execute(self, ctx=None):
|
||||
""" Execute an iteration of the local manager.
|
||||
"""Execute an iteration of the local manager.
|
||||
|
||||
1. Read the data on resource usage by the VMs running on the host from
|
||||
the <local_data_directory>/vm directory.
|
||||
@ -201,7 +200,6 @@ class LocalManager(periodic_task.PeriodicTasks):
|
||||
global manager and pass a list of the UUIDs of the VMs selected by
|
||||
the VM selection algorithm in the vm_uuids parameter, as well as
|
||||
the reason for migration as being 1.
|
||||
|
||||
"""
|
||||
LOG.info('Started an iteration')
|
||||
state = self.state
|
||||
@ -302,9 +300,8 @@ class LocalManager(periodic_task.PeriodicTasks):
|
||||
LOG.info('Completed an iteration')
|
||||
self.state = state
|
||||
|
||||
|
||||
def get_local_vm_data(self, path):
|
||||
""" Read the data about VMs from the local storage.
|
||||
"""Read the data about VMs from the local storage.
|
||||
|
||||
:param path: A path to read VM UUIDs from.
|
||||
:return: A map of VM UUIDs onto the corresponing CPU MHz values.
|
||||
@ -315,13 +312,11 @@ class LocalManager(periodic_task.PeriodicTasks):
|
||||
result[uuid] = [int(x) for x in f.read().strip().splitlines()]
|
||||
return result
|
||||
|
||||
|
||||
def get_local_host_data(self, path):
|
||||
""" Read the data about the host from the local storage.
|
||||
"""Read the data about the host from the local storage.
|
||||
|
||||
:param path: A path to read the host data from.
|
||||
:return: A history of the host CPU usage in MHz.
|
||||
|
||||
"""
|
||||
if not os.access(path, os.F_OK):
|
||||
return []
|
||||
@ -329,23 +324,20 @@ class LocalManager(periodic_task.PeriodicTasks):
|
||||
result = [int(x) for x in f.read().strip().splitlines()]
|
||||
return result
|
||||
|
||||
|
||||
def cleanup_vm_data(self, vm_data, uuids):
|
||||
""" Remove records for the VMs that are not in the list of UUIDs.
|
||||
"""Remove records for the VMs that are not in the list of UUIDs.
|
||||
|
||||
:param vm_data: A map of VM UUIDs to some data.
|
||||
:param uuids: A list of VM UUIDs.
|
||||
:return: The cleaned up map of VM UUIDs to data.
|
||||
|
||||
"""
|
||||
for uuid, _ in vm_data.items():
|
||||
if uuid not in uuids:
|
||||
del vm_data[uuid]
|
||||
return vm_data
|
||||
|
||||
|
||||
def get_ram(self, vir_connection, vm_ids):
|
||||
""" Get the maximum RAM for a set of VM UUIDs.
|
||||
"""Get the maximum RAM for a set of VM UUIDs.
|
||||
|
||||
:param vir_connection: A libvirt connection object.
|
||||
:param vm_ids: A list of VM UUIDs.
|
||||
@ -359,9 +351,8 @@ class LocalManager(periodic_task.PeriodicTasks):
|
||||
|
||||
return vms_ram
|
||||
|
||||
|
||||
def get_max_ram(self, vir_connection, uuid):
|
||||
""" Get the max RAM allocated to a VM UUID using libvirt.
|
||||
"""Get the max RAM allocated to a VM UUID using libvirt.
|
||||
|
||||
:param vir_connection: A libvirt connection object.
|
||||
:param uuid: The UUID of a VM.
|
||||
@ -373,14 +364,13 @@ class LocalManager(periodic_task.PeriodicTasks):
|
||||
except libvirt.libvirtError:
|
||||
return None
|
||||
|
||||
|
||||
def vm_mhz_to_percentage(self, vm_mhz_history, host_mhz_history,
|
||||
physical_cpu_mhz):
|
||||
""" Convert VM CPU utilization to the host's CPU utilization.
|
||||
"""Convert VM CPU utilization to the host's CPU utilization.
|
||||
|
||||
:param vm_mhz_history: A list of CPU utilization histories of VMs in MHz.
|
||||
:param vm_mhz_history: List of CPU utilization histories of VMs in MHz.
|
||||
:param host_mhz_history: A history if the CPU usage by the host in MHz.
|
||||
:param physical_cpu_mhz: The total frequency of the physical CPU in MHz.
|
||||
:param physical_cpu_mhz: Total frequency of the physical CPU in MHz.
|
||||
:return: The history of the host's CPU utilization in percentages.
|
||||
"""
|
||||
max_len = max(len(x) for x in vm_mhz_history)
|
||||
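A simplified sketch of the conversion described by vm_mhz_to_percentage above: per time step the VM CPU loads in MHz are summed and normalized by the host's total capacity. The real method also pads shorter histories and folds in the host's own overhead from host_mhz_history; those details are omitted here and this is an illustration only.

def mhz_to_percentage_sketch(vm_mhz_history, physical_cpu_mhz):
    max_len = max(len(x) for x in vm_mhz_history)
    # Left-pad shorter histories with zeros so all VMs align on time steps.
    padded = [[0] * (max_len - len(x)) + list(x) for x in vm_mhz_history]
    return [float(sum(step)) / physical_cpu_mhz for step in zip(*padded)]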
|
@ -13,7 +13,7 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
""" Functions for solving NLP problems using the bruteforce method.
|
||||
"""Functions for solving NLP problems using the bruteforce method.
|
||||
"""
|
||||
|
||||
import nlp
|
||||
@ -21,7 +21,7 @@ from terracotta.common import frange
|
||||
|
||||
|
||||
def solve2(objective, constraint, step, limit):
|
||||
""" Solve a maximization problem for 2 states.
|
||||
"""Solve a maximization problem for 2 states.
|
||||
|
||||
:param objective: The objective function.
|
||||
:param constraint: A tuple representing the constraint.
|
||||
@ -46,7 +46,7 @@ def solve2(objective, constraint, step, limit):
|
||||
|
||||
def optimize(step, limit, otf, migration_time, ls, p, state_vector,
|
||||
time_in_states, time_in_state_n):
|
||||
""" Solve a MHOD optimization problem.
|
||||
"""Solve a MHOD optimization problem.
|
||||
|
||||
:param step: The step size for the bruteforce algorithm.
|
||||
:param limit: The maximum value of the variables.
|
||||
|
@ -13,21 +13,21 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
""" This is the main module of the MHOD algorithm.
|
||||
"""This is the main module of the MHOD algorithm.
|
||||
"""
|
||||
|
||||
from oslo_log import log as logging
|
||||
|
||||
import terracotta.locals.overload.mhod.multisize_estimation as estimation
|
||||
import terracotta.locals.overload.mhod.bruteforce as bruteforce
|
||||
from terracotta.locals.overload.mhod.l_2_states import ls
|
||||
import terracotta.locals.overload.mhod.multisize_estimation as estimation
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def mhod_factory(time_step, migration_time, params):
|
||||
""" Creates the MHOD algorithm.
|
||||
"""Creates the MHOD algorithm.
|
||||
|
||||
:param time_step: The length of the simulation time step in seconds.
|
||||
:param migration_time: The VM migration time in time seconds.
|
||||
@ -54,7 +54,7 @@ def mhod_factory(time_step, migration_time, params):
|
||||
|
||||
|
||||
def init_state(history_size, window_sizes, number_of_states):
|
||||
""" Initialize the state dictionary of the MHOD algorithm.
|
||||
"""Initialize the state dictionary of the MHOD algorithm.
|
||||
|
||||
:param history_size: The number of last system states to store.
|
||||
:param window_sizes: The required window sizes.
|
||||
@ -78,7 +78,7 @@ def init_state(history_size, window_sizes, number_of_states):
|
||||
|
||||
def mhod(state_config, otf, window_sizes, bruteforce_step, learning_steps,
|
||||
time_step, migration_time, utilization, state):
|
||||
""" The MHOD algorithm returning whether the host is overloaded.
|
||||
"""The MHOD algorithm returning whether the host is overloaded.
|
||||
|
||||
:param state_config: The state configuration.
|
||||
:param otf: The OTF parameter.
|
||||
@ -147,7 +147,8 @@ def mhod(state_config, otf, window_sizes, bruteforce_step, learning_steps,
|
||||
# if utilization_length > state['time_in_states'] + 1:
|
||||
# for s in utilization_to_states(
|
||||
# state_config,
|
||||
# utilization[-(utilization_length - state['time_in_states']):]):
|
||||
# utilization[-(utilization_length -
|
||||
# state['time_in_states']):]):
|
||||
# state['time_in_states'] += 1
|
||||
# if s == state_n:
|
||||
# state['time_in_state_n'] += 1
|
||||
@ -165,7 +166,7 @@ def mhod(state_config, otf, window_sizes, bruteforce_step, learning_steps,
|
||||
|
||||
if utilization_length >= learning_steps:
|
||||
if current_state == state_n and p[state_n][state_n] > 0:
|
||||
# if p[current_state][state_n] > 0:
|
||||
# if p[current_state][state_n] > 0:
|
||||
policy = bruteforce.optimize(
|
||||
bruteforce_step, 1.0, otf, (migration_time / time_step), ls, p,
|
||||
state_vector, state['time_in_states'],
|
||||
@ -180,7 +181,7 @@ def mhod(state_config, otf, window_sizes, bruteforce_step, learning_steps,
|
||||
|
||||
|
||||
def build_state_vector(state_config, utilization):
|
||||
""" Build the current state PMF corresponding to the utilization
|
||||
"""Build the current state PMF corresponding to the utilization
|
||||
history and state config.
|
||||
|
||||
:param state_config: The state configuration.
|
||||
@ -192,7 +193,7 @@ def build_state_vector(state_config, utilization):
|
||||
|
||||
|
||||
def utilization_to_state(state_config, utilization):
|
||||
""" Transform a utilization value into the corresponding state.
|
||||
"""Transform a utilization value into the corresponding state.
|
||||
|
||||
:param state_config: The state configuration.
|
||||
:param utilization: A utilization value.
|
||||
@ -207,7 +208,8 @@ def utilization_to_state(state_config, utilization):
|
||||
|
||||
|
||||
def get_current_state(state_vector):
|
||||
""" Get the current state corresponding to the state probability vector.
|
||||
"""Get the current state corresponding to the state probability
|
||||
vector.
|
||||
|
||||
:param state_vector: The state PMF vector.
|
||||
:return: The current state.
|
||||
@ -216,10 +218,8 @@ def get_current_state(state_vector):
|
||||
|
||||
|
||||
def utilization_to_states(state_config, utilization):
|
||||
""" Get the state history corresponding to the utilization history.
|
||||
|
||||
"""Get the state history corresponding to the utilization history.
|
||||
Adds the 0 state to the beginning to simulate the first transition.
|
||||
|
||||
(map (partial utilization-to-state state-config) utilization))
|
||||
|
||||
:param state_config: The state configuration.
|
||||
@ -230,7 +230,7 @@ def utilization_to_states(state_config, utilization):
|
||||
|
||||
|
||||
def issue_command_deterministic(policy):
|
||||
""" Issue a migration command according to the policy PMF p.
|
||||
"""Issue a migration command according to the policy PMF p.
|
||||
|
||||
:param policy: A policy PMF.
|
||||
:return: A migration command.
|
||||
|
@ -13,12 +13,12 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
""" L functions for the 2 state configuration of the MHOD algorithm.
|
||||
"""L functions for the 2 state configuration of the MHOD algorithm.
|
||||
"""
|
||||
|
||||
|
||||
def l0(p_initial, p_matrix, m):
|
||||
""" Compute the L0 function.
|
||||
"""Compute the L0 function.
|
||||
|
||||
:param p_initial: The initial state distribution.
|
||||
:param p_matrix: A matrix of transition probabilities.
|
||||
@ -40,7 +40,7 @@ def l0(p_initial, p_matrix, m):
|
||||
|
||||
|
||||
def l1(p_initial, p_matrix, m):
|
||||
""" Compute the L1 function.
|
||||
"""Compute the L1 function.
|
||||
|
||||
:param p_initial: The initial state distribution.
|
||||
:param p_matrix: A matrix of transition probabilities.
|
||||
|
@ -12,15 +12,15 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
""" Multisize sliding window workload estimation functions.
|
||||
"""Multisize sliding window workload estimation functions.
|
||||
"""
|
||||
|
||||
from itertools import islice
|
||||
from collections import deque
|
||||
from itertools import islice
|
||||
|
||||
|
||||
def mean(data, window_size):
|
||||
""" Get the data mean according to the window size.
|
||||
"""Get the data mean according to the window size.
|
||||
|
||||
:param data: A list of values.
|
||||
:param window_size: A window size.
|
||||
@ -30,7 +30,7 @@ def mean(data, window_size):
|
||||
|
||||
|
||||
def variance(data, window_size):
|
||||
""" Get the data variance according to the window size.
|
||||
"""Get the data variance according to the window size.
|
||||
|
||||
:param data: A list of values.
|
||||
:param window_size: A window size.
|
||||
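For orientation, both estimators above are computed over at most the last window_size observations of a sliding window. The exact normalization used by Terracotta is not visible in this diff, so the following is an illustration only, not the project's implementation.

def mean_sketch(data, window_size):
    window = list(data)[-window_size:]
    return sum(window) / float(len(window))

def variance_sketch(data, window_size):
    window = list(data)[-window_size:]
    m = sum(window) / float(len(window))
    return sum((x - m) ** 2 for x in window) / float(len(window))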
@ -41,7 +41,7 @@ def variance(data, window_size):
|
||||
|
||||
|
||||
def acceptable_variance(probability, window_size):
|
||||
""" Get the acceptable variance.
|
||||
"""Get the acceptable variance.
|
||||
|
||||
:param probability: The probability to use.
|
||||
:param window_size: A window size.
|
||||
@ -51,7 +51,7 @@ def acceptable_variance(probability, window_size):
|
||||
|
||||
|
||||
def estimate_probability(data, window_size, state):
|
||||
""" Get the estimated probability.
|
||||
"""Get the estimated probability.
|
||||
|
||||
:param data: A list of data values.
|
||||
:param window_size: The window size.
|
||||
@ -62,7 +62,7 @@ def estimate_probability(data, window_size, state):
|
||||
|
||||
|
||||
def update_request_windows(request_windows, previous_state, current_state):
|
||||
""" Update and return the updated request windows.
|
||||
"""Update and return the updated request windows.
|
||||
|
||||
:param request_windows: The previous request windows.
|
||||
:param previous_state: The previous state.
|
||||
@ -75,7 +75,7 @@ def update_request_windows(request_windows, previous_state, current_state):
|
||||
|
||||
def update_estimate_windows(estimate_windows, request_windows,
|
||||
previous_state):
|
||||
""" Update and return the updated estimate windows.
|
||||
"""Update and return the updated estimate windows.
|
||||
|
||||
:param estimate_windows: The previous estimate windows.
|
||||
:param request_windows: The current request windows.
|
||||
@ -97,7 +97,7 @@ def update_estimate_windows(estimate_windows, request_windows,
|
||||
|
||||
|
||||
def update_variances(variances, estimate_windows, previous_state):
|
||||
""" Updated and return the updated variances.
|
||||
"""Updated and return the updated variances.
|
||||
|
||||
:param variances: The previous variances.
|
||||
:param estimate_windows: The current estimate windows.
|
||||
@ -118,7 +118,7 @@ def update_variances(variances, estimate_windows, previous_state):
|
||||
|
||||
def update_acceptable_variances(acceptable_variances, estimate_windows,
|
||||
previous_state):
|
||||
""" Update and return the updated acceptable variances.
|
||||
"""Update and return the updated acceptable variances.
|
||||
|
||||
:param acceptable_variances: The previous acceptable variances.
|
||||
:param estimate_windows: The current estimate windows.
|
||||
@ -136,7 +136,7 @@ def update_acceptable_variances(acceptable_variances, estimate_windows,
|
||||
|
||||
|
||||
def select_window(variances, acceptable_variances, window_sizes):
|
||||
""" Select window sizes according to the acceptable variances.
|
||||
"""Select window sizes according to the acceptable variances.
|
||||
|
||||
:param variances: The variances.
|
||||
:param acceptable_variances: The acceptable variances.
|
||||
@ -159,7 +159,7 @@ def select_window(variances, acceptable_variances, window_sizes):
|
||||
|
||||
|
||||
def select_best_estimates(estimate_windows, selected_windows):
|
||||
""" Select the best estimates according to the selected windows.
|
||||
"""Select the best estimates according to the selected windows.
|
||||
|
||||
:param estimate_windows: The estimate windows.
|
||||
:param selected_windows: The selected window sizes.
|
||||
@ -179,7 +179,7 @@ def select_best_estimates(estimate_windows, selected_windows):
|
||||
|
||||
|
||||
def init_request_windows(number_of_states, max_window_size):
|
||||
""" Initialize a request window data structure.
|
||||
"""Initialize a request window data structure.
|
||||
|
||||
:param number_of_states: The number of states.
|
||||
:param max_window_size: The max size of the request windows.
|
||||
@ -190,7 +190,7 @@ def init_request_windows(number_of_states, max_window_size):
|
||||
|
||||
|
||||
def init_variances(window_sizes, number_of_states):
|
||||
""" Initialize a variances data structure.
|
||||
"""Initialize a variances data structure.
|
||||
|
||||
:param window_sizes: The required window sizes.
|
||||
:param number_of_states: The number of states.
|
||||
@ -206,7 +206,7 @@ def init_variances(window_sizes, number_of_states):
|
||||
|
||||
|
||||
def init_deque_structure(window_sizes, number_of_states):
|
||||
""" Initialize a 3 level deque data structure.
|
||||
"""Initialize a 3 level deque data structure.
|
||||
|
||||
:param window_sizes: The required window sizes.
|
||||
:param number_of_states: The number of states.
|
||||
@ -222,7 +222,7 @@ def init_deque_structure(window_sizes, number_of_states):
|
||||
|
||||
|
||||
def init_selected_window_sizes(window_sizes, number_of_states):
|
||||
""" Initialize a selected window sizes data structure.
|
||||
"""Initialize a selected window sizes data structure.
|
||||
|
||||
:param window_sizes: The required window sizes.
|
||||
:param number_of_states: The number of states.
|
||||
|
@ -12,14 +12,14 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
""" Functions for defing the NLP problem of the MHOD algorithm.
|
||||
"""Functions for defing the NLP problem of the MHOD algorithm.
|
||||
"""
|
||||
|
||||
import operator
|
||||
|
||||
|
||||
def build_objective(ls, state_vector, p):
|
||||
""" Creates an objective function, which is a sum of the L functions.
|
||||
"""Creates an objective function, which is a sum of the L functions.
|
||||
|
||||
:param ls: A list of L functions.
|
||||
:param state-vector: A state vector.
|
||||
@ -31,10 +31,9 @@ def build_objective(ls, state_vector, p):
|
||||
return objective
|
||||
|
||||
|
||||
@contract
|
||||
def build_constraint(otf, migration_time, ls, state_vector,
|
||||
p, time_in_states, time_in_state_n):
|
||||
""" Creates an optimization constraint from the L functions.
|
||||
"""Creates an optimization constraint from the L functions.
|
||||
|
||||
:param otf: The OTF parameter.
|
||||
:param migration_time: The VM migration time in time steps.
|
||||
@ -53,4 +52,4 @@ def build_constraint(otf, migration_time, ls, state_vector,
|
||||
(migration_time +
|
||||
time_in_states +
|
||||
sum(l(state_vector, p, m_list) for l in ls))
|
||||
return (constraint, operator.le, otf)
|
||||
return constraint, operator.le, otf
|
||||
|
@ -12,7 +12,7 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
""" OTF threshold based algorithms.
|
||||
"""OTF threshold based algorithms.
|
||||
"""
|
||||
|
||||
from oslo_log import log as logging
|
||||
@ -22,7 +22,7 @@ LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def otf_factory(time_step, migration_time, params):
|
||||
""" Creates the OTF algorithm with limiting and migration time.
|
||||
"""Creates the OTF algorithm with limiting and migration time.
|
||||
|
||||
:param time_step: The length of the simulation time step in seconds.
|
||||
:param migration_time: The VM migration time in time seconds.
|
||||
@ -30,6 +30,7 @@ def otf_factory(time_step, migration_time, params):
|
||||
:return: A function implementing the OTF algorithm.
|
||||
"""
|
||||
migration_time_normalized = float(migration_time) / time_step
|
||||
|
||||
def otf_wrapper(utilization, state=None):
|
||||
if state is None or state == {}:
|
||||
state = {'overload': 0,
|
||||
@ -45,7 +46,7 @@ def otf_factory(time_step, migration_time, params):
|
||||
|
||||
|
||||
def otf(otf, threshold, limit, migration_time, utilization, state):
|
||||
""" The OTF threshold algorithm with limiting and migration time.
|
||||
"""The OTF threshold algorithm with limiting and migration time.
|
||||
|
||||
:param otf: The threshold on the OTF value.
|
||||
:param threshold: The utilization overload threshold.
|
||||
@ -66,16 +67,17 @@ def otf(otf, threshold, limit, migration_time, utilization, state):
|
||||
LOG.debug('OTF:' + str(float(state['overload']) / state['total']))
|
||||
LOG.debug('OTF migration time:' + str(migration_time))
|
||||
LOG.debug('OTF + migration time:' +
|
||||
str((migration_time + state['overload']) / \
|
||||
(migration_time + state['total'])))
|
||||
str((migration_time + state['overload']
|
||||
) / (migration_time + state['total'])))
|
||||
LOG.debug('OTF decision:' +
|
||||
str(overload and (migration_time + state['overload']) / \
|
||||
(migration_time + state['total']) >= otf))
|
||||
str(overload and (
|
||||
migration_time + state['overload']) / (
|
||||
migration_time + state['total']) >= otf))
|
||||
|
||||
if not overload or len(utilization) < limit:
|
||||
decision = False
|
||||
else:
|
||||
decision = (migration_time + state['overload']) / \
|
||||
(migration_time + state['total']) >= otf
|
||||
decision = (migration_time + state['overload']) / (
|
||||
migration_time + state['total']) >= otf
|
||||
|
||||
return (decision, state)
|
||||
return decision, state
|
||||
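A worked example of the OTF decision rule reconstructed above, with illustrative numbers: state['overload'] counts observed overloaded steps, state['total'] counts all observed steps, and the migration time is added to both sides to account for the overload accumulated during a live migration.

migration_time = 2.0                         # in time steps
state = {'overload': 14, 'total': 30}
otf_limit = 0.5
decision = (migration_time + state['overload']) / (
    migration_time + state['total']) >= otf_limit
# (2 + 14) / (2 + 30) = 0.5 -> True: even counting the migration itself,
# the host would spend at least 50% of its time overloaded.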
|
@ -12,16 +12,16 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
""" Statistics based overload detection algorithms.
|
||||
"""Statistics based overload detection algorithms.
|
||||
"""
|
||||
|
||||
import numpy as np
|
||||
from numpy import median
|
||||
from scipy.optimize import leastsq
|
||||
import numpy as np
|
||||
|
||||
|
||||
def loess_factory(time_step, migration_time, params):
|
||||
""" Creates the Loess based overload detection algorithm.
|
||||
"""Creates the Loess based overload detection algorithm.
|
||||
|
||||
:param time_step: The length of the simulation time step in seconds.
|
||||
:param migration_time: The VM migration time in time seconds.
|
||||
@ -39,7 +39,7 @@ def loess_factory(time_step, migration_time, params):
|
||||
|
||||
|
||||
def loess_robust_factory(time_step, migration_time, params):
|
||||
""" Creates the robust Loess based overload detection algorithm.
|
||||
"""Creates the robust Loess based overload detection algorithm.
|
||||
|
||||
:param time_step: The length of the simulation time step in seconds.
|
||||
:param migration_time: The VM migration time in time seconds.
|
||||
@ -57,7 +57,7 @@ def loess_robust_factory(time_step, migration_time, params):
|
||||
|
||||
|
||||
def mad_threshold_factory(time_step, migration_time, params):
|
||||
""" Creates the MAD based utilization threshold algorithm.
|
||||
"""Creates the MAD based utilization threshold algorithm.
|
||||
|
||||
:param time_step: The length of the simulation time step in seconds.
|
||||
:param migration_time: The VM migration time in time seconds.
|
||||
@ -72,7 +72,7 @@ def mad_threshold_factory(time_step, migration_time, params):
|
||||
|
||||
|
||||
def iqr_threshold_factory(time_step, migration_time, params):
|
||||
""" Creates the IQR based utilization threshold algorithm.
|
||||
"""Creates the IQR based utilization threshold algorithm.
|
||||
|
||||
:param time_step: The length of the simulation time step in seconds.
|
||||
:param migration_time: The VM migration time in time seconds.
|
||||
@ -87,7 +87,7 @@ def iqr_threshold_factory(time_step, migration_time, params):
|
||||
|
||||
|
||||
def loess(threshold, param, length, migration_time, utilization):
|
||||
""" The Loess based overload detection algorithm.
|
||||
"""The Loess based overload detection algorithm.
|
||||
|
||||
:param threshold: The CPU utilization threshold.
|
||||
:param param: The safety parameter.
|
||||
@ -105,7 +105,7 @@ def loess(threshold, param, length, migration_time, utilization):
|
||||
|
||||
|
||||
def loess_robust(threshold, param, length, migration_time, utilization):
|
||||
""" The robust Loess based overload detection algorithm.
|
||||
"""The robust Loess based overload detection algorithm.
|
||||
|
||||
:param threshold: The CPU utilization threshold.
|
||||
:param param: The safety parameter.
|
||||
@ -122,8 +122,9 @@ def loess_robust(threshold, param, length, migration_time, utilization):
|
||||
utilization)
|
||||
|
||||
|
||||
def loess_abstract(estimator, threshold, param, length, migration_time, utilization):
|
||||
""" The abstract Loess algorithm.
|
||||
def loess_abstract(estimator, threshold, param, length, migration_time,
|
||||
utilization):
|
||||
"""The abstract Loess algorithm.
|
||||
|
||||
:param estimator: A parameter estimation function.
|
||||
:param threshold: The CPU utilization threshold.
|
||||
@ -141,7 +142,7 @@ def loess_abstract(estimator, threshold, param, length, migration_time, utilizat
|
||||
|
||||
|
||||
def mad_threshold(param, limit, utilization):
|
||||
""" The MAD based threshold algorithm.
|
||||
"""The MAD based threshold algorithm.
|
||||
|
||||
:param param: The safety parameter.
|
||||
:param limit: The minimum allowed length of the utilization history.
|
||||
@ -154,7 +155,7 @@ def mad_threshold(param, limit, utilization):
|
||||
|
||||
|
||||
def iqr_threshold(param, limit, utilization):
|
||||
""" The IQR based threshold algorithm.
|
||||
"""The IQR based threshold algorithm.
|
||||
|
||||
:param param: The safety parameter.
|
||||
:param limit: The minimum allowed length of the utilization history.
|
||||
@ -167,7 +168,7 @@ def iqr_threshold(param, limit, utilization):
|
||||
|
||||
|
||||
def utilization_threshold_abstract(f, limit, utilization):
|
||||
""" The abstract utilization threshold algorithm.
|
||||
"""The abstract utilization threshold algorithm.
|
||||
|
||||
:param f: A function to calculate the utilization threshold.
|
||||
:param limit: The minimum allowed length of the utilization history.
|
||||
@ -180,7 +181,7 @@ def utilization_threshold_abstract(f, limit, utilization):
|
||||
|
||||
|
||||
def mad(data):
|
||||
""" Calculate the Median Absolute Deviation from the data.
|
||||
"""Calculate the Median Absolute Deviation from the data.
|
||||
|
||||
:param data: The data to analyze.
|
||||
:return: The calculated MAD.
|
||||
@ -190,7 +191,7 @@ def mad(data):
|
||||
|
||||
|
||||
def iqr(data):
|
||||
""" Calculate the Interquartile Range from the data.
|
||||
"""Calculate the Interquartile Range from the data.
|
||||
|
||||
:param data: The data to analyze.
|
||||
:return: The calculated IQR.
|
||||
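A small numeric illustration of the two robust estimators named above, using the numpy import this module already declares; how the resulting spread is turned into a utilization threshold is not shown in this diff.

import numpy as np

data = [10, 12, 11, 50, 12, 11, 13]
mad = np.median(np.abs(np.array(data) - np.median(data)))    # -> 1.0
q1, q3 = np.percentile(data, 25), np.percentile(data, 75)    # -> 11.0, 12.5
iqr = q3 - q1                                                # -> 1.5
# Note how the outlier (50) barely moves either estimate, unlike the variance.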
@ -203,7 +204,7 @@ def iqr(data):
|
||||
|
||||
|
||||
def loess_parameter_estimates(data):
|
||||
""" Calculate Loess parameter estimates.
|
||||
"""Calculate Loess parameter estimates.
|
||||
|
||||
:param data: A data set.
|
||||
:return: The parameter estimates.
|
||||
@ -221,7 +222,7 @@ def loess_parameter_estimates(data):
|
||||
|
||||
|
||||
def loess_robust_parameter_estimates(data):
|
||||
""" Calculate Loess robust parameter estimates.
|
||||
"""Calculate Loess robust parameter estimates.
|
||||
|
||||
:param data: A data set.
|
||||
:return: The parameter estimates.
|
||||
@ -245,7 +246,7 @@ def loess_robust_parameter_estimates(data):
|
||||
|
||||
|
||||
def tricube_weights(n):
|
||||
""" Generates a list of weights according to the tricube function.
|
||||
"""Generates a list of weights according to the tricube function.
|
||||
|
||||
:param n: The number of weights to generate.
|
||||
:return: A list of generated weights.
|
||||
@ -258,7 +259,7 @@ def tricube_weights(n):
|
||||
|
||||
|
||||
def tricube_bisquare_weights(data):
|
||||
""" Generates a weights according to the tricube bisquare function.
|
||||
"""Generates a weights according to the tricube bisquare function.
|
||||
|
||||
:param data: The input data.
|
||||
:return: A list of generated weights.
|
||||
|
@ -12,12 +12,12 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
""" Trivial overload detection algorithms.
|
||||
"""Trivial overload detection algorithms.
|
||||
"""
|
||||
|
||||
|
||||
def never_overloaded_factory(time_step, migration_time, params):
|
||||
""" Creates an algorithm that never considers the host overloaded.
|
||||
"""Creates an algorithm that never considers the host overloaded.
|
||||
|
||||
:param time_step: The length of the simulation time step in seconds.
|
||||
:param migration_time: The VM migration time in time seconds.
|
||||
@ -28,7 +28,7 @@ def never_overloaded_factory(time_step, migration_time, params):
|
||||
|
||||
|
||||
def threshold_factory(time_step, migration_time, params):
|
||||
""" Creates the static CPU utilization threshold algorithm.
|
||||
"""Creates the static CPU utilization threshold algorithm.
|
||||
|
||||
:param time_step: The length of the simulation time step in seconds.
|
||||
:param migration_time: The VM migration time in time seconds.
|
||||
@ -41,7 +41,7 @@ def threshold_factory(time_step, migration_time, params):
|
||||
|
||||
|
||||
def last_n_average_threshold_factory(time_step, migration_time, params):
|
||||
""" Creates the averaging CPU utilization threshold algorithm.
|
||||
"""Creates the averaging CPU utilization threshold algorithm.
|
||||
|
||||
:param time_step: The length of the simulation time step in seconds.
|
||||
:param migration_time: The VM migration time in time seconds.
|
||||
@ -56,7 +56,7 @@ def last_n_average_threshold_factory(time_step, migration_time, params):
|
||||
|
||||
|
||||
def threshold(threshold, utilization):
|
||||
""" The static CPU utilization threshold algorithm.
|
||||
"""The static CPU utilization threshold algorithm.
|
||||
|
||||
:param threshold: The threshold on the CPU utilization.
|
||||
:param utilization: The history of the host's CPU utilization.
|
||||
@ -68,7 +68,7 @@ def threshold(threshold, utilization):
|
||||
|
||||
|
||||
def last_n_average_threshold(threshold, n, utilization):
|
||||
""" The averaging CPU utilization threshold algorithm.
|
||||
"""The averaging CPU utilization threshold algorithm.
|
||||
|
||||
:param threshold: The threshold on the CPU utilization.
|
||||
:param n: The number of last CPU utilization values to average.
|
||||
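A hedged sketch of the averaging threshold check described above: the host is treated as overloaded when the mean of the last n utilization values exceeds the static threshold. The comparison direction is inferred from the overload-detection context; it is not shown verbatim in this diff.

def last_n_average_threshold_sketch(threshold, n, utilization):
    if not utilization:
        return False
    last_n = utilization[-n:]
    return sum(last_n) / float(len(last_n)) > threshold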
|
@ -12,12 +12,12 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
""" Trivial underload detection algorithms.
|
||||
"""Trivial underload detection algorithms.
|
||||
"""
|
||||
|
||||
|
||||
def always_underloaded_factory(time_step, migration_time, params):
|
||||
""" Creates an algorithm that always considers the host underloaded.
|
||||
"""Creates an algorithm that always considers the host underloaded.
|
||||
|
||||
:param time_step: The length of the simulation time step in seconds.
|
||||
:param migration_time: The VM migration time in time seconds.
|
||||
@ -28,7 +28,7 @@ def always_underloaded_factory(time_step, migration_time, params):
|
||||
|
||||
|
||||
def threshold_factory(time_step, migration_time, params):
|
||||
""" Creates the threshold underload detection algorithm.
|
||||
"""Creates the threshold underload detection algorithm.
|
||||
|
||||
:param time_step: The length of the simulation time step in seconds.
|
||||
:param migration_time: The VM migration time in time seconds.
|
||||
@ -39,8 +39,9 @@ def threshold_factory(time_step, migration_time, params):
|
||||
utilization),
|
||||
{})
|
||||
|
||||
|
||||
def last_n_average_threshold_factory(time_step, migration_time, params):
|
||||
""" Creates the averaging threshold underload detection algorithm.
|
||||
"""Creates the averaging threshold underload detection algorithm.
|
||||
|
||||
:param time_step: The length of the simulation time step in seconds.
|
||||
:param migration_time: The VM migration time in time seconds.
|
||||
@ -55,7 +56,7 @@ def last_n_average_threshold_factory(time_step, migration_time, params):
|
||||
|
||||
|
||||
def threshold(threshold, utilization):
|
||||
""" Static threshold-based underload detection algorithm.
|
||||
"""Static threshold-based underload detection algorithm.
|
||||
|
||||
The algorithm returns True, if the last value of the host's
|
||||
CPU utilization is lower than the specified threshold.
|
||||
@ -70,7 +71,7 @@ def threshold(threshold, utilization):
|
||||
|
||||
|
||||
def last_n_average_threshold(threshold, n, utilization):
|
||||
""" Averaging static threshold-based underload detection algorithm.
|
||||
"""Averaging static threshold-based underload detection algorithm.
|
||||
|
||||
The algorithm returns True, if the average of the last n values of
|
||||
the host's CPU utilization is lower than the specified threshold.
|
||||
|
@ -12,15 +12,15 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
""" VM selection algorithms.
|
||||
"""VM selection algorithms.
|
||||
"""
|
||||
|
||||
from random import choice
|
||||
import operator
|
||||
from random import choice
|
||||
|
||||
|
||||
def random_factory(time_step, migration_time, params):
|
||||
""" Creates the random VM selection algorithm.
|
||||
"""Creates the random VM selection algorithm.
|
||||
|
||||
:param time_step: The length of the simulation time step in seconds.
|
||||
:param migration_time: The VM migration time in time seconds.
|
||||
@ -31,7 +31,7 @@ def random_factory(time_step, migration_time, params):
|
||||
|
||||
|
||||
def minimum_utilization_factory(time_step, migration_time, params):
|
||||
""" Creates the minimum utilization VM selection algorithm.
|
||||
"""Creates the minimum utilization VM selection algorithm.
|
||||
|
||||
:param time_step: The length of the simulation time step in seconds.
|
||||
:param migration_time: The VM migration time in time seconds.
|
||||
@ -43,7 +43,7 @@ def minimum_utilization_factory(time_step, migration_time, params):
|
||||
|
||||
|
||||
def minimum_migration_time_factory(time_step, migration_time, params):
|
||||
""" Creates the minimum migration time VM selection algorithm.
|
||||
"""Creates the minimum migration time VM selection algorithm.
|
||||
|
||||
:param time_step: The length of the simulation time step in seconds.
|
||||
:param migration_time: The VM migration time in time seconds.
|
||||
@ -55,12 +55,14 @@ def minimum_migration_time_factory(time_step, migration_time, params):
|
||||
|
||||
|
||||
def minimum_migration_time_max_cpu_factory(time_step, migration_time, params):
|
||||
""" Creates the minimum migration time / max CPU usage VM selection algorithm.
|
||||
"""Creates the minimum migration time / max CPU usage
|
||||
VM selection algorithm.
|
||||
|
||||
:param time_step: The length of the simulation time step in seconds.
|
||||
:param migration_time: The VM migration time in time seconds.
|
||||
:param params: A dictionary containing the algorithm's parameters.
|
||||
:return: A function implementing the minimum migration time / max CPU VM selection.
|
||||
:return: A function implementing the minimum migration time / max
|
||||
CPU VM selection.
|
||||
"""
|
||||
return lambda vms_cpu, vms_ram, state=None: \
|
||||
([minimum_migration_time_max_cpu(params['last_n'],
|
||||
@ -69,7 +71,7 @@ def minimum_migration_time_max_cpu_factory(time_step, migration_time, params):
|
||||
|
||||
|
||||
def minimum_migration_time(vms_ram):
|
||||
""" Selects the VM with the minimum RAM usage.
|
||||
"""Selects the VM with the minimum RAM usage.
|
||||
|
||||
:param vms_ram: A map of VM UUID and their RAM usage data.
|
||||
:return: A VM to migrate from the host.
|
||||
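A minimal sketch of the RAM-based selection described above: the VM with the smallest RAM footprint migrates fastest, so it is picked first. Tie-breaking in the real implementation may differ; this simply takes the first minimum.

import operator

def minimum_migration_time_sketch(vms_ram):
    vm_uuid, _ = min(vms_ram.items(), key=operator.itemgetter(1))
    return vm_uuid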
@ -80,7 +82,7 @@ def minimum_migration_time(vms_ram):
|
||||
|
||||
|
||||
def minimum_utilization(vms_cpu):
|
||||
""" Selects the VM with the minimum CPU utilization.
|
||||
"""Selects the VM with the minimum CPU utilization.
|
||||
|
||||
:param vms_cpu: A map of VM UUID and their CPU utilization histories.
|
||||
:return: A VM to migrate from the host.
|
||||
@ -92,7 +94,7 @@ def minimum_utilization(vms_cpu):
|
||||
|
||||
|
||||
def random(vms_cpu):
|
||||
""" Selects a random VM.
|
||||
"""Selects a random VM.
|
||||
|
||||
:param vms_cpu: A map of VM UUID and their CPU utilization histories.
|
||||
:return: A VM to migrate from the host.
|
||||
@ -101,7 +103,7 @@ def random(vms_cpu):
|
||||
|
||||
|
||||
def minimum_migration_time_max_cpu(last_n, vms_cpu, vms_ram):
|
||||
""" Selects the VM with the minimum RAM and maximum CPU usage.
|
||||
"""Selects the VM with the minimum RAM and maximum CPU usage.
|
||||
|
||||
:param last_n: The number of last CPU utilization values to average.
|
||||
:param vms_cpu: A map of VM UUID and their CPU utilization histories.
|
||||
|
@ -17,6 +17,7 @@ from oslo_log import log as logging
|
||||
import oslo_messaging as messaging
|
||||
from oslo_messaging.rpc import client
|
||||
|
||||
from terracotta import context as auth_ctx
|
||||
from terracotta import exceptions as exc
|
||||
|
||||
|
||||
@ -96,7 +97,7 @@ def wrap_messaging_exception(method):
|
||||
return decorator
|
||||
|
||||
|
||||
class EngineClient():
|
||||
class EngineClient(object):
|
||||
"""RPC Engine client."""
|
||||
|
||||
def __init__(self, transport):
|
||||
@ -121,7 +122,7 @@ class LocalManagerServer(object):
|
||||
self._executor = manager
|
||||
|
||||
|
||||
class ExecutorClient():
|
||||
class ExecutorClient(object):
|
||||
"""RPC Executor client."""
|
||||
|
||||
def __init__(self, transport):
|
||||
|
@ -1,13 +0,0 @@
# Copyright 2012 Anton Beloglazov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
20
terracotta/tests/base.py
Normal file
@ -0,0 +1,20 @@
# Copyright (c) 2016 Huawei Technologies Co., Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslotest import base


class TestCase(base.BaseTestCase):
    """Test case base class for all unit tests."""
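With this base class in place, unit tests under terracotta/tests/unit/ can build on oslotest's fixtures and testtools assertions. A hypothetical example (module name and assertion invented for illustration):

from terracotta.tests import base


class TestExample(base.TestCase):
    """Illustrative test case built on the new base class."""

    def test_addition(self):
        # base.BaseTestCase comes from oslotest and ultimately from
        # testtools, so the usual unittest assertions are available.
        self.assertEqual(4, 2 + 2)
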
28
terracotta/tests/unit/test_fake.py
Normal file
@ -0,0 +1,28 @@
# Copyright 2016 Huawei Technologies Co. Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import unittest2


class TestCase(unittest2.TestCase):

    def setUp(self):
        """Run before each test method to initialize test environment."""
        super(TestCase, self).setUp()


class FakeTest(TestCase):

    def test_fake_test(self):
        pass
@ -13,8 +13,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from sqlalchemy import *
from sqlalchemy import create_engine
from sqlalchemy import MetaData
from sqlalchemy.sql import func
from sqlalchemy import Column, DateTime, ForeignKey, Integer, String, Table

from oslo_config import cfg
from oslo_log import log as logging
@ -27,7 +29,7 @@ CONF = cfg.CONF


def init_db():
    """ Initialize the database.
    """Initialize the database.

    :param sql_connection: A database connection URL.
    :return: The initialized database.
@ -92,5 +94,6 @@ def init_db():
        vm_resource_usage, vm_migrations, host_states,
        host_overload)

    LOG.debug('Initialized a DB connection to %s', CONF.database.sql_connection)
    LOG.debug('Initialized a DB connection to %s',
              CONF.database.sql_connection)
    return db

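Replacing the wildcard `from sqlalchemy import *` with explicit imports makes the module's dependencies visible and keeps flake8 quiet. A minimal sketch of the kind of setup those names support; the SQLite URL and the table definition below are invented for illustration, while the real code reads CONF.database.sql_connection:

from sqlalchemy import Column, Integer, String, Table
from sqlalchemy import MetaData
from sqlalchemy import create_engine

# In-memory SQLite so the sketch is self-contained and runnable.
engine = create_engine('sqlite:///:memory:')
metadata = MetaData()

hosts = Table('hosts', metadata,
              Column('id', Integer, primary_key=True),
              Column('hostname', String(255)))

metadata.create_all(engine)  # creates the table on the bound engine
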
@ -1,5 +1,3 @@
#!/usr/bin/python2

# Copyright 2012 Anton Beloglazov
#
# Licensed under the Apache License, Version 2.0 (the "License");
@ -14,13 +12,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
import os
import random
import shutil
import time
from datetime import datetime
from db import init_db
from db_utils import init_db

import sys
import time


if len(sys.argv) < 5:
    print 'You must specify 4 arguments:'
@ -30,17 +27,16 @@ if len(sys.argv) < 5:
    print '4. The finish datetime in the format: %Y-%m-%d %H:%M:%S'
    sys.exit(1)

db = init_db('mysql://' + sys.argv[1] + ':' + sys.argv[2] + '@localhost/spe')
db = init_db(
    'mysql://' + sys.argv[1] + ':' + sys.argv[2] + '@localhost/spe')
start_time = datetime.fromtimestamp(
    time.mktime(time.strptime(sys.argv[3], '%Y-%m-%d %H:%M:%S')))
finish_time = datetime.fromtimestamp(
    time.mktime(time.strptime(sys.argv[4], '%Y-%m-%d %H:%M:%S')))

#print "Start time: " + str(start_time)
#print "Finish time: " + str(finish_time)

def total_seconds(delta):
    return (delta.microseconds +
    return (delta.microseconds +
        (delta.seconds + delta.days * 24 * 3600) * 1000000) / 1000000

total_time = 0
@ -49,13 +45,15 @@ for hostname, host_id in db.select_host_ids().items():
    prev_timestamp = start_time
    prev_state = 1
    states = {0: [], 1: []}
    for timestamp, state in db.select_host_states(host_id, start_time, finish_time):
    for timestamp, state in db.select_host_states(
            host_id, start_time, finish_time):
        if prev_timestamp:
            states[prev_state].append(total_seconds(timestamp - prev_timestamp))
            states[prev_state].append(total_seconds(
                timestamp - prev_timestamp))
        prev_timestamp = timestamp
        prev_state = state
    states[prev_state].append(total_seconds(finish_time - prev_timestamp))
    #print states
    states[prev_state].append(total_seconds(
        finish_time - prev_timestamp))
    off_time = sum(states[0])
    on_time = sum(states[1])
    total_time += off_time + on_time
@ -63,4 +61,5 @@ for hostname, host_id in db.select_host_ids().items():

print "Total time: " + str(total_time)
print "Total idle time: " + str(total_idle_time)
print "Idle time fraction: " + str(float(total_idle_time) / total_time)
print "Idle time fraction: " + str(
    float(total_idle_time) / total_time)

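The script above walks each host's (timestamp, state) rows, sums the lengths of the 0 (switched off) and 1 (on) intervals, and prints the idle fraction; its hand-rolled total_seconds() mirrors timedelta.total_seconds(), which only became available in Python 2.7. The same accumulation in a compact Python 3 style sketch, with invented sample data standing in for db.select_host_states():

from datetime import datetime, timedelta


def idle_fraction(rows, start_time, finish_time):
    # rows: ordered (timestamp, state) pairs; state 0 = off/idle, 1 = on.
    durations = {0: timedelta(), 1: timedelta()}
    prev_timestamp, prev_state = start_time, 1
    for timestamp, state in rows:
        durations[prev_state] += timestamp - prev_timestamp
        prev_timestamp, prev_state = timestamp, state
    durations[prev_state] += finish_time - prev_timestamp  # close the last interval
    total = durations[0] + durations[1]
    return durations[0].total_seconds() / total.total_seconds()


rows = [(datetime(2016, 1, 1, 0, 30), 0), (datetime(2016, 1, 1, 1, 30), 1)]
print(idle_fraction(rows, datetime(2016, 1, 1), datetime(2016, 1, 1, 2)))  # 0.5
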
@ -1,5 +1,3 @@
#!/usr/bin/python2

# Copyright 2012 Anton Beloglazov
#
# Licensed under the Apache License, Version 2.0 (the "License");
@ -14,13 +12,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
import os
import random
import shutil
import time
from datetime import datetime
from db import init_db
from db_utils import init_db
import sys
import time


if len(sys.argv) < 5:
    print 'You must specify 4 arguments:'
@ -30,17 +26,16 @@ if len(sys.argv) < 5:
    print '4. The finish datetime in the format: %Y-%m-%d %H:%M:%S'
    sys.exit(1)

db = init_db('mysql://' + sys.argv[1] + ':' + sys.argv[2] + '@localhost/neat')
db = init_db(
    'mysql://' + sys.argv[1] + ':' + sys.argv[2] + '@localhost/terracotta')
start_time = datetime.fromtimestamp(
    time.mktime(time.strptime(sys.argv[3], '%Y-%m-%d %H:%M:%S')))
finish_time = datetime.fromtimestamp(
    time.mktime(time.strptime(sys.argv[4], '%Y-%m-%d %H:%M:%S')))

#print "Start time: " + str(start_time)
#print "Finish time: " + str(finish_time)

def total_seconds(delta):
    return (delta.microseconds +
    return (delta.microseconds +
        (delta.seconds + delta.days * 24 * 3600) * 1000000) / 1000000

total_idle_time = 0
@ -48,13 +43,15 @@ for hostname, host_id in db.select_host_ids().items():
    prev_timestamp = start_time
    prev_state = 1
    states = {0: [], 1: []}
    for timestamp, state in db.select_host_states(host_id, start_time, finish_time):
    for timestamp, state in db.select_host_states(host_id,
                                                  start_time, finish_time):
        if prev_timestamp:
            states[prev_state].append(total_seconds(timestamp - prev_timestamp))
            states[prev_state].append(total_seconds(
                timestamp - prev_timestamp))
        prev_timestamp = timestamp
        prev_state = state
    states[prev_state].append(total_seconds(finish_time - prev_timestamp))
    #print states
    states[prev_state].append(total_seconds(
        finish_time - prev_timestamp))
    off_time = sum(states[0])
    total_idle_time += off_time

@ -64,13 +61,15 @@ for hostname, host_id in db.select_host_ids().items():
    prev_timestamp = start_time
    prev_state = 0
    states = {0: [], 1: []}
    for timestamp, state in db.select_host_overload(host_id, start_time, finish_time):
    for timestamp, state in db.select_host_overload(
            host_id, start_time, finish_time):
        if prev_timestamp:
            states[prev_state].append(total_seconds(timestamp - prev_timestamp))
            states[prev_state].append(
                total_seconds(timestamp - prev_timestamp))
        prev_timestamp = timestamp
        prev_state = state
    states[prev_state].append(total_seconds(finish_time - prev_timestamp))
    #print states
    states[prev_state].append(
        total_seconds(finish_time - prev_timestamp))
    nonoverload_time = sum(states[0])
    overload_time = sum(states[1])
    total_time += nonoverload_time + overload_time
@ -78,4 +77,5 @@ for hostname, host_id in db.select_host_ids().items():

print "Total time: " + str(total_time)
print "Overload time: " + str(total_overload_time)
print "Overload time fraction: " + str(float(total_overload_time) / (total_time - total_idle_time))
print "Overload time fraction: " + str(
    float(total_overload_time) / (total_time - total_idle_time))

@ -14,13 +14,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
import os
import random
import shutil
import time
from datetime import datetime
from db import init_db
from db_utils import init_db
import sys
import time


if len(sys.argv) < 5:
    print 'You must specify 4 arguments:'
@ -30,12 +28,12 @@ if len(sys.argv) < 5:
    print '4. The finish datetime in the format: %Y-%m-%d %H:%M:%S'
    sys.exit(1)

db = init_db('mysql://' + sys.argv[1] + ':' + sys.argv[2] + '@localhost/spe')
db = init_db(
    'mysql://' + sys.argv[1] + ':' + sys.argv[2] + '@localhost/spe')
start_time = datetime.fromtimestamp(
    time.mktime(time.strptime(sys.argv[3], '%Y-%m-%d %H:%M:%S')))
finish_time = datetime.fromtimestamp(
    time.mktime(time.strptime(sys.argv[4], '%Y-%m-%d %H:%M:%S')))

#print "Start time: " + str(start_time)
#print "Finish time: " + str(finish_time)
print "VM migrations: " + str(len(db.select_vm_migrations(start_time, finish_time)))
print "VM migrations: " + str(
    len(db.select_vm_migrations(start_time, finish_time)))

@ -1,16 +1,28 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
hacking>=0.9.2,<0.10
coverage>=3.6
pyflakes==0.8.1

hacking<0.11,>=0.10.2
coverage>=3.6 # Apache-2.0
fixtures>=1.3.1 # Apache-2.0/BSD
mock>=1.2 # BSD
python-subunit>=0.0.18
psycopg2>=2.5 # LGPL/ZPL
PyMySQL>=0.6.2 # MIT License
requests-mock>=0.7.0 # Apache-2.0
pylint==1.4.1 # GNU GPL v2
sphinx>=1.1.2,!=1.2.0,!=1.3b1,<1.3
unittest2
oslotest>=1.5.1 # Apache-2.0
oslosphinx>=2.5.0 # Apache-2.0
sphinx!=1.2.0,!=1.3b1,<1.3,>=1.1.2 # BSD
oslosphinx!=3.4.0,>=2.5.0 # Apache-2.0
oslotest>=1.10.0 # Apache-2.0
os-testr>=0.4.1 # Apache-2.0
testrepository>=0.0.18 # Apache-2.0/BSD
testresources>=0.2.4 # Apache-2.0/BSD
testtools>=1.4.0 # MIT
testscenarios>=0.4
tempest-lib>=0.13.0 # Apache-2.0
bandit>=0.13.2 # Apache-2.0
openstackdocstheme>=1.0.3 # Apache-2.0
sphinxcontrib-pecanwsme>=0.8
sphinxcontrib-httpdomain
mock
flake8
unittest
unittest2

20
tools/flake8wrap.sh
Normal file
@ -0,0 +1,20 @@
#!/bin/sh
#
# A simple wrapper around flake8 which makes it possible
# to ask it to only verify files changed in the current
# git HEAD patch.
#
# Intended to be invoked via tox:
#
# tox -epep8 -- -HEAD
#

if test "x$1" = "x-HEAD" ; then
    shift
    files=$(git diff --name-only HEAD~1 | tr '\n' ' ')
    echo "Running flake8 on ${files}"
    diff -u --from-file /dev/null ${files} | flake8 --diff "$@"
else
    echo "Running flake8 on all files"
    exec flake8 "$@"
fi
16
tools/pretty_tox.sh
Normal file
@ -0,0 +1,16 @@
#!/usr/bin/env bash

set -o pipefail

TESTRARGS=$1

# --until-failure is not compatible with --subunit see:
#
# https://bugs.launchpad.net/testrepository/+bug/1411804
#
# this work around exists until that is addressed
if [[ "$TESTARGS" =~ "until-failure" ]]; then
    python setup.py testr --slowest --testr-args="$TESTRARGS"
else
    python setup.py testr --slowest --testr-args="--subunit $TESTRARGS" | subunit-trace -f
fi
50
tox.ini
@ -1,34 +1,42 @@
[tox]
envlist = py27,py34,linters
minversion = 1.6
envlist = py27,linters
skipsdist = True

[testenv]
setenv = VIRTUAL_ENV={envdir}
         NOSE_WITH_OPENSTACK=1
         NOSE_OPENSTACK_COLOR=1
         NOSE_OPENSTACK_RED=0.05
         NOSE_OPENSTACK_YELLOW=0.025
         NOSE_OPENSTACK_SHOW_ELAPSED=1
         NOSE_OPENSTACK_STDOUT=1
deps = -r{toxinidir}/requirements.txt
       -r{toxinidir}/test-requirements.txt
commands =
    /usr/bin/find . -type f -name "*.pyc" -delete
    nosetests -v {posargs}
whitelist_externals = *
sitepackages = True
usedevelop = True
install_command = pip install -U --force-reinstall {opts} {packages}
setenv =
    VIRTUAL_ENV={envdir}
deps =
    -r{toxinidir}/test-requirements.txt
commands = python setup.py testr --slowest --testr-args='{posargs}'
whitelist_externals = rm

[testenv:cover]
commands = python setup.py testr --coverage --testr-args='{posargs}'

[testenv:linters]
commands = flake8 {posargs}
commands =
    bash tools/flake8wrap.sh {posargs}

[testenv:venv]
commands = {posargs}

[testenv:linters]
commands = flake8
distribute = false
[testenv:docs]
commands = python setup.py build_sphinx

[flake8]
ignore = H703,H102,E265,E262,H233
show-source = true
exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,tools,build,setup.py,tests/ci/*,scripts/*
# E125 is deliberately excluded. See https://github.com/jcrocholl/pep8/issues/126
# The rest of the ignores are TODOs
# New from hacking 0.9: E129, E131, H407, H405
# E251 Skipped due to https://github.com/jcrocholl/pep8/issues/301

ignore = E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E251,H405,D100,D101,D102,D103,D104,D105,D200,D202,D203,D204,D205,D208,D211,D301,D400,D401,H233
exclude = .venv,.git,.tox,dist,doc,*openstack/common/*,*lib/python*,*egg,build,tools/
# To get a list of functions that are more complex than 25, set max-complexity
# to 25 and run 'tox -epep8'.
# 34 is currently the most complex thing we have
# TODO(jogo): get this number down to 25 or so
max-complexity=35
