Refactor Optimizer

Part of the "Support Nested Stacks and Updates" story.

To add nested stack support to Valet, compensate for the missing Heat
resource orchestration IDs in nested resources: generate a subset of Heat
stack-lifecycle scheduler hints for each resource in advance, store them as
opaque metadata in Valet, then leverage that metadata at Nova scheduling
time. Make additional accommodations in anticipation of the complexities
introduced by adding support for stack updates.
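As an illustration of the intended flow (all names below are hypothetical,
not Valet's actual API):

    import hashlib
    import json

    def make_lifecycle_hints(stack_id, resource_name):
        # Derive a stable, opaque hint for a nested resource that has no
        # orchestration ID yet (in the spirit of Heat's stack-lifecycle
        # scheduler hints such as heat_root_stack_id / heat_resource_name).
        digest = hashlib.sha1(
            ("%s/%s" % (stack_id, resource_name)).encode("utf-8")).hexdigest()
        return {"heat_root_stack_id": stack_id,
                "heat_resource_name": resource_name,
                "hint_id": digest}

    def store_hints(db, uuid, hints):
        # db.put() is a stand-in for Valet's datastore layer; the hints are
        # kept verbatim and only read back at Nova scheduling time.
        db.put("uuid_map", uuid, json.dumps(hints))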

Change-Id: Ifed5b0f8172e522caf7e520f8131f23d4d336f4f
Story: #2001139
Task: #4855
Joe D'Andrea 2017-08-02 12:18:26 -05:00 committed by Chris
parent 65d93c4ed5
commit 3aa88d65ad
47 changed files with 5122 additions and 3797 deletions

1
.gitignore vendored

@@ -62,6 +62,7 @@ pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
cover/
.tox/
.coverage
.cache

0
run_all_tests.sh Normal file → Executable file

0
run_examples.sh Normal file → Executable file

0
run_test.sh Normal file → Executable file

0
run_until_fail.sh Normal file → Executable file

View File

@@ -12,7 +12,7 @@ CREATE TABLE IF NOT EXISTS #VALET_KEYSPACE#.oslo_messages ("timestamp" text PRIM
CREATE TABLE IF NOT EXISTS #VALET_KEYSPACE#.plans (id text PRIMARY KEY, name text, stack_id text);
CREATE TABLE IF NOT EXISTS #VALET_KEYSPACE#.uuid_map (uuid text PRIMARY KEY, h_uuid text, s_uuid text);
CREATE TABLE IF NOT EXISTS #VALET_KEYSPACE#.uuid_map (uuid text PRIMARY KEY, metadata text);
CREATE TABLE IF NOT EXISTS #VALET_KEYSPACE#.app (stack_id text PRIMARY KEY, app text);
@@ -31,3 +31,5 @@ CREATE INDEX IF NOT EXISTS ON #VALET_KEYSPACE#.placements (plan_id);
CREATE INDEX IF NOT EXISTS ON #VALET_KEYSPACE#.placements (orchestration_id);
CREATE INDEX IF NOT EXISTS ON #VALET_KEYSPACE#.placements (resource_id);
CREATE INDEX IF NOT EXISTS ON #VALET_KEYSPACE#.groups (name);
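For reference, a minimal sketch of round-tripping the opaque metadata
through the revised uuid_map table, written directly against Cassandra with
the stock cassandra-driver (Valet itself goes through Music; host and
keyspace names are illustrative):

    import json
    from cassandra.cluster import Cluster

    session = Cluster(["127.0.0.1"]).connect("valet")

    def put_uuid_metadata(uuid, metadata):
        # The metadata column is an opaque JSON blob; Valet does not index it.
        session.execute(
            "INSERT INTO uuid_map (uuid, metadata) VALUES (%s, %s)",
            (uuid, json.dumps(metadata)))

    def get_uuid_metadata(uuid):
        row = session.execute(
            "SELECT metadata FROM uuid_map WHERE uuid = %s", (uuid,)).one()
        return json.loads(row.metadata) if row else None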

22
tox.ini

@@ -1,7 +1,7 @@
[tox]
minversion = 2.3.1
skipsdist = True
envlist = docs,py27
envlist = docs,py27,pep8
[testenv]
usedevelop = True
@@ -13,8 +13,16 @@ commands =
ostestr --slowest '{posargs}'
deps = -r{toxinidir}/test-requirements.txt
whitelist_externals =
bash
find
[testenv:debug]
commands = oslo_debug_helper -t valet/tests/unit {posargs}
[testenv:debug-py27]
basepython = python2.7
commands = oslo_debug_helper -t valet/tests/unit {posargs}
[testenv:pep8]
basepython = python2.7
deps = {[testenv]deps}
@@ -29,10 +37,13 @@ setenv = VIRTUAL_ENV={envdir}
commands = python setup.py testr --slowest --testr-args='{posargs}'
[testenv:cover]
# Do NOT run test_coverage_ext tests while gathering coverage.
# Those tests conflict with coverage.
setenv = VIRTUAL_ENV={envdir}
OS_TEST_PATH=valet/tests/unit/
OS_TEST_PATH=valet/tests/unit
commands =
coverage erase
find . -type f -name "*.pyc" -delete
python setup.py test --slowest --coverage --coverage-package-name 'valet' --testr-args='{posargs}'
coverage html
coverage report
@@ -53,13 +64,14 @@ commands =
[flake8]
filename = *.py
show-source = true
show-source = True
# E123, E125 skipped as they are invalid PEP-8.
# D100: Missing docstring in public module
# D101: Missing docstring in public class
# D102: Missing docstring in public method
# D103: Missing docstring in public function
# D104: Missing docstring in public package
# D203: 1 blank line required before class docstring (deprecated in pep257)
ignore = D100,D101,D102,D103,D104,D203
ignore = D100,D101,D102,D103,D104,D203,E123,E125,E501,H401,H405,H105,H301
builtins = _
exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build
exclude=.venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build
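With pep8 added to the default envlist, the style gate now runs on a plain
tox invocation; it can also be run on its own:

    tox -e pep8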

View File

@@ -70,6 +70,7 @@ music_opts = [
cfg.StrOpt('resource_index_table', default='resource_log_index'),
cfg.StrOpt('app_index_table', default='app_log_index'),
cfg.StrOpt('uuid_table', default='uuid_map'),
cfg.StrOpt('group_table', default='groups'),
cfg.IntOpt('music_server_retries', default=3),
]
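Assuming these options register under a [music] section, the new table
option is configured alongside the existing ones (values shown are the
defaults):

    [music]
    uuid_table = uuid_map
    group_table = groups
    music_server_retries = 3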

View File

@@ -59,11 +59,14 @@ engine_opts = [
default='a,c,u,f,o,p,s',
help='Indicates the node type'),
cfg.IntOpt('compute_trigger_frequency',
default=1800,
default=3600,
help='Frequency for checking compute hosting status'),
cfg.IntOpt('topology_trigger_frequency',
default=3600,
default=7200,
help='Frequency for checking datacenter topology'),
cfg.IntOpt('metadata_trigger_frequency',
default=1200,
help='Frequency for checking metadata'),
cfg.IntOpt('update_batch_wait',
default=600,
help='Wait time before start resource synch from Nova'),
@@ -76,13 +79,13 @@ engine_opts = [
default=1,
help='Default disk overbooking ratios'),
cfg.FloatOpt('static_cpu_standby_ratio',
default=20,
default=0,
help='Percentages of standby cpu resources'),
cfg.FloatOpt('static_mem_standby_ratio',
default=20,
default=0,
help='Percentages of standby mem resources'),
cfg.FloatOpt('static_local_disk_standby_ratio',
default=20,
default=0,
help='Percentages of disk standby resources'),
] + logger_conf("engine")
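The revised defaults correspond to an [engine] section like the following
(section name assumed from the option group; values are the new defaults):

    [engine]
    compute_trigger_frequency = 3600
    topology_trigger_frequency = 7200
    metadata_trigger_frequency = 1200
    static_cpu_standby_ratio = 0
    static_mem_standby_ratio = 0
    static_local_disk_standby_ratio = 0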

View File

@@ -21,13 +21,12 @@ import time
from oslo_log import log
from valet.engine.optimizer.app_manager.app_topology import AppTopology
from valet.engine.optimizer.app_manager.app_topology_base import VM
from valet.engine.optimizer.app_manager.application import App
LOG = log.getLogger(__name__)
class AppHistory(object):
"""Data container for scheduling decisions."""
def __init__(self, _key):
self.decision_key = _key
@@ -37,66 +36,665 @@ class AppHistory(object):
class AppHandler(object):
"""App Handler Class.
This class handles operations for the management of applications.
"""This class handles operations for the management of applications.
Functions related to adding apps and adding/removing them from
placement and updating topology info.
"""
def __init__(self, _resource, _db, _config):
def __init__(self, _placement_handler, _metadata, _resource, _db, _config):
"""Init App Handler Class."""
self.phandler = _placement_handler
self.resource = _resource
self.db = _db
self.config = _config
""" current app requested, a temporary copy """
self.metadata = _metadata
# current app requested, a temporary copy
# key= stack_id, value = AppTopology instance
self.apps = {}
self.max_app_cache = 500
self.min_app_cache = 100
self.decision_history = {}
self.max_decision_history = 5000
self.min_decision_history = 1000
self.status = "success"
def set_app(self, _app):
"""Validate app placement request and extract info for placement
decision.
"""
app_topology = AppTopology(self.phandler, self.resource, self.db)
app_topology.init_app(_app)
if app_topology.status != "success":
LOG.error(app_topology.status)
return app_topology
LOG.info("Received {} for app {} ".format(app_topology.action,
app_topology.app_id))
if app_topology.action == "create":
return self._set_app_for_create(_app, app_topology)
elif app_topology.action == "replan":
return self._set_app_for_replan(_app, app_topology)
elif app_topology.action == "migrate":
return self._set_app_for_replan(_app, app_topology)
elif app_topology.action == "identify":
return self._set_app_for_identify(_app, app_topology)
elif app_topology.action == "update":
return self._set_app_for_update(_app, app_topology)
return app_topology
def _set_app_for_create(self, _app, _app_topology):
"""Set for stack-creation or single server creation (ad-hoc) requests.
"""
if self._set_flavor_properties(_app_topology) is False:
LOG.error(_app_topology.status)
return _app_topology
LOG.debug("done setting flavor properties")
if _app_topology.set_app_topology_properties(_app) is False:
if _app_topology.status == "success":
return None
else:
LOG.error(_app_topology.status)
return _app_topology
# for case of ad-hoc create or update
if len(_app_topology.candidate_list_map) > 0:
# FIXME(gjung): the key might not be the uuid, but orch_id
uuid = _app_topology.candidate_list_map.keys()[0]
placement = self.phandler.get_placement(uuid)
if placement is None:
return None
if placement.uuid != "none":
LOG.info("change 'ad-hoc' to 'replan'")
# FIXME(gjung):
# if placement.stack_id and placement.orch_id
# if placement.stack_id == _app_topology.app_id
# then, this should be merged into the original stack
# otherwise, a separate stack
# update placement.stack_id
# remove it from the original stack?
# update orch_id in resource status
# else (i.e., pre-valet placement)
self._set_app_for_ad_hoc_update(placement, _app_topology)
if _app_topology.status is None:
return None
elif _app_topology.status != "success":
LOG.error(_app_topology.status)
return _app_topology
# NOTE(gjung): if placement does not exist,
# check if _app_topology.app_id exists
# then merge into the stack
# otherwise, a separate stack
LOG.debug("done setting app properties")
if _app_topology.parse_app_topology() is False:
if _app_topology.status == "success":
return None
else:
LOG.error(_app_topology.status)
return _app_topology
LOG.debug("done parsing app")
return _app_topology
def _set_app_for_ad_hoc_update(self, _placement, _app_topology):
"Set prior allocation info."
if _placement.uuid not in _app_topology.stack["placements"].keys():
_app_topology.status = "find unknown orch_id while ad-hoc update"
return
_app_topology.stack["placements"][_placement.uuid]["properties"]["host"] = _placement.host
_app_topology.stack["placements"][_placement.uuid]["resource_id"] = _placement.uuid
_app_topology.id_map[_placement.uuid] = _placement.uuid
_app_topology.action = "replan"
flavor_id = None
if _placement.stack_id is None or _placement.stack_id == "none":
if _placement.host in self.resource.hosts.keys():
host = self.resource.hosts[_placement.host]
vm_info = host.get_vm_info(uuid=_placement.uuid)
if vm_info is not None:
if "flavor_id" in vm_info.keys():
flavor_id = vm_info["flavor_id"]
else:
_app_topology.status = "missing vm while ad-hoc update"
return
else:
_app_topology.status = "missing host while ad-hoc update"
return
else:
(old_placements, old_groups) = self.get_stack(_placement.stack_id)
if old_placements is None:
_app_topology.status = None
return
elif len(old_placements) == 0:
_app_topology.status = "missing prior stack while ad-hoc updt."
return
flavor_id = old_placements[_placement.orch_id]["properties"]["flavor"]
if flavor_id is None:
_app_topology.status = "missing vm flavor info while ad-hoc updt."
return
old_vm_alloc = {}
old_vm_alloc["host"] = _placement.host
(flavor, status) = self._get_flavor(flavor_id)
if flavor is None:
_app_topology.status = status
return
old_vm_alloc["vcpus"] = flavor.vCPUs
old_vm_alloc["mem"] = flavor.mem_cap
old_vm_alloc["local_volume"] = flavor.disk_cap
_app_topology.old_vm_map[_placement.uuid] = old_vm_alloc
self.phandler.update_placement(_placement.uuid,
stack_id=_app_topology.app_id,
orch_id=_placement.uuid,
state='rebuilding')
self.phandler.set_original_host(_placement.uuid)
def _set_app_for_replan(self, _app, _app_topology):
"""Set for migration request or re-scheduling prior placement due to
conflict.
"""
(placements, groups) = self.get_placements(_app_topology)
if placements is None:
return None
elif len(placements) == 0:
return _app_topology
_app_topology.stack["placements"] = placements
_app_topology.stack["groups"] = groups
LOG.debug("done getting stack")
# check if mock id was used, then change to the real orch_id
if "mock_id" in _app.keys():
if _app["mock_id"] is not None and _app["mock_id"] != "none":
status = self._change_orch_id(_app, _app_topology)
if status != "success":
return _app_topology
LOG.debug("done replacing mock id")
if _app_topology.set_app_topology_properties(_app) is False:
if _app_topology.status == "success":
return None
else:
LOG.error(_app_topology.status)
return _app_topology
LOG.debug("done setting stack properties")
if _app_topology.parse_app_topology() is False:
if _app_topology.status == "success":
return None
else:
LOG.error(_app_topology.status)
return _app_topology
LOG.debug("done parsing stack")
return _app_topology
def _set_app_for_identify(self, _app, _app_topology):
"""Set for the confirmation with physical uuid of scheduling decision
match.
"""
(placements, groups) = self.get_placements(_app_topology)
if placements is None:
return None
elif len(placements) == 0:
return _app_topology
_app_topology.stack["placements"] = placements
_app_topology.stack["groups"] = groups
LOG.debug("done getting stack")
# check if mock id was used, then change to the real orch_id
if "mock_id" in _app.keys():
if _app["mock_id"] is not None and _app["mock_id"] != "none":
status = self._change_orch_id(_app, _app_topology)
if status != "success":
return _app_topology
LOG.debug("done replacing mock id")
return _app_topology
def _set_app_for_update(self, _app, _app_topology):
"""Set for stack-update request."""
if self._set_flavor_properties(_app_topology) is False:
LOG.error(_app_topology.status)
return _app_topology
LOG.debug("done setting vm properties")
(old_placements, old_groups) = self.get_placements(_app_topology)
if old_placements is None:
return None
if "original_resources" in _app.keys():
if len(old_placements) == 0:
old_placements = _app["original_resources"]
if len(old_placements) == 0:
if _app_topology.status == "success":
_app_topology.status = "cannot find prior stack for update"
return _app_topology
LOG.debug("done getting old stack")
# NOTE(gjung): old placements info can be stale.
for rk, r in old_placements.iteritems():
if r["type"] == "OS::Nova::Server":
if "resource_id" in r.keys():
uuid = r["resource_id"]
placement = self.phandler.get_placement(uuid)
if placement is None:
return None
elif placement.uuid == "none":
LOG.warn("vm (" + rk + ") in original stack missing. "
"Perhaps it was deleted?")
if rk in _app_topology.stack["placements"].keys():
del _app_topology.stack["placements"][rk]
continue
if rk in _app_topology.stack["placements"].keys():
if placement.stack_id is None or \
placement.stack_id == "none" or \
placement.stack_id != _app_topology.app_id:
if placement.stack_id is None or \
placement.stack_id == "none":
LOG.warn("stack id in valet record is unknown")
else:
LOG.warn("stack id in valet record is "
"different")
curr_state = None
if placement.state is None or \
placement.state == "none":
curr_state = "created"
else:
curr_state = placement.state
self.phandler.update_placement(uuid,
stack_id=_app_topology.app_id,
orch_id=rk,
state=curr_state)
self._apply_meta_change(rk, r, _app_topology.stack["placements"])
_app_topology.update_placement_vm_host(rk,
placement.host)
if "resource_id" not in _app_topology.stack["placements"][rk].keys():
_app_topology.stack["placements"][rk]["resource_id"] = uuid
else:
if placement.stack_id is not None and \
placement.stack_id != "none":
self.phandler.update_placement(uuid,
stack_id="none",
orch_id="none")
host = self.resource.hosts[placement.host]
vm_info = host.get_vm_info(uuid=placement.uuid)
if "flavor_id" not in vm_info.keys():
(flavor, status) = self._get_flavor(r["properties"]["flavor"])
if flavor is not None:
vm_info["flavor_id"] = flavor.flavor_id
else:
_app_topology.status = status
return _app_topology
else:
LOG.warn("vm (" + rk + ") in original stack does not have"
" uuid")
if old_groups is not None and len(old_groups) > 0:
for gk, g in old_groups.iteritems():
if "host" in g.keys():
_app_topology.update_placement_group_host(gk, g["host"])
LOG.debug("done setting stack update")
if _app_topology.set_app_topology_properties(_app) is False:
if _app_topology.status == "success":
return None
else:
LOG.error(_app_topology.status)
return _app_topology
for rk, vm_alloc in _app_topology.old_vm_map.iteritems():
old_r = old_placements[rk]
vcpus = 0
mem = 0
local_volume = 0
if "vcpus" not in old_r["properties"].keys():
(flavor, status) = self._get_flavor(old_r["properties"]["flavor"])
if flavor is None:
_app_topology.status = status
return _app_topology
else:
vcpus = flavor.vCPUs
mem = flavor.mem_cap
local_volume = flavor.disk_cap
else:
vcpus = old_r["properties"]["vcpus"]
mem = old_r["properties"]["mem"]
local_volume = old_r["properties"]["local_volume"]
if vm_alloc["vcpus"] != vcpus or \
vm_alloc["mem"] != mem or \
vm_alloc["local_volume"] != local_volume:
old_vm_alloc = {}
old_vm_alloc["host"] = vm_alloc["host"]
old_vm_alloc["vcpus"] = vcpus
old_vm_alloc["mem"] = mem
old_vm_alloc["local_volume"] = local_volume
_app_topology.old_vm_map[rk] = old_vm_alloc
# FIXME(gjung): the case of that vms seen in new stack but not in old
# stack
LOG.debug("done setting stack properties")
if _app_topology.parse_app_topology() is False:
if _app_topology.status == "success":
return None
else:
LOG.error(_app_topology.status)
return _app_topology
LOG.debug("done getting stack")
return _app_topology
def _set_flavor_properties(self, _app_topology):
"""Set flavor's properties."""
for rk, r in _app_topology.stack["placements"].iteritems():
if r["type"] == "OS::Nova::Server":
(flavor, status) = self._get_flavor(r["properties"]["flavor"])
if flavor is None:
_app_topology.status = status
return False
r["properties"]["vcpus"] = flavor.vCPUs
r["properties"]["mem"] = flavor.mem_cap
r["properties"]["local_volume"] = flavor.disk_cap
if len(flavor.extra_specs) > 0:
extra_specs = {}
for mk, mv in flavor.extra_specs.iteritems():
extra_specs[mk] = mv
r["properties"]["extra_specs"] = extra_specs
return True
def _change_orch_id(self, _app, _app_topology):
"""Replace mock orch_id before setting application."""
m_id = _app["mock_id"]
o_id = _app["orchestration_id"]
u_id = _app["resource_id"]
if not _app_topology.change_orch_id(m_id, o_id):
LOG.error(_app_topology.status)
return _app_topology.status
host_name = _app_topology.get_placement_host(o_id)
if host_name == "none":
_app_topology.status = "allocated host not found while changing mock id"
LOG.error(_app_topology.status)
return _app_topology.status
else:
if host_name in self.resource.hosts.keys():
host = self.resource.hosts[host_name]
vm_info = host.get_vm_info(orch_id=m_id)
if vm_info is None:
_app_topology.status = "vm not found while changing mock id"
LOG.error(_app_topology.status)
return _app_topology.status
else:
vm_info["orch_id"] = o_id
self.resource.update_orch_id_in_groups(o_id, u_id, host)
else:
_app_topology.status = "host is not found while changing mock id"
LOG.error(_app_topology.status)
return _app_topology.status
placement = self.phandler.get_placement(u_id)
if placement is None:
return None
if placement.uuid != "none":
if placement.orch_id is not None and placement.orch_id != "none":
if placement.orch_id == m_id:
placement.orch_id = o_id
if not self.phandler.store_placement(u_id, placement):
return None
return "success"
def _get_flavor(self, _flavor_name):
"""Get flavor."""
status = "success"
flavor = self.resource.get_flavor(_flavor_name)
if flavor is None:
LOG.warn("not exist flavor (" + _flavor_name + ") and try to "
"refetch")
if not self.metadata.set_flavors():
status = "failed to read flavors from nova"
return (None, status)
flavor = self.resource.get_flavor(_flavor_name)
if flavor is None:
status = "net exist flavor (" + _flavor_name + ")"
return (None, status)
return (flavor, status)
def _apply_meta_change(self, _rk, _r, _placements):
"""Check if image or flavor is changed in the update request."""
if _rk in _placements.keys():
r = _placements[_rk]
if r["properties"]["flavor"] != _r["properties"]["flavor"]:
self.phandler.update_placement(_r["resource_id"],
state="rebuilding")
self.phandler.set_original_host(_r["resource_id"])
# NOTE(gjung): Nova & Heat does not re-schedule if image is changed
if r["properties"]["image"] != _r["properties"]["image"]:
self.phandler.update_placement(_r["resource_id"],
state="rebuild")
def get_placements(self, _app_topology):
"""Get prior stack/app placements info from db or cache."""
(placements, groups) = self.get_stack(_app_topology.app_id)
if placements is None:
return (None, None)
elif len(placements) == 0:
_app_topology.status = "no app/stack record"
return ({}, {})
return (placements, groups)
def get_stack(self, _stack_id):
"""Get stack info from db or cache."""
placements = {}
groups = {}
if _stack_id in self.apps.keys():
placements = self.apps[_stack_id].stack["placements"]
groups = self.apps[_stack_id].stack["groups"]
LOG.debug("hit stack cache")
else:
stack = self.db.get_stack(_stack_id)
if stack is None:
return (None, None)
elif len(stack) == 0:
return ({}, {})
placements = stack["resources"]
if "groups" in stack.keys() and stack["groups"] is not None:
groups = stack["groups"]
return (placements, groups)
def store_app(self, _app_topology):
"""Store and cache app placement results."""
if _app_topology.action == "ping":
return True
_app_topology.timestamp_scheduled = self.resource.current_timestamp
if not _app_topology.store_app():
return False
if len(self.apps) > self.max_app_cache:
self._flush_app_cache()
self.apps[_app_topology.app_id] = _app_topology
self.phandler.flush_cache()
return True
def update_stack(self, _stack_id, orch_id=None, uuid=None, host=None):
"""Update the uuid or host of vm in stack in db and cache."""
(placements, groups) = self.get_stack(_stack_id)
if placements is None:
return (None, None)
elif len(placements) == 0:
return ("none", "none")
placement = None
if orch_id is not None:
if orch_id in placements.keys():
placement = placements[orch_id]
elif uuid is not None:
for rk, r in placements.iteritems():
if "resource_id" in r.keys() and uuid == r["resource_id"]:
placement = r
break
if placement is not None:
if uuid is not None:
placement["resource_id"] = uuid
if host is not None:
placement["properties"]["host"] = host
if not self.db.update_stack(_stack_id, orch_id=orch_id, uuid=uuid,
host=host, time=time.time()):
return (None, None)
return (placement["resource_id"], placement["properties"]["host"])
else:
return ("none", "none")
def delete_from_stack(self, _stack_id, orch_id=None, uuid=None):
"""Delete a placement from stack in db and cache."""
if _stack_id in self.apps.keys():
app_topology = self.apps[_stack_id]
if orch_id is not None:
del app_topology.stack["placements"][orch_id]
app_topology.timestamp_scheduled = time.time()
elif uuid is not None:
pk = None
for rk, r in app_topology.stack["placements"].iteritems():
if "resource_id" in r.keys() and uuid == r["resource_id"]:
pk = rk
break
if pk is not None:
del app_topology.stack["placements"][pk]
app_topology.timestamp_scheduled = time.time()
if not self.db.delete_placement_from_stack(_stack_id,
orch_id=orch_id,
uuid=uuid,
time=time.time()):
return False
return True
# NOTE(GJ): do not cache migration decision
def check_history(self, _app):
"""Check if 'create' or 'replan' is determined already."""
stack_id = _app["stack_id"]
action = _app["action"]
decision_key = None
if action == "create":
decision_key = stack_id + ":" + action + ":none"
if decision_key in self.decision_history.keys():
return (decision_key,
self.decision_history[decision_key].result)
else:
return (decision_key, None)
elif action == "replan":
msg = "%s:%s:%s"
decision_key = msg % (stack_id, action, _app["orchestration_id"])
if decision_key in self.decision_history.keys():
return (decision_key,
self.decision_history[decision_key].result)
else:
return (decision_key, None)
decision_key = stack_id + ":" + action + ":" + _app["resource_id"]
else:
return (None, None)
def put_history(self, _decision_key, _result):
decision_key_list = _decision_key.split(":")
action = decision_key_list[1]
if _decision_key in self.decision_history.keys():
return (_decision_key, self.decision_history[_decision_key].result)
else:
return (_decision_key, None)
def record_history(self, _decision_key, _result):
"""Record an app placement decision."""
decision_key_element_list = _decision_key.split(":")
action = decision_key_element_list[1]
if action == "create" or action == "replan":
if len(self.decision_history) > self.max_decision_history:
self._flush_decision_history()
app_history = AppHistory(_decision_key)
app_history.result = _result
app_history.timestamp = time.time()
self.decision_history[_decision_key] = app_history
if len(self.decision_history) > self.max_decision_history:
self._clean_decision_history()
def _flush_decision_history(self):
"""Unload app placement decisions."""
def _clean_decision_history(self):
count = 0
num_of_removes = len(self.decision_history) - self.min_decision_history
remove_item_list = []
for decision in (sorted(self.decision_history.values(),
key=operator.attrgetter('timestamp'))):
@@ -104,256 +702,22 @@ class AppHandler(object):
count += 1
if count == num_of_removes:
break
for dk in remove_item_list:
if dk in self.decision_history.keys():
del self.decision_history[dk]
def add_app(self, _app):
"""Add app and set or regenerate topology, return updated topology."""
self.apps.clear()
def _flush_app_cache(self):
"""Unload app topologies."""
app_topology = AppTopology(self.resource)
count = 0
num_of_removes = len(self.apps) - self.min_app_cache
stack_id = None
if "stack_id" in _app.keys():
stack_id = _app["stack_id"]
else:
stack_id = "none"
remove_item_list = []
for app in (sorted(self.apps.values(), key=operator.attrgetter('timestamp_scheduled'))):
remove_item_list.append(app.app_id)
count += 1
if count == num_of_removes:
break
application_name = None
if "application_name" in _app.keys():
application_name = _app["application_name"]
else:
application_name = "none"
action = _app["action"]
if action == "replan" or action == "migrate":
re_app = self._regenerate_app_topology(stack_id, _app,
app_topology, action)
if re_app is None:
self.apps[stack_id] = None
msg = "cannot locate the original plan for stack = %s"
self.status = msg % stack_id
return None
if action == "replan":
LOG.info("got replan: " + stack_id)
elif action == "migrate":
LOG.info("got migration: " + stack_id)
app_id = app_topology.set_app_topology(re_app)
if app_id is None:
LOG.error("Could not set app topology for regererated graph." +
app_topology.status)
self.status = app_topology.status
self.apps[stack_id] = None
return None
else:
app_id = app_topology.set_app_topology(_app)
if len(app_topology.candidate_list_map) > 0:
LOG.info("got ad-hoc placement: " + stack_id)
else:
LOG.info("got placement: " + stack_id)
if app_id is None:
LOG.error("Could not set app topology for app graph" +
app_topology.status)
self.status = app_topology.status
self.apps[stack_id] = None
return None
new_app = App(stack_id, application_name, action)
self.apps[stack_id] = new_app
return app_topology
def add_placement(self, _placement_map, _app_topology, _timestamp):
"""Change requested apps to scheduled and place them."""
for v in _placement_map.keys():
if self.apps[v.app_uuid].status == "requested":
self.apps[v.app_uuid].status = "scheduled"
self.apps[v.app_uuid].timestamp_scheduled = _timestamp
if isinstance(v, VM):
if self.apps[v.app_uuid].request_type == "replan":
if v.uuid in _app_topology.planned_vm_map.keys():
self.apps[v.app_uuid].add_vm(
v, _placement_map[v], "replanned")
else:
self.apps[v.app_uuid].add_vm(
v, _placement_map[v], "scheduled")
if v.uuid == _app_topology.candidate_list_map.keys()[0]:
self.apps[v.app_uuid].add_vm(
v, _placement_map[v], "replanned")
else:
self.apps[v.app_uuid].add_vm(
v, _placement_map[v], "scheduled")
# NOTE(GJ): do not handle Volume in this version
else:
if _placement_map[v] in self.resource.hosts.keys():
host = self.resource.hosts[_placement_map[v]]
if v.level == "host":
self.apps[v.app_uuid].add_vgroup(v, host.name)
else:
hg = self.resource.host_groups[_placement_map[v]]
if v.level == hg.host_type:
self.apps[v.app_uuid].add_vgroup(v, hg.name)
if self._store_app_placements() is False:
pass
def _store_app_placements(self):
# NOTE(GJ): do not track application history in this version
for appk, app in self.apps.iteritems():
json_info = app.get_json_info()
if self.db.add_app(appk, json_info) is False:
return False
return True
def remove_placement(self):
"""Remove App from placement."""
if self.db is not None:
for appk, _ in self.apps.iteritems():
if self.db.add_app(appk, None) is False:
LOG.error("AppHandler: error while adding app "
"info to MUSIC")
def get_vm_info(self, _s_uuid, _h_uuid, _host):
"""Return vm_info from database."""
vm_info = {}
if _h_uuid is not None and _h_uuid != "none" and \
_s_uuid is not None and _s_uuid != "none":
vm_info = self.db.get_vm_info(_s_uuid, _h_uuid, _host)
return vm_info
def update_vm_info(self, _s_uuid, _h_uuid):
if _h_uuid and _h_uuid != "none" and _s_uuid and _s_uuid != "none":
return self.db.update_vm_info(_s_uuid, _h_uuid)
return True
def _regenerate_app_topology(self, _stack_id, _app,
_app_topology, _action):
re_app = {}
old_app = self.db.get_app_info(_stack_id)
if old_app is None:
LOG.error("Error while getting old_app from MUSIC")
return None
elif len(old_app) == 0:
LOG.error("Cannot find the old app in MUSIC")
return None
re_app["action"] = "create"
re_app["stack_id"] = _stack_id
resources = {}
diversity_groups = {}
exclusivity_groups = {}
if "VMs" in old_app.keys():
for vmk, vm in old_app["VMs"].iteritems():
resources[vmk] = {}
resources[vmk]["name"] = vm["name"]
resources[vmk]["type"] = "OS::Nova::Server"
properties = {}
properties["flavor"] = vm["flavor"]
if vm["availability_zones"] != "none":
properties["availability_zone"] = vm["availability_zones"]
resources[vmk]["properties"] = properties
for divk, level_name in vm["diversity_groups"].iteritems():
div_id = divk + ":" + level_name
if div_id not in diversity_groups.keys():
diversity_groups[div_id] = []
diversity_groups[div_id].append(vmk)
for exk, level_name in vm["exclusivity_groups"].iteritems():
ex_id = exk + ":" + level_name
if ex_id not in exclusivity_groups.keys():
exclusivity_groups[ex_id] = []
exclusivity_groups[ex_id].append(vmk)
if _action == "replan":
if vmk == _app["orchestration_id"]:
_app_topology.candidate_list_map[vmk] = \
_app["locations"]
elif vmk in _app["exclusions"]:
_app_topology.planned_vm_map[vmk] = vm["host"]
if vm["status"] == "replanned":
_app_topology.planned_vm_map[vmk] = vm["host"]
elif _action == "migrate":
if vmk == _app["orchestration_id"]:
_app_topology.exclusion_list_map[vmk] = _app[
"excluded_hosts"]
if vm["host"] not in _app["excluded_hosts"]:
_app_topology.exclusion_list_map[vmk].append(
vm["host"])
else:
_app_topology.planned_vm_map[vmk] = vm["host"]
_app_topology.old_vm_map[vmk] = (vm["host"], vm["cpus"],
vm["mem"], vm["local_volume"])
if "VGroups" in old_app.keys():
for gk, affinity in old_app["VGroups"].iteritems():
resources[gk] = {}
resources[gk]["type"] = "ATT::Valet::GroupAssignment"
properties = {}
properties["group_type"] = "affinity"
properties["group_name"] = affinity["name"]
properties["level"] = affinity["level"]
properties["resources"] = []
for r in affinity["subvgroup_list"]:
properties["resources"].append(r)
resources[gk]["properties"] = properties
if len(affinity["diversity_groups"]) > 0:
for divk, level_name in \
affinity["diversity_groups"].iteritems():
div_id = divk + ":" + level_name
if div_id not in diversity_groups.keys():
diversity_groups[div_id] = []
diversity_groups[div_id].append(gk)
if len(affinity["exclusivity_groups"]) > 0:
for exk, level_name in \
affinity["exclusivity_groups"].iteritems():
ex_id = exk + ":" + level_name
if ex_id not in exclusivity_groups.keys():
exclusivity_groups[ex_id] = []
exclusivity_groups[ex_id].append(gk)
group_type = "ATT::Valet::GroupAssignment"
for div_id, resource_list in diversity_groups.iteritems():
divk_level_name = div_id.split(":")
resources[divk_level_name[0]] = {}
resources[divk_level_name[0]]["type"] = group_type
properties = {}
properties["group_type"] = "diversity"
properties["group_name"] = divk_level_name[2]
properties["level"] = divk_level_name[1]
properties["resources"] = resource_list
resources[divk_level_name[0]]["properties"] = properties
for ex_id, resource_list in exclusivity_groups.iteritems():
exk_level_name = ex_id.split(":")
resources[exk_level_name[0]] = {}
resources[exk_level_name[0]]["type"] = group_type
properties = {}
properties["group_type"] = "exclusivity"
properties["group_name"] = exk_level_name[2]
properties["level"] = exk_level_name[1]
properties["resources"] = resource_list
resources[exk_level_name[0]]["properties"] = properties
re_app["resources"] = resources
return re_app
for appk in remove_item_list:
del self.apps[appk]
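To summarize the decision-history scheme above (a sketch, not part of the
diff): decisions are keyed as stack_id:action:orch_id, 'none' stands in
when no orchestration ID applies, and only 'create' and 'replan' results
are cached:

    def make_decision_key(app):
        # Mirrors AppHandler.check_history.
        if app["action"] == "create":
            return app["stack_id"] + ":create:none"
        if app["action"] == "replan":
            return "%s:replan:%s" % (app["stack_id"],
                                     app["orchestration_id"])
        return None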

View File

@@ -12,40 +12,54 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from valet.engine.optimizer.app_manager.app_topology_base import VGroup
from valet.engine.optimizer.app_manager.app_topology_base import VM
from oslo_log import log
from valet.engine.optimizer.app_manager.app_topology_parser import Parser
from valet.engine.optimizer.app_manager.group import Group
from valet.engine.optimizer.app_manager.vm import VM
LOG = log.getLogger(__name__)
class AppTopology(object):
"""App Topology Class.
"""App Topology Class.Container to deliver the status of request.
This class contains functions for parsing and setting each app, as well as
calculating and setting optimization.
"""
def __init__(self, _placement_handler, _resource, _db):
self.app_id = None # stack_id
self.app_name = None
def __init__(self, _resource):
"""Init App Topology Class."""
self.vgroups = {}
self.vms = {}
# create, update, identify, replan, migrate, ping
self.action = None
self.timestamp_scheduled = 0
# for replan
self.old_vm_map = {}
self.planned_vm_map = {}
self.candidate_list_map = {}
# for migration-tip
self.exclusion_list_map = {}
# stack resources
self.stack = {}
self.phandler = _placement_handler
self.resource = _resource
self.db = _db
# restriction of host naming convention
high_level_allowed = True
if "none" in self.resource.datacenter.region_code_list:
high_level_allowed = False
# For search
# key = orch_id, value = Group instance containing sub-groups
self.groups = {}
# key = orch_id, value = VM instance
self.vms = {}
# key = orch_id, value = current placement info
self.old_vm_map = {}
# key = orch_id, value = current host
self.planned_vm_map = {}
# key = orch_id, value = candidate hosts
self.candidate_list_map = {}
# key = orch_id, value = physical uuid
self.id_map = {}
self.parser = Parser(high_level_allowed)
self.parser = Parser(self.db)
# For placement optimization
self.total_CPU = 0
self.total_mem = 0
self.total_local_vol = 0
@@ -53,65 +67,202 @@ class AppTopology(object):
self.status = "success"
def set_app_topology(self, _app_graph):
"""Set app topology (Parse and set each app).
def init_app(self, _app):
"""Validate and init app info based on the original request."""
Set app topology by calling parser to determine vgroups,
vms and volumes. Then return parsed stack_id, app_name and action.
"""
(vgroups, vms) = self.parser.set_topology(_app_graph)
if "action" in _app.keys():
self.action = _app["action"]
else:
self.status = "no action type in request"
return
if len(self.parser.candidate_list_map) > 0:
self.candidate_list_map = self.parser.candidate_list_map
if "stack_id" in _app.keys():
self.app_id = _app["stack_id"]
else:
self.status = "no stack id in request"
return
if len(vgroups) == 0 and len(vms) == 0:
self.status = self.parser.status
return None
if "application_name" in _app.keys():
self.app_name = _app["application_name"]
else:
self.app_name = "none"
# cumulate virtual resources
for _, vgroup in vgroups.iteritems():
self.vgroups[vgroup.uuid] = vgroup
for _, vm in vms.iteritems():
self.vms[vm.uuid] = vm
if self.action == "create" or self.action == "update":
if "resources" in _app.keys():
self.stack["placements"] = _app["resources"]
else:
self.status = "no resources in request action {}".format(self.action)
return
return self.parser.stack_id, self.parser.application_name, \
self.parser.action
if "groups" in _app.keys():
self.stack["groups"] = _app["groups"]
if self.action in ("identify", "replan", "migrate"):
if "resource_id" in _app.keys():
if "orchestration_id" in _app.keys():
self.id_map[_app["orchestration_id"]] = _app["resource_id"]
else:
self.id_map[_app["resource_id"]] = _app["resource_id"]
else:
self.status = "no physical uuid in request action {}".format(self.action)
return
def set_app_topology_properties(self, _app):
"""Set app properties."""
if self.action == "create" and \
"locations" in _app.keys() and \
len(_app["locations"]) > 0:
if len(_app["resources"]) == 1:
# Indicate this is an ad-hoc request
self.candidate_list_map[_app["resources"].keys()[0]] = _app["locations"]
for rk, r in self.stack["placements"].iteritems():
if r["type"] == "OS::Nova::Server":
if self.action == "create":
if "locations" in r.keys() and len(r["locations"]) > 0:
# Indicate this is an ad-hoc request
self.candidate_list_map[rk] = r["locations"]
elif self.action == "replan":
if rk == _app["orchestration_id"]:
self.candidate_list_map[rk] = _app["locations"]
else:
if "resource_id" in r.keys():
placement = self.phandler.get_placement(r["resource_id"])
if placement is None:
return False
elif placement.uuid == "none":
self.status = "no record for placement for vm {}".format(rk)
return False
if placement.state not in ("rebuilding", "migrating"):
self.planned_vm_map[rk] = r["properties"]["host"]
elif self.action == "update":
if "resource_id" in r.keys():
placement = self.phandler.get_placement(r["resource_id"])
if placement is None:
return False
elif placement.uuid == "none":
self.status = "no record for placement for vm {}".format(rk)
return False
if placement.state not in ("rebuilding", "migrating"):
self.planned_vm_map[rk] = r["properties"]["host"]
elif self.action == "migrate":
if "resource_id" in r.keys():
if r["resource_id"] == _app["resource_id"]:
not_candidate_list = []
not_candidate_list.append(r["properties"]["host"])
if "excluded_hosts" in _app.keys():
for h in _app["excluded_hosts"]:
if h != r["properties"]["host"]:
not_candidate_list.append(h)
candidate_list = [hk for hk in self.resource.hosts.keys()
if hk not in not_candidate_list]
self.candidate_list_map[rk] = candidate_list
else:
self.planned_vm_map[rk] = r["properties"]["host"]
if "host" in r["properties"].keys():
vm_alloc = {}
vm_alloc["host"] = r["properties"]["host"]
vm_alloc["vcpus"] = 0
vm_alloc["mem"] = 0
vm_alloc["local_volume"] = 0
if "vcpus" in r["properties"].keys():
vm_alloc["vcpus"] = int(r["properties"]["vcpus"])
else:
self.status = "no record for cpu requirement {}".format(rk)
return False
if "mem" in r["properties"].keys():
vm_alloc["mem"] = int(r["properties"]["mem"])
else:
self.status = "no record for mem requirement {}".format(rk)
return False
if "local_volume" in r["properties"].keys():
vm_alloc["local_volume"] = int(r["properties"]["local_volume"])
else:
self.status = "no record for disk volume requirement {}".format(rk)
return False
self.old_vm_map[rk] = vm_alloc
if self.action == "replan" or self.action == "migrate":
if len(self.candidate_list_map) == 0:
self.status = "no target vm found for " + self.action
return False
return True
def change_orch_id(self, _mockup_id, _orch_id):
"""Replace mockup orch_id with the real orch_id."""
if _mockup_id in self.stack["placements"].keys():
r = self.stack["placements"][_mockup_id]
del self.stack["placements"][_mockup_id]
self.stack["placements"][_orch_id] = r
return True
else:
self.status = "mockup id does not exist in stack"
return False
def parse_app_topology(self):
"""Extract info from stack input for search."""
(self.groups, self.vms) = self.parser.set_topology(self.app_id,
self.stack)
if self.groups is None:
return False
elif len(self.groups) == 0 and len(self.vms) == 0:
self.status = "parse error while {} for {} : {}".format(self.action,
self.app_id,
self.parser.status)
return False
return True
def set_weight(self):
"""Set weight of vms and vgroups."""
"""Set relative weight of each vms and groups."""
for _, vm in self.vms.iteritems():
self._set_vm_weight(vm)
for _, vg in self.vgroups.iteritems():
for _, vg in self.groups.iteritems():
self._set_vm_weight(vg)
for _, vg in self.vgroups.iteritems():
self._set_vgroup_resource(vg)
for _, vg in self.groups.iteritems():
self._set_group_resource(vg)
for _, vg in self.vgroups.iteritems():
self._set_vgroup_weight(vg)
for _, vg in self.groups.iteritems():
self._set_group_weight(vg)
def _set_vm_weight(self, _v):
if isinstance(_v, VGroup):
for _, sg in _v.subvgroups.iteritems():
"""Set relative weight of each vm against available resource amount.
"""
if isinstance(_v, Group):
for _, sg in _v.subgroups.iteritems():
self._set_vm_weight(sg)
else:
if self.resource.CPU_avail > 0:
_v.vCPU_weight = float(_v.vCPUs) / \
float(self.resource.CPU_avail)
_v.vCPU_weight = float(_v.vCPUs) / float(self.resource.CPU_avail)
else:
_v.vCPU_weight = 1.0
self.total_CPU += _v.vCPUs
if self.resource.mem_avail > 0:
_v.mem_weight = float(_v.mem) / \
float(self.resource.mem_avail)
_v.mem_weight = float(_v.mem) / float(self.resource.mem_avail)
else:
_v.mem_weight = 1.0
self.total_mem += _v.mem
if self.resource.local_disk_avail > 0:
_v.local_volume_weight = float(_v.local_volume_size) / \
float(self.resource.local_disk_avail)
_v.local_volume_weight = float(_v.local_volume_size) / float(self.resource.local_disk_avail)
else:
if _v.local_volume_size > 0:
_v.local_volume_weight = 1.0
@@ -119,56 +270,56 @@ class AppTopology(object):
_v.local_volume_weight = 0.0
self.total_local_vol += _v.local_volume_size
def _set_vgroup_resource(self, _vg):
def _set_group_resource(self, _vg):
"""Sum up amount of resources of vms for each affinity group."""
if isinstance(_vg, VM):
return
for _, sg in _vg.subvgroups.iteritems():
self._set_vgroup_resource(sg)
for _, sg in _vg.subgroups.iteritems():
self._set_group_resource(sg)
_vg.vCPUs += sg.vCPUs
_vg.mem += sg.mem
_vg.local_volume_size += sg.local_volume_size
def _set_vgroup_weight(self, _vgroup):
"""Calculate weights for vgroup."""
def _set_group_weight(self, _group):
"""Set relative weight of each affinity group against available
resource amount.
"""
if self.resource.CPU_avail > 0:
_vgroup.vCPU_weight = float(_vgroup.vCPUs) / \
float(self.resource.CPU_avail)
_group.vCPU_weight = float(_group.vCPUs) / float(self.resource.CPU_avail)
else:
if _vgroup.vCPUs > 0:
_vgroup.vCPU_weight = 1.0
if _group.vCPUs > 0:
_group.vCPU_weight = 1.0
else:
_vgroup.vCPU_weight = 0.0
_group.vCPU_weight = 0.0
if self.resource.mem_avail > 0:
_vgroup.mem_weight = float(_vgroup.mem) / \
float(self.resource.mem_avail)
_group.mem_weight = float(_group.mem) / float(self.resource.mem_avail)
else:
if _vgroup.mem > 0:
_vgroup.mem_weight = 1.0
if _group.mem > 0:
_group.mem_weight = 1.0
else:
_vgroup.mem_weight = 0.0
_group.mem_weight = 0.0
if self.resource.local_disk_avail > 0:
_vgroup.local_volume_weight = float(_vgroup.local_volume_size) / \
float(self.resource.local_disk_avail)
_group.local_volume_weight = float(_group.local_volume_size) / float(self.resource.local_disk_avail)
else:
if _vgroup.local_volume_size > 0:
_vgroup.local_volume_weight = 1.0
if _group.local_volume_size > 0:
_group.local_volume_weight = 1.0
else:
_vgroup.local_volume_weight = 0.0
_group.local_volume_weight = 0.0
for _, svg in _vgroup.subvgroups.iteritems():
if isinstance(svg, VGroup):
self._set_vgroup_weight(svg)
for _, svg in _group.subgroups.iteritems():
if isinstance(svg, Group):
self._set_group_weight(svg)
def set_optimization_priority(self):
"""Set Optimization Priority.
"""Determine the optimization priority among different types of
resources.
This function calculates weights for bandwidth, cpu, memory, local
and overall volume for an app. Then Sorts the results and sets
optimization order accordingly.
"""
if len(self.vgroups) == 0 and len(self.vms) == 0:
if len(self.groups) == 0 and len(self.vms) == 0:
return
app_CPU_weight = -1
@@ -208,3 +359,160 @@ class AppTopology(object):
self.optimization_priority = sorted(opt,
key=lambda resource: resource[1],
reverse=True)
def get_placement_uuid(self, _orch_id):
"""Get the physical uuid for vm if available."""
if "resource_id" in self.stack["placements"][_orch_id].keys():
return self.stack["placements"][_orch_id]["resource_id"]
else:
return "none"
def get_placement_host(self, _orch_id):
"""Get the determined host name for vm if available."""
if "host" in self.stack["placements"][_orch_id]["properties"].keys():
return self.stack["placements"][_orch_id]["properties"]["host"]
else:
return "none"
def delete_placement(self, _orch_id):
"""Delete the placement from stack."""
if _orch_id in self.stack["placements"].keys():
del self.stack["placements"][_orch_id]
uuid = self.get_placement_uuid(_orch_id)
if uuid != "none":
if not self.phandler.delete_placement(uuid):
return False
return True
def update_placement_vm_host(self, _orch_id, _host):
"""Update host info for vm."""
if _orch_id in self.stack["placements"].keys():
self.stack["placements"][_orch_id]["properties"]["host"] = _host
if "locations" in self.stack["placements"][_orch_id].keys():
del self.stack["placements"][_orch_id]["locations"]
def update_placement_group_host(self, _orch_id, _host):
"""Update host info in affinity group."""
if _orch_id in self.stack["groups"].keys():
self.stack["groups"][_orch_id]["host"] = _host
def update_placement_state(self, _orch_id, host=None):
"""Update state and host of vm deployment."""
placement = self.stack["placements"][_orch_id]
# ad-hoc
if self.action == "create" and len(self.candidate_list_map) > 0:
placement["resource_id"] = _orch_id
if self.phandler.insert_placement(_orch_id, self.app_id, host,
_orch_id, "planned") is None:
return False
elif self.action == "replan":
if _orch_id == self.id_map.keys()[0]:
uuid = self.id_map[_orch_id]
if "resource_id" in placement.keys():
if not self._update_placement_state(uuid, host, "planned",
self.action):
return False
else:
placement["resource_id"] = uuid
if self.phandler.insert_placement(uuid, self.app_id, host,
_orch_id, "planned") is None:
return False
else:
if _orch_id not in self.planned_vm_map.keys():
if "resource_id" in placement.keys():
uuid = placement["resource_id"]
if not self._update_placement_state(uuid, host,
"planning", self.action):
return False
elif self.action == "identify":
uuid = self.id_map[_orch_id]
host = placement["properties"]["host"]
if "resource_id" in placement.keys():
if not self._update_placement_state(uuid, host,
"planned", self.action):
return False
else:
placement["resource_id"] = uuid
if self.phandler.insert_placement(uuid, self.app_id, host,
_orch_id, "planned") is None:
return False
elif self.action == "update" or self.action == "migrate":
if _orch_id not in self.planned_vm_map.keys():
if "resource_id" in placement.keys():
uuid = placement["resource_id"]
if not self._update_placement_state(uuid, host, "planning",
self.action):
return False
return True
def _update_placement_state(self, _uuid, _host, _phase, _action):
"""Determine new state depending on phase (scheduling, confirmed) and
action.
"""
placement = self.phandler.get_placement(_uuid)
if placement is None or placement.uuid == "none":
self.status = "no placement found for update"
return False
if placement.state is not None and placement.state != "none":
self.logger.debug("prior vm state = " + placement.state)
if placement.original_host is not None and \
placement.original_host != "none":
self.logger.debug("prior vm host = " + placement.original_host)
new_state = None
if _phase == "planning":
if _action == "migrate":
new_state = "migrating"
self.phandler.set_original_host(_uuid)
else:
if placement.state in ("rebuilding", "migrating"):
if placement.original_host != _host:
new_state = "migrating"
else:
new_state = "rebuilding"
elif _phase == "planned":
if placement.state in ("rebuilding", "migrating"):
if placement.original_host != _host:
new_state = "migrate"
else:
new_state = "rebuild"
else:
if _action == "identify":
new_state = "rebuild"
elif _action == "replan":
new_state = "migrate"
self.phandler.set_verified(_uuid)
self.logger.debug("new vm state = " + new_state)
self.phandler.update_placement(_uuid, host=_host, state=new_state)
return True
def store_app(self):
"""Store this app to db with timestamp."""
stack_data = {}
stack_data["stack_id"] = self.app_id
stack_data["timestamp"] = self.timestamp_scheduled
stack_data["action"] = self.action
stack_data["resources"] = self.stack["placements"]
stack_data["groups"] = self.stack["groups"]
if not self.db.store_stack(stack_data):
return False
return True
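The bookkeeping in _update_placement_state above reduces to a small
transition function; a condensed sketch (derived from the code, not an
exhaustive specification):

    def next_state(phase, action, prior_state, original_host, host):
        # phase is "planning" (scheduling) or "planned" (confirmed).
        if phase == "planning":
            if action == "migrate":
                return "migrating"
            if prior_state in ("rebuilding", "migrating"):
                return "migrating" if original_host != host else "rebuilding"
        elif phase == "planned":
            if prior_state in ("rebuilding", "migrating"):
                return "migrate" if original_host != host else "rebuild"
            if action == "identify":
                return "rebuild"
            if action == "replan":
                return "migrate"
        return None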

View File

@@ -1,157 +0,0 @@
#
# Copyright 2014-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""App Topology Base.
This file contains the datatype base classes used when building out the
app topology. These classes include VGroups, Volumes, and VMs, as well
as 'Link' classes for each.
"""
LEVELS = ["host", "rack", "cluster"]
class VGroup(object):
"""VGroup Class.
This class represents a VGroup object (virtual group). It contains
data about the volumes or vms it contains (such as compute resources),
and data about the group itself (group type, etc).
"""
def __init__(self, _app_uuid, _uuid):
"""Init VGroup Class."""
self.app_uuid = _app_uuid
self.uuid = _uuid
self.name = None
self.status = "requested"
self.vgroup_type = "AFF" # Support Affinity group at this version
self.level = None # host, rack, or cluster
self.survgroup = None # where this vgroup belong to
self.subvgroups = {} # child vgroups
self.diversity_groups = {} # cumulative diversity/exclusivity group
self.exclusivity_groups = {} # over this level. key=name, value=level
self.availability_zone_list = []
self.extra_specs_list = [] # cumulative extra_specs
self.vCPUs = 0
self.mem = 0 # MB
self.local_volume_size = 0 # GB
self.vCPU_weight = -1
self.mem_weight = -1
self.local_volume_weight = -1
self.host = None
def get_json_info(self):
"""Return JSON info of VGroup Object."""
survgroup_id = None
if self.survgroup is None:
survgroup_id = "none"
else:
survgroup_id = self.survgroup.uuid
subvgroup_list = []
for vk in self.subvgroups.keys():
subvgroup_list.append(vk)
return {'name': self.name,
'status': self.status,
'vgroup_type': self.vgroup_type,
'level': self.level,
'survgroup': survgroup_id,
'subvgroup_list': subvgroup_list,
'diversity_groups': self.diversity_groups,
'exclusivity_groups': self.exclusivity_groups,
'availability_zones': self.availability_zone_list,
'extra_specs_list': self.extra_specs_list,
'cpus': self.vCPUs,
'mem': self.mem,
'local_volume': self.local_volume_size,
'cpu_weight': self.vCPU_weight,
'mem_weight': self.mem_weight,
'local_volume_weight': self.local_volume_weight,
'host': self.host}
class VM(object):
"""VM Class.
This class represents a Virtual Machine object. Examples of data this
class contains are compute resources, the host, and status.
"""
def __init__(self, _app_uuid, _uuid):
"""Init VM Class."""
self.app_uuid = _app_uuid
self.uuid = _uuid
self.name = None
self.status = "requested"
self.survgroup = None # VGroup where this vm belongs to
self.diversity_groups = {}
self.exclusivity_groups = {}
self.availability_zone = None
self.extra_specs_list = []
self.flavor = None
self.vCPUs = 0
self.mem = 0 # MB
self.local_volume_size = 0 # GB
self.vCPU_weight = -1
self.mem_weight = -1
self.local_volume_weight = -1
self.host = None # where this vm is placed
def get_json_info(self):
"""Return JSON info for VM object."""
survgroup_id = None
if self.survgroup is None:
survgroup_id = "none"
else:
survgroup_id = self.survgroup.uuid
availability_zone = None
if self.availability_zone is None:
availability_zone = "none"
else:
availability_zone = self.availability_zone
return {'name': self.name,
'status': self.status,
'survgroup': survgroup_id,
'diversity_groups': self.diversity_groups,
'exclusivity_groups': self.exclusivity_groups,
'availability_zones': availability_zone,
'extra_specs_list': self.extra_specs_list,
'flavor': self.flavor,
'cpus': self.vCPUs,
'mem': self.mem,
'local_volume': self.local_volume_size,
'cpu_weight': self.vCPU_weight,
'mem_weight': self.mem_weight,
'local_volume_weight': self.local_volume_weight,
'host': self.host}

View File

@@ -24,438 +24,542 @@
OS::Heat::ResourceGroup
OS::Heat::ResourceGroup
"""
from oslo_log import log
import six
from valet.engine.optimizer.app_manager.app_topology_base import LEVELS
from valet.engine.optimizer.app_manager.app_topology_base import VGroup
from valet.engine.optimizer.app_manager.app_topology_base import VM
import json
import six
import traceback
from oslo_log import log
from valet.engine.optimizer.app_manager.group import Group
from valet.engine.optimizer.app_manager.group import LEVEL
from valet.engine.optimizer.app_manager.vm import VM
LOG = log.getLogger(__name__)
class Parser(object):
"""Parser Class.
This class handles parsing out the data related to the desired
"""This class handles parsing out the data related to the desired
topology from a template.
Unsupported resource types: OS::Nova::ServerGroup,
OS::Heat::AutoScalingGroup, OS::Heat::Stack, OS::Heat::ResourceGroup.
"""
def __init__(self, _high_level_allowed):
"""Init Parser Class."""
self.high_level_allowed = _high_level_allowed
self.format_version = None
self.stack_id = None # used as application id
self.application_name = None
self.action = None # [create|update|ping]
self.candidate_list_map = {}
def __init__(self, _db):
self.db = _db
self.status = "success"
def set_topology(self, _graph):
"""Return result of set_topology which parses input to get topology."""
if "version" in _graph.keys():
self.format_version = _graph["version"]
else:
self.format_version = "0.0"
def set_topology(self, _app_id, _stack):
"""Parse stack resources to set info for search."""
if "stack_id" in _graph.keys():
self.stack_id = _graph["stack_id"]
else:
self.stack_id = "none"
if "application_name" in _graph.keys():
self.application_name = _graph["application_name"]
else:
self.application_name = "none"
if "action" in _graph.keys():
self.action = _graph["action"]
else:
self.action = "any"
if "locations" in _graph.keys() and len(_graph["locations"]) > 0:
if len(_graph["resources"]) == 1:
v_uuid = _graph["resources"].keys()[0]
self.candidate_list_map[v_uuid] = _graph["locations"]
return self._set_topology(_graph["resources"])
def _set_topology(self, _elements):
vgroups = {}
groups = {}
vms = {}
for rk, r in _elements.iteritems():
group_assignments = {}
for rk, r in _stack["placements"].iteritems():
if r["type"] == "OS::Nova::Server":
vm = VM(self.stack_id, rk)
vm = VM(_app_id, rk)
if "name" in r.keys():
vm.name = r["name"]
else:
vm.name = vm.uuid
if "resource_id" in r.keys():
vm.uuid = r["resource_id"]
if "flavor" in r["properties"].keys():
flavor_id = r["properties"]["flavor"]
if isinstance(flavor_id, six.string_types):
vm.flavor = flavor_id
else:
vm.flavor = str(flavor_id)
else:
self.status = "OS::Nova::Server flavor attribute missing"
return {}, {}
if "image" in r["properties"].keys():
image_id = r["properties"]["image"]
if isinstance(image_id, six.string_types):
vm.image = image_id
else:
vm.image = str(image_id)
else:
self.status = "OS::Nova::Server image attribute missing"
return {}, {}
if "host" in r["properties"].keys():
vm.host = r["properties"]["host"]
if "vcpus" in r["properties"].keys():
vm.vCPUs = int(r["properties"]["vcpus"])
if "mem" in r["properties"].keys():
vm.mem = int(r["properties"]["mem"])
if "local_volume" in r["properties"].keys():
vm.local_volume_size = int(r["properties"]["local_volume"])
if "extra_specs" in r["properties"].keys():
extra_specs = {}
for mk, mv in r["properties"]["extra_specs"].iteritems():
extra_specs[mk] = mv
for mk, mv in extra_specs.iteritems():
if mk == "valet":
group_list = []
if isinstance(mv, six.string_types):
try:
groups_dict = json.loads(mv)
if "groups" in groups_dict.keys():
group_list = groups_dict["groups"]
except Exception:
LOG.error("valet metadata parsing: " +
traceback.format_exc())
self.status = "wrong valet metadata format"
return {}, {}
else:
if "groups" in mv.keys():
group_list = mv["groups"]
self._assign_groups(rk, "flavor",
group_list, group_assignments)
vm.extra_specs_list.append(extra_specs)
if "metadata" in r["properties"].keys():
if "valet" in r["properties"]["metadata"].keys():
if "groups" in r["properties"]["metadata"]["valet"].keys():
group_list = r["properties"]["metadata"]["valet"]["groups"]
self._assign_groups(rk, "meta", group_list, group_assignments)
if "availability_zone" in r["properties"].keys():
az = r["properties"]["availability_zone"]
# NOTE: do not allow specifying a particular host name
vm.availability_zone = az.split(":")[0]
if "locations" in r.keys():
if len(r["locations"]) > 0:
self.candidate_list_map[rk] = r["locations"]
vms[vm.uuid] = vm
LOG.info("vm = " + vm.uuid)
vms[vm.orch_id] = vm
elif r["type"] == "OS::Cinder::Volume":
LOG.warning("Parser: do nothing for volume at this "
"version")
pass
elif r["type"] == "OS::Valet::GroupAssignment":
group_assignments[rk] = r
elif r["type"] == "ATT::Valet::GroupAssignment":
vgroup = VGroup(self.stack_id, rk)
vgroup.vgroup_type = None
if "group_type" in r["properties"].keys():
if r["properties"]["group_type"] == "affinity":
vgroup.vgroup_type = "AFF"
elif r["properties"]["group_type"] == "diversity":
vgroup.vgroup_type = "DIV"
elif r["properties"]["group_type"] == "exclusivity":
vgroup.vgroup_type = "EX"
if len(group_assignments) > 0:
groups = self._set_groups(group_assignments, _app_id, _stack)
if groups is None:
return None, None
if len(groups) == 0:
return {}, {}
if self._merge_diversity_groups(group_assignments, groups,
vms) is False:
return {}, {}
if self._merge_exclusivity_groups(group_assignments, groups,
vms) is False:
return {}, {}
if self._merge_affinity_groups(group_assignments, groups,
vms) is False:
return {}, {}
# Delete all EX and DIV groups after merging
groups = {
vgk: vg for vgk, vg in groups.iteritems() if vg.group_type != "DIV" and vg.group_type != "EX"
}
if len(groups) == 0 and len(vms) == 0:
self.status = "no vms found in stack"
return groups, vms
def _assign_groups(self, _rk, _tag, _group_list, _group_assignments):
"""Create group assignment."""
count = 0
for g_id in _group_list:
rk = "{}_{}_{}".format(_rk, _tag, str(count))
count += 1
properties = {}
properties["group"] = g_id
properties["resources"] = []
properties["resources"].append(_rk)
assignment = {}
assignment["properties"] = properties
_group_assignments[rk] = assignment
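# Example (illustrative): given _rk = "server_0", _tag = "flavor", and
# _group_list = ["group_a"], the loop above produces a synthetic
# assignment equivalent to:
#
#   _group_assignments["server_0_flavor_0"] = {
#       "properties": {"group": "group_a", "resources": ["server_0"]}
#   }
#
# so groups declared via flavor extra_specs or server metadata flow
# through the same _set_groups/_merge_* path as explicit
# OS::Valet::GroupAssignment resources.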
def _set_groups(self, _group_assignments, _app_id, _stack):
"""Parse valet groups for search."""
if _stack["groups"] is None:
_stack["groups"] = {}
groups = {}
for rk, assignment in _group_assignments.iteritems():
if "group" in assignment["properties"].keys():
g_id = assignment["properties"]["group"]
if g_id in _stack["groups"].keys():
group = self._make_group(_app_id, g_id, _stack["groups"][g_id])
if group is not None:
groups[group.orch_id] = group
else:
return {}
else:
group_info = self.db.get_group(g_id)
if group_info is None:
return None
elif len(group_info) == 0:
self.status = "no group found"
return {}
g = {}
g["type"] = group_info["type"]
g["name"] = group_info["name"]
g["level"] = group_info["level"]
_stack["groups"][group_info["id"]] = g
assignment["properties"]["group"] = group_info["id"]
group = self._make_group(_app_id, group_info["id"], g)
if group is not None:
groups[group.orch_id] = group
else:
return {}
else:
self.status = "group assignment format error"
return {}
return groups
def _make_group(self, _app_id, _gk, _g):
"""Make a group object."""
group = Group(_app_id, _gk)
group.group_type = None
if "type" in _g.keys():
if _g["type"] == "affinity":
group.group_type = "AFF"
elif _g["type"] == "diversity":
group.group_type = "DIV"
elif _g["type"] == "exclusivity":
group.group_type = "EX"
else:
self.status = "unknown group type {} for group {}".format(_g["type"], _gk)
return None
else:
self.status = "no group type for group {}".format(_gk)
return None
if "name" in _g.keys():
group.name = _g["name"]
else:
if group.group_type == "EX":
self.status = "no exclusivity group name for group {}".format(_gk)
return None
else:
group.name = "any"
if "level" in _g.keys():
group.level = _g["level"]
else:
self.status = "no grouping level for group {}".format(_gk)
return None
if "host" in _g.keys():
group.host = _g["host"]
return group
def _merge_diversity_groups(self, _elements, _groups, _vms):
""" to merge diversity sub groups """
for level in LEVEL:
for rk, r in _elements.iteritems():
group = None
if "group" in r["properties"].keys():
if _groups[r["properties"]["group"]].level == level and \
_groups[r["properties"]["group"]].group_type == "DIV":
group = _groups[r["properties"]["group"]]
else:
continue
if group is None:
self.status = "no diversity group reference in assignment {}".format(rk)
return False
if "resources" not in r["properties"].keys():
self.status = "group assignment format error"
return False
for vk in r["properties"]["resources"]:
if vk in _vms.keys():
group.subgroups[vk] = _vms[vk]
_vms[vk].diversity_groups[group.orch_id] = "{}:{}".format(group.level, group.name)
elif vk in _groups.keys():
# FIXME(gjung): vk refers to GroupAssignment
# orch_id -> uuid of group
vg = _groups[vk]
if LEVEL.index(vg.level) > LEVEL.index(level):
self.status = "grouping scope: nested group's level is higher"
return False
if vg.group_type == "DIV" or vg.group_type == "EX":
self.status = vg.group_type + " not allowed to be nested in diversity group"
return False
group.subgroups[vk] = vg
vg.diversity_groups[group.orch_id] = "{}:{}".format(group.level, group.name)
else:
self.status = "invalid resource {} in assignment {}".format(vk, rk)
return False
return True
def _merge_exclusivity_groups(self, _elements, _groups, _vms):
""" to merge exclusivity sub groups """
for level in LEVEL:
for rk, r in _elements.iteritems():
group = None
if "group" in r["properties"].keys():
if _groups[r["properties"]["group"]].level == level and \
_groups[r["properties"]["group"]].group_type == "EX":
group = _groups[r["properties"]["group"]]
else:
continue
if group is None:
self.status = "no group reference in exclusivity assignment {}".format(rk)
return False
if "resources" not in r["properties"].keys():
self.status = "group assignment format error"
return False
for vk in r["properties"]["resources"]:
if vk in _vms.keys():
group.subgroups[vk] = _vms[vk]
_vms[vk].exclusivity_groups[group.orch_id] = "{}:{}".format(group.level, group.name)
elif vk in _groups.keys():
vg = _groups[vk]
if LEVEL.index(vg.level) > LEVEL.index(level):
self.status = "grouping scope: nested group's level is higher"
return False
if vg.group_type == "DIV" or vg.group_type == "EX":
self.status = "({}) not allowd to be nested in exclusivity group".format(vg.group_type)
return False
group.subgroups[vk] = vg
vg.exclusivity_groups[group.orch_id] = group.level + ":" + group.name
else:
self.status = "invalid resource {} in assignment {}".format(vk, rk)
return False
return True
def _merge_affinity_groups(self, _elements, _groups, _vms):
# key is orch_id of vm or group & value is its parent group
affinity_map = {}
for level in LEVEL:
for rk, r in _elements.iteritems():
group = None
if "group" in r["properties"].keys():
if r["properties"]["group"] in _groups.keys():
if _groups[r["properties"]["group"]].level == level and \
_groups[r["properties"]["group"]].group_type == "AFF":
group = _groups[r["properties"]["group"]]
else:
continue
else:
continue
if group is None:
self.status = "no group reference in affinity assignment = " + rk
return False
if "resources" not in r["properties"].keys():
self.status = "group assignment format error"
return False
for vk in r["properties"]["resources"]:
if vk in _vms.keys():
self._merge_vm(group, vk, _vms, affinity_map)
elif vk in _groups.keys():
if not self._merge_group(group, vk, _groups, _vms,
_elements, affinity_map):
return False
else:
# vk belongs to the other group already or
# refer to invalid resource
if vk not in affinity_map.keys():
self.status = "invalid resource = " + vk + " in assignment = " + rk
return False
if affinity_map[vk].orch_id != group.orch_id:
if self._exist_in_subgroups(vk, group) is None:
self._set_implicit_grouping(vk,
group,
affinity_map,
_groups)
return True
def _merge_subgroups(self, _group, _subgroups, _vms, _groups, _elements,
_affinity_map):
"""To merge recursive affinity sub groups"""
for vk, _ in _subgroups.iteritems():
if vk in _vms.keys():
self._merge_vm(_group, vk, _vms, _affinity_map)
elif vk in _groups.keys():
if not self._merge_group(_group, vk, _groups, _vms,
_elements, _affinity_map):
return False
else:
# vk belongs to the other group already or
# refer to invalid resource
if vk not in _affinity_map.keys():
self.status = "invalid resource = " + vk
return False
if _affinity_map[vk].orch_id != _group.orch_id:
if self._exist_in_subgroups(vk, _group) is None:
self._set_implicit_grouping(vk, _group, _affinity_map,
_groups)
return True
def _merge_vm(self, _group, _vk, _vms, _affinity_map):
""" to merge a vm into the group """
_group.subgroups[_vk] = _vms[_vk]
_vms[_vk].surgroup = _group
_affinity_map[_vk] = _group
self._add_implicit_diversity_groups(_group,
_vms[_vk].diversity_groups)
self._add_implicit_exclusivity_groups(_group,
_vms[_vk].exclusivity_groups)
self._add_memberships(_group, _vms[_vk])
del _vms[_vk]
def _merge_group(self, _group, _vk, _groups, _vms, _elements,
_affinity_map):
""" to merge a group into the group """
vg = _groups[_vk]
if LEVEL.index(vg.level) > LEVEL.index(_group.level):
self.status = "grouping scope: nested group's level is higher"
return False
if vg.group_type == "DIV" or vg.group_type == "EX":
if not self._merge_subgroups(_group, vg.subgroups, _vms, _groups,
_elements, _affinity_map):
return False
del _groups[_vk]
else:
if self._exist_in_subgroups(_vk, _group) is None:
if not self._get_subgroups(vg, _elements, _groups, _vms,
_affinity_map):
return False
_group.subgroups[_vk] = vg
vg.surgroup = _group
_affinity_map[_vk] = _group
self._add_implicit_diversity_groups(_group,
vg.diversity_groups)
self._add_implicit_exclusivity_groups(_group,
vg.exclusivity_groups)
self._add_memberships(_group, vg)
del _groups[_vk]
return True
def _get_subgroups(self, _group, _elements, _groups, _vms, _affinity_map):
""" to merge all deeper subgroups """
for rk, r in _elements.iteritems():
if r["properties"]["group"] == _group.orch_id:
for vk in r["properties"]["resources"]:
if vk in _vms.keys():
self._merge_vm(_group, vk, _vms, _affinity_map)
elif vk in _groups.keys():
if not self._merge_group(_group, vk, _groups, _vms,
_elements, _affinity_map):
return False
else:
if vk not in _affinity_map.keys():
self.status = "invalid resource = " + vk
return False
if _affinity_map[vk].orch_id != _group.orch_id:
if self._exist_in_subgroups(vk, _group) is None:
self._set_implicit_grouping(vk,
_group,
_affinity_map,
_groups)
return True
return False
def _add_implicit_diversity_groups(self, _group, _diversity_groups):
""" to add subgroup's diversity groups """
for dz, level in _diversity_groups.iteritems():
l = level.split(":", 1)[0]
if LEVEL.index(l) >= LEVEL.index(_group.level):
_group.diversity_groups[dz] = level
def _add_implicit_exclusivity_groups(self, _group, _exclusivity_groups):
""" to add subgroup's exclusivity groups """
for ex, level in _exclusivity_groups.iteritems():
l = level.split(":", 1)[0]
if LEVEL.index(l) >= LEVEL.index(_group.level):
_group.exclusivity_groups[ex] = level
def _add_memberships(self, _group, _v):
""" to add subgroup's host-aggregates and AZs """
if isinstance(_v, VM) or isinstance(_v, Group):
for extra_specs in _v.extra_specs_list:
_group.extra_specs_list.append(extra_specs)
if isinstance(_v, VM) and _v.availability_zone is not None:
if _v.availability_zone not in _group.availability_zone_list:
_group.availability_zone_list.append(_v.availability_zone)
if isinstance(_v, Group):
for az in _v.availability_zone_list:
if az not in _group.availability_zone_list:
_group.availability_zone_list.append(az)
def _set_implicit_grouping(self, _vk, _s_vg, _affinity_map, _groups):
""" take vk's most top parent as a s_vg's child group """
t_vg = _affinity_map[_vk]  # where _vk currently belongs to
if t_vg.orch_id in _affinity_map.keys():
# if the parent belongs to the other parent group
self._set_implicit_grouping(t_vg.orch_id, _s_vg,
_affinity_map, _groups)
else:
if LEVEL.index(t_vg.level) > LEVEL.index(_s_vg.level):
t_vg.level = _s_vg.level
if self._exist_in_subgroups(t_vg.orch_id, _s_vg) is None:
_s_vg.subgroups[t_vg.orch_id] = t_vg
t_vg.surgroup = _s_vg
_affinity_map[t_vg.orch_id] = _s_vg
self._add_implicit_diversity_groups(_s_vg,
t_vg.diversity_groups)
self._add_implicit_exclusivity_groups(_s_vg,
t_vg.exclusivity_groups)
self._add_memberships(_s_vg, t_vg)
del _groups[t_vg.orch_id]
def _exist_in_subgroups(self, _vk, _vg):
""" to check if vk exists in a group recursively """
containing_vg_id = None
for vk, v in _vg.subgroups.iteritems():
if vk == _vk:
containing_vg_id = _vg.orch_id
break
else:
if isinstance(v, Group):
containing_vg_id = self._exist_in_subgroups(_vk, v)
if containing_vg_id is not None:
break
return containing_vg_id
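# Example (illustrative sketch; the parser construction and the exact
# normalized stack shape are assumed): set_topology() consumes a stack
# dict of the form stored by the app handler, e.g.:
#
#   stack = {
#       "placements": {
#           "vm_0": {"type": "OS::Nova::Server",
#                    "name": "vm-0",
#                    "properties": {"flavor": "m1.small",
#                                   "image": "ubuntu"}},
#           "assign_0": {"type": "OS::Valet::GroupAssignment",
#                        "properties": {"group": "group_a",
#                                       "resources": ["vm_0"]}}},
#       "groups": {"group_a": {"type": "affinity", "level": "host",
#                              "name": "group_a"}}}
#   groups, vms = parser.set_topology("stack-uuid", stack)
#
# On success, groups holds the surviving affinity Groups (DIV/EX groups
# are folded into member diversity_groups/exclusivity_groups and then
# dropped), and vms holds the VMs not absorbed into an affinity group,
# keyed by orchestration id.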

View File

@ -1,74 +0,0 @@
#
# Copyright 2014-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""App."""
class App(object):
"""App Class.
This class represents an app object that consists of the name and id of
the app, as well as its status and the vms/volumes/vgroups belonging to it.
"""
def __init__(self, _app_id, _app_name, _action):
"""Init App."""
self.app_id = _app_id
self.app_name = _app_name
self.request_type = _action # create, replan, migrate, or ping
self.timestamp_scheduled = 0
self.vgroups = {}
self.vms = {}
self.status = 'requested' # Moved to "scheduled" (and then "placed")
def add_vm(self, _vm, _host_name, _status):
"""Add vm to app, set status to scheduled."""
self.vms[_vm.uuid] = _vm
self.vms[_vm.uuid].status = _status
self.vms[_vm.uuid].host = _host_name
def add_vgroup(self, _vg, _host_name):
"""Add vgroup to app, set status to scheduled."""
self.vgroups[_vg.uuid] = _vg
self.vgroups[_vg.uuid].status = "scheduled"
self.vgroups[_vg.uuid].host = _host_name
def get_json_info(self):
"""Return JSON info of App including vms, vols and vgs."""
vms = {}
for vmk, vm in self.vms.iteritems():
vms[vmk] = vm.get_json_info()
vgs = {}
for vgk, vg in self.vgroups.iteritems():
vgs[vgk] = vg.get_json_info()
return {'action': self.request_type,
'timestamp': self.timestamp_scheduled,
'stack_id': self.app_id,
'name': self.app_name,
'VMs': vms,
'VGroups': vgs}
def log_in_info(self):
"""Return in info related to login (time of login, app name, etc)."""
return {'action': self.request_type,
'timestamp': self.timestamp_scheduled,
'stack_id': self.app_id,
'name': self.app_name}

View File

@ -0,0 +1,107 @@
#
# Copyright 2014-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
LEVEL = ["host", "rack", "cluster"]
class Group(object):
def __init__(self, _app_id, _orch_id):
self.app_id = _app_id # stack_id
self.orch_id = _orch_id # consistent and permanent key
self.name = None
self.group_type = "AFF"
self.level = None # host, rack, or cluster
self.surgroup = None # where this group belongs to
self.subgroups = {} # child groups
self.diversity_groups = {} # cumulative diversity/exclusivity group
self.exclusivity_groups = {} # over this level. key=name, value=level
self.availability_zone_list = []
self.extra_specs_list = [] # cumulative extra_specs
self.vCPUs = 0
self.mem = 0 # MB
self.local_volume_size = 0 # GB
self.vCPU_weight = -1
self.mem_weight = -1
self.local_volume_weight = -1
self.host = None
self.sort_base = -1
def get_common_diversity(self, _diversity_groups):
common_level = "ANY"
for dk in self.diversity_groups.keys():
if dk in _diversity_groups.keys():
level = self.diversity_groups[dk].split(":")[0]
if common_level != "ANY":
if LEVEL.index(level) > LEVEL.index(common_level):
common_level = level
else:
common_level = level
return common_level
def get_affinity_id(self):
aff_id = None
if self.group_type == "AFF" and self.name != "any":
aff_id = self.level + ":" + self.name
return aff_id
def get_exclusivities(self, _level):
exclusivities = {}
for exk, level in self.exclusivity_groups.iteritems():
if level.split(":")[0] == _level:
exclusivities[exk] = level
return exclusivities
def get_json_info(self):
surgroup_id = None
if self.surgroup is None:
surgroup_id = "none"
else:
surgroup_id = self.surgroup.orch_id
subgroup_list = []
for vk in self.subgroups.keys():
subgroup_list.append(vk)
return {'name': self.name,
'group_type': self.group_type,
'level': self.level,
'surgroup': surgroup_id,
'subgroup_list': subgroup_list,
'diversity_groups': self.diversity_groups,
'exclusivity_groups': self.exclusivity_groups,
'availability_zones': self.availability_zone_list,
'extra_specs_list': self.extra_specs_list,
'cpus': self.vCPUs,
'mem': self.mem,
'local_volume': self.local_volume_size,
'cpu_weight': self.vCPU_weight,
'mem_weight': self.mem_weight,
'local_volume_weight': self.local_volume_weight,
'host': self.host}
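# Example (illustrative): a host-level affinity group.
#
#   g = Group("stack-uuid", "orch-1")
#   g.name = "web_tier"
#   g.level = "host"
#   g.get_affinity_id()   # -> "host:web_tier"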

View File

@ -0,0 +1,258 @@
#
# Copyright 2014-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import operator
import time
class Placement(object):
'''Container to hold placement info.'''
def __init__(self, _uuid):
self.uuid = _uuid
self.stack_id = None
self.host = None
self.orch_id = None
self.state = None
self.original_host = None
self.dirty = False
self.status = None
self.timestamp = 0
def get_json_info(self):
return {'uuid': self.uuid,
'stack_id': self.stack_id,
'host': self.host,
'orch_id': self.orch_id,
'state': self.state,
'original_host': self.original_host,
'dirty': self.dirty,
'status': self.status,
'timestamp': self.timestamp}
class PlacementHandler(object):
'''Placement handler to cache and store placements.'''
def __init__(self, _db, _logger):
self.placements = {} # key = uuid, value = Placement instance
self.max_cache = 5000
self.min_cache = 1000
self.db = _db
self.logger = _logger
def flush_cache(self):
'''Unload placements from cache based on LRU.'''
if len(self.placements) > self.max_cache:
count = 0
num_of_removes = len(self.placements) - self.min_cache
remove_item_list = []
for placement in (sorted(self.placements.values(),
key=operator.attrgetter('timestamp'))):
remove_item_list.append(placement.uuid)
count += 1
if count == num_of_removes:
break
for uuid in remove_item_list:
self.unload_placement(uuid)
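# Example (illustrative): with max_cache = 5000 and min_cache = 1000, a
# cache grown to 5001 entries is trimmed by up to 4001 placements with
# the oldest timestamps; dirty (not yet persisted) entries are skipped
# by unload_placement() and stay cached.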
def load_placement(self, _uuid):
'''Fetch from db into cache.'''
p = self.db.get_placement(_uuid)
if p is None:
return None
elif len(p) == 0:
return Placement("none")
placement = Placement(_uuid)
placement.uuid = p["uuid"]
placement.stack_id = p["stack_id"]
placement.host = p["host"]
placement.orch_id = p["orch_id"]
placement.state = p["state"]
placement.original_host = p["original_host"]
placement.dirty = p["dirty"]
placement.status = p["status"]
placement.timestamp = float(p["timestamp"])
self.placements[_uuid] = placement
return placement
def unload_placement(self, _uuid):
'''Remove from cache.'''
if _uuid in self.placements.keys():
placement = self.placements[_uuid]
if placement.dirty is False:
del self.placements[_uuid]
def store_placement(self, _uuid, _placement):
'''Store changed placement to db.'''
placement_data = {}
placement_data["uuid"] = _uuid
placement_data["stack_id"] = _placement.stack_id
placement_data["host"] = _placement.host
placement_data["orch_id"] = _placement.orch_id
placement_data["state"] = _placement.state
placement_data["original_host"] = _placement.original_host
placement_data["dirty"] = _placement.dirty
placement_data["status"] = _placement.status
placement_data["timestamp"] = _placement.timestamp
if not self.db.store_placement(placement_data):
return False
return True
def get_placement(self, _uuid):
'''Get placement info from db or cache.'''
if _uuid not in self.placements.keys():
placement = self.load_placement(_uuid)
if placement is None:
return None
elif placement.uuid == "none":
return placement
else:
self.logger.debug("hit placement cache")
return self.placements[_uuid]
def get_placements(self):
'''Get all placements from db.'''
placement_list = self.db.get_placements()
if placement_list is None:
return None
return placement_list
def delete_placement(self, _uuid):
'''Delete placement from cache and db.'''
if _uuid in self.placements.keys():
del self.placements[_uuid]
if not self.db.delete_placement(_uuid):
return False
return True
def insert_placement(self, _uuid, _stack_id, _host, _orch_id, _state):
'''Insert (Update) new (existing) placement into cache and db.'''
placement = Placement(_uuid)
placement.stack_id = _stack_id
placement.host = _host
placement.orch_id = _orch_id
placement.state = _state
placement.original_host = None
placement.timestamp = time.time()
placement.status = "verified"
placement.dirty = True
self.placements[_uuid] = placement
if not self.store_placement(_uuid, placement):
return None
return placement
def update_placement(self, _uuid, stack_id=None, host=None, orch_id=None, state=None):
'''Update existing placement info in cache.'''
placement = self.get_placement(_uuid)
if placement is None or placement.uuid == "none":
return False
if stack_id is not None:
if placement.stack_id is None or placement.stack_id == "none" or placement.stack_id != stack_id:
placement.stack_id = stack_id
placement.timestamp = time.time()
placement.dirty = True
if host is not None:
if placement.host != host:
placement.host = host
placement.timestamp = time.time()
placement.dirty = True
if orch_id is not None:
if placement.orch_id is None or placement.orch_id == "none" or placement.orch_id != orch_id:
placement.orch_id = orch_id
placement.timestamp = time.time()
placement.dirty = True
if state is not None:
if placement.state is None or placement.state == "none" or placement.state != state:
placement.state = state
placement.timestamp = time.time()
placement.dirty = True
if not self.store_placement(_uuid, placement):
return False
return True
def set_original_host(self, _uuid):
'''Set the original host before migration.'''
placement = self.get_placement(_uuid)
if placement is None or placement.uuid == "none":
return False
placement.original_host = placement.host
placement.timestamp = time.time()
placement.dirty = True
if not self.store_placement(_uuid, placement):
return False
return True
def set_verified(self, _uuid):
'''Mark this vm as verified.'''
placement = self.get_placement(_uuid)
if placement is None or placement.uuid == "none":
return False
if placement.status != "verified":
self.logger.info("this vm is just verified")
placement.status = "verified"
placement.timestamp = time.time()
placement.dirty = True
if not self.store_placement(_uuid, placement):
return False
return True
def set_unverified(self, _uuid):
'''Mark this vm as not verified yet.'''
placement = self.get_placement(_uuid)
if placement is None or placement.uuid == "none":
return False
self.logger.info("this vm is not verified yet")
placement.status = "none"
placement.timestamp = time.time()
placement.dirty = True
if not self.store_placement(_uuid, placement):
return False
return True
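# Example usage (illustrative sketch; db and logger wiring assumed):
#
#   ph = PlacementHandler(db, logger)
#   ph.insert_placement(uuid, stack_id, "host-01", orch_id, "planned")
#   ph.update_placement(uuid, host="host-02")   # marks the entry dirty
#   ph.set_verified(uuid)
#   ph.flush_cache()   # LRU-evicts clean entries beyond max_cache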

View File

@ -0,0 +1,106 @@
#
# Copyright 2014-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from valet.engine.optimizer.app_manager.group import LEVEL
class VM(object):
def __init__(self, _app_id, _orch_id):
self.app_id = _app_id
self.orch_id = _orch_id
self.uuid = None # permanent physical uuid
self.name = None
self.surgroup = None # Group where this vm belongs to
self.diversity_groups = {}
self.exclusivity_groups = {}
self.availability_zone = None
self.extra_specs_list = []
self.flavor = None
self.image = None
self.vCPUs = 0
self.mem = 0 # MB
self.local_volume_size = 0 # GB
self.vCPU_weight = -1
self.mem_weight = -1
self.local_volume_weight = -1
self.host = None
self.sort_base = -1
def get_common_diversity(self, _diversity_groups):
common_level = "ANY"
for dk in self.diversity_groups.keys():
if dk in _diversity_groups.keys():
level = self.diversity_groups[dk].split(":")[0]
if common_level != "ANY":
if LEVEL.index(level) > LEVEL.index(common_level):
common_level = level
else:
common_level = level
return common_level
def get_exclusivities(self, _level):
exclusivities = {}
for exk, level in self.exclusivity_groups.iteritems():
if level.split(":")[0] == _level:
exclusivities[exk] = level
return exclusivities
def get_json_info(self):
surgroup_id = None
if self.surgroup is None:
surgroup_id = "none"
else:
surgroup_id = self.surgroup.orch_id
availability_zone = None
if self.availability_zone is None:
availability_zone = "none"
else:
availability_zone = self.availability_zone
uuid = None
if self.uuid is not None and self.uuid != "none":
uuid = self.uuid
else:
uuid = "none"
return {'name': self.name,
'uuid': uuid,
'surgroup': surgroup_id,
'diversity_groups': self.diversity_groups,
'exclusivity_groups': self.exclusivity_groups,
'availability_zones': availability_zone,
'extra_specs_list': self.extra_specs_list,
'flavor': self.flavor,
'image': self.image,
'cpus': self.vCPUs,
'mem': self.mem,
'local_volume': self.local_volume_size,
'cpu_weight': self.vCPU_weight,
'mem_weight': self.mem_weight,
'local_volume_weight': self.local_volume_weight,
'host': self.host}
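# Example (illustrative): a VM is keyed by its orchestration id until
# Nova assigns a physical uuid.
#
#   vm = VM("stack-uuid", "orch-7")
#   vm.name = "vm-0"
#   vm.flavor = "m1.small"
#   vm.get_json_info()["uuid"]   # -> "none" until vm.uuid is set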

View File

@ -0,0 +1,619 @@
#
# Copyright 2014-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Music Handler."""
import json
import operator
from oslo_log import log
from valet.common.music import Music
from valet.engine.optimizer.db_connect.event import Event
# from valet.engine.optimizer.simulator.workload_generator import WorkloadGen
LOG = log.getLogger(__name__)
class DBHandler(object):
"""This Class consists of functions that interact with the music
database for valet and return/delete/update objects within it.
"""
def __init__(self, _config):
"""Init Music Handler."""
self.config = _config
# self.db = WorkloadGen(self.config, LOG)
self.db = Music(hosts=self.config.hosts, port=self.config.port,
replication_factor=self.config.replication_factor,
music_server_retries=self.config.music_server_retries,
logger=LOG)
def get_events(self):
"""Get events from nova
This function obtains all events from the database and then
iterates through all of them to check the method and perform the
corresponding action on them. Return Event list.
"""
event_list = []
events = {}
try:
events = self.db.read_all_rows(self.config.db_keyspace, self.config.db_event_table)
except Exception as e:
LOG.error("DB: miss events: " + str(e))
return []
if len(events) > 0:
for _, row in events.iteritems():
event_id = row['timestamp']
exchange = row['exchange']
method = row['method']
args_data = row['args']
LOG.debug("MusicHandler.get_events: event (" +
event_id + ") is entered")
if exchange != "nova":
if self.delete_event(event_id) is False:
return None
LOG.debug(
"MusicHandler.get_events: event exchange "
"(" + exchange + ") is not supported")
continue
if method != 'object_action' and method != 'build_and_run_' \
'instance':
if self.delete_event(event_id) is False:
return None
LOG.debug("MusicHandler.get_events: event method "
"(" + method + ") is not considered")
continue
if len(args_data) == 0:
if self.delete_event(event_id) is False:
return None
LOG.debug("MusicHandler.get_events: "
"event does not have args")
continue
try:
args = json.loads(args_data)
except (ValueError, KeyError, TypeError):
LOG.warn("DB: while decoding to json event = " + method +
":" + event_id)
continue
# TODO(lamt) this block of code can use refactoring
if method == 'object_action':
if 'objinst' in args.keys():
objinst = args['objinst']
if 'nova_object.name' in objinst.keys():
nova_object_name = objinst['nova_object.name']
if nova_object_name == 'Instance':
if 'nova_object.changes' in objinst.keys() and \
'nova_object.data' in objinst.keys():
change_list = objinst[
'nova_object.changes']
change_data = objinst['nova_object.data']
if 'vm_state' in change_list and \
'vm_state' in change_data.keys():
if (change_data['vm_state'] ==
'deleted' or
change_data['vm_state'] ==
'active'):
e = Event(event_id)
e.exchange = exchange
e.method = method
e.args = args
event_list.append(e)
else:
msg = "unknown vm_state = %s"
LOG.warning(
msg % change_data["vm_state"])
if 'uuid' in change_data.keys():
msg = " uuid = %s"
LOG.warning(
msg % change_data['uuid'])
if not self.delete_event(event_id):
return None
else:
if not self.delete_event(event_id):
return None
else:
if self.delete_event(event_id) is False:
return None
elif nova_object_name == 'ComputeNode':
if 'nova_object.changes' in objinst.keys() and \
'nova_object.data' in objinst.keys():
e = Event(event_id)
e.exchange = exchange
e.method = method
e.args = args
event_list.append(e)
else:
if self.delete_event(event_id) is False:
return None
else:
if self.delete_event(event_id) is False:
return None
else:
if self.delete_event(event_id) is False:
return None
else:
if self.delete_event(event_id) is False:
return None
elif method == 'build_and_run_instance':
if 'filter_properties' not in args.keys():
if self.delete_event(event_id) is False:
return None
continue
# NOTE(GJ): do not check the existence of scheduler_hints
if 'instance' not in args.keys():
if self.delete_event(event_id) is False:
return None
continue
else:
instance = args['instance']
if 'nova_object.data' not in instance.keys():
if self.delete_event(event_id) is False:
return None
continue
e = Event(event_id)
e.exchange = exchange
e.method = method
e.args = args
event_list.append(e)
error_event_list = []
for e in event_list:
e.set_data()
if e.method == "object_action":
if e.object_name == 'Instance':
if e.uuid is None or e.uuid == "none" or \
e.host is None or e.host == "none" or \
e.vcpus == -1 or e.mem == -1:
error_event_list.append(e)
LOG.warn("DB: data missing in instance object event")
elif e.object_name == 'ComputeNode':
if e.host is None or e.host == "none":
error_event_list.append(e)
LOG.warn("DB: data missing in compute object event")
elif e.method == "build_and_run_instance":
if e.uuid is None or e.uuid == "none":
error_event_list.append(e)
LOG.warning("MusicHandler.get_events: data missing "
"in build event")
if len(error_event_list) > 0:
event_list[:] = [e for e in event_list if e not in error_event_list]
if len(event_list) > 0:
# event_id is timestamp
event_list.sort(key=operator.attrgetter('event_id'))
return event_list
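# Example (illustrative): only two nova notification shapes survive the
# filtering above, e.g.
#
#   {"exchange": "nova", "method": "build_and_run_instance",
#    "args": {"filter_properties": {...},
#             "instance": {"nova_object.data": {...}}}}
#
# and 'object_action' events whose objinst is an Instance entering
# vm_state 'active' or 'deleted' (or a ComputeNode change); every other
# row is deleted from the event table and skipped.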
def delete_event(self, _e):
"""Delete event."""
try:
self.db.delete_row_eventually(self.config.db_keyspace,
self.config.db_event_table,
'timestamp', _e)
except Exception as e:
LOG.error("DB: while deleting event: " + str(e))
return False
return True
def get_requests(self):
"""Get requests from valet-api."""
request_list = []
requests = {}
try:
requests = self.db.read_all_rows(self.config.db_keyspace,
self.config.db_request_table)
except Exception as e:
LOG.error("DB: miss requests: " + str(e))
return []
if len(requests) > 0:
for _, row in requests.iteritems():
r_list = json.loads(row['request'])
LOG.debug("*** input = " + json.dumps(r_list, indent=4))
for r in r_list:
request_list.append(r)
return request_list
def put_result(self, _result):
"""Return result and delete handled request."""
for rk, r in _result.iteritems():
LOG.debug("*** output = " + json.dumps(r, indent=4))
data = {
'stack_id': rk,
'placement': json.dumps(r)
}
try:
self.db.create_row(self.config.db_keyspace,
self.config.db_response_table, data)
except Exception as e:
LOG.error("DB: while putting placement result: " + str(e))
return False
return True
def delete_requests(self, _result):
"""Delete finished requests."""
for rk in _result.keys():
try:
self.db.delete_row_eventually(self.config.db_keyspace,
self.config.db_request_table,
'stack_id', rk)
except Exception as e:
LOG.error("DB: while deleting handled request: " + str(e))
return False
return True
def get_stack(self, _stack_id):
"""Get stack info."""
json_app = {}
row = {}
try:
row = self.db.read_row(self.config.db_keyspace,
self.config.db_app_table,
'stack_id', _stack_id)
except Exception as e:
LOG.error("DB: while getting stack info: " + str(e))
return None
if len(row) > 0:
str_app = row[row.keys()[0]]['app']
json_app = json.loads(str_app)
return json_app
def store_stack(self, _stack_data):
"""Store stack info."""
stack_id = _stack_data["stack_id"]
if not self.delete_stack(stack_id):
return False
LOG.debug("store stack = " + json.dumps(_stack_data, indent=4))
data = {
'stack_id': stack_id,
'app': json.dumps(_stack_data)
}
try:
self.db.create_row(self.config.db_keyspace,
self.config.db_app_table, data)
except Exception as e:
LOG.error("DB: while storing app: " + str(e))
return False
return True
def delete_stack(self, _s_id):
"""Delete stack."""
try:
self.db.delete_row_eventually(self.config.db_keyspace,
self.config.db_app_table,
'stack_id', _s_id)
except Exception as e:
LOG.error("DB: while deleting app: " + str(e))
return False
return True
def delete_placement_from_stack(self, _stack_id, orch_id=None, uuid=None,
time=None):
"""Update stack by removing a placement from stack resources."""
stack = self.get_stack(_stack_id)
if stack is None:
return False
if len(stack) > 0:
if orch_id is not None:
del stack["resources"][orch_id]
elif uuid is not None:
pk = None
for rk, r in stack["resources"].iteritems():
if "resource_id" in r.keys() and uuid == r["resource_id"]:
pk = rk
break
if pk is not None:
del stack["resources"][pk]
if time is not None:
stack["timestamp"] = time
if not self.store_stack(stack):
return False
return True
def update_stack(self, _stack_id, orch_id=None, uuid=None, host=None,
time=None):
"""Update stack by changing host and/or uuid of vm in stack resources.
"""
stack = self.get_stack(_stack_id)
if stack is None:
return False
if len(stack) > 0:
if orch_id is not None:
if orch_id in stack["resources"].keys():
if uuid is not None:
stack["resources"][orch_id]["resource_id"] = uuid
if host is not None:
stack["resources"][orch_id]["properties"]["host"] = host
elif uuid is not None:
for rk, r in stack["resources"].iteritems():
if "resource_id" in r.keys() and uuid == r["resource_id"]:
if host is not None:
r["properties"]["host"] = host
break
if time is not None:
stack["timestamp"] = time
if not self.store_stack(stack):
return False
return True
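# Example (illustrative; identifiers are hypothetical): once Nova
# reports a physical uuid for an orchestration id, the stored stack is
# patched in place:
#
#   db.update_stack(stack_id, orch_id="orch-7",
#                   uuid="3f2a...", host="host-01", time=time.time())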
def get_placement(self, _uuid):
"""Get placement info of given vm."""
row = {}
try:
row = self.db.read_row(self.config.db_keyspace,
self.config.db_uuid_table, 'uuid', _uuid)
except Exception as e:
LOG.error("DB: while getting vm placement info: " + str(e))
return None
if len(row) > 0:
str_data = row[row.keys()[0]]['metadata']
json_data = json.loads(str_data)
return json_data
else:
return {}
def get_placements(self):
"""Get all placements."""
placement_list = []
results = {}
try:
results = self.db.read_all_rows(self.config.db_keyspace,
self.config.db_uuid_table)
except Exception as e:
LOG.error("DB: while getting all placements: " + str(e))
return None
if len(results) > 0:
for _, row in results.iteritems():
placement_list.append(json.loads(row['metadata']))
return placement_list
def store_placement(self, _placement_data):
"""Store placement info of given vm."""
uuid = _placement_data["uuid"]
if not self.delete_placement(uuid):
return False
LOG.debug("store placement = " + json.dumps(_placement_data, indent=4))
data = {
'uuid': uuid,
'metadata': json.dumps(_placement_data)
}
try:
self.db.create_row(self.config.db_keyspace,
self.config.db_uuid_table, data)
except Exception as e:
LOG.error("DB: while inserting placement: " + str(e))
return False
return True
def delete_placement(self, _uuid):
"""Delete placement."""
try:
self.db.delete_row_eventually(self.config.db_keyspace,
self.config.db_uuid_table,
'uuid', _uuid)
except Exception as e:
LOG.error("DB: while deleting vm placement info: " + str(e))
return False
return True
def get_resource_status(self, _k):
"""Get resource status."""
json_resource = {}
row = {}
try:
row = self.db.read_row(self.config.db_keyspace,
self.config.db_resource_table,
'site_name', _k, log=LOG)
except Exception as e:
LOG.error("MUSIC error while reading resource status: " +
str(e))
return None
if len(row) > 0:
str_resource = row[row.keys()[0]]['resource']
json_resource = json.loads(str_resource)
return json_resource
def update_resource_status(self, _k, _status):
"""Update resource status."""
row = {}
try:
row = self.db.read_row(self.config.db_keyspace,
self.config.db_resource_table,
'site_name', _k)
except Exception as e:
LOG.error("MUSIC error while reading resource status: " + str(e))
return False
json_resource = {}
if len(row) > 0:
str_resource = row[row.keys()[0]]['resource']
json_resource = json.loads(str_resource)
if 'flavors' in _status.keys():
for fk, f in _status['flavors'].iteritems():
if 'flavors' not in json_resource.keys():
json_resource['flavors'] = {}
json_resource['flavors'][fk] = f
if 'groups' in _status.keys():
for lgk, lg in _status['groups'].iteritems():
if 'groups' not in json_resource.keys():
json_resource['groups'] = {}
json_resource['groups'][lgk] = lg
if 'hosts' in _status.keys():
for hk, h in _status['hosts'].iteritems():
if 'hosts' not in json_resource.keys():
json_resource['hosts'] = {}
json_resource['hosts'][hk] = h
if 'host_groups' in _status.keys():
for hgk, hg in _status['host_groups'].iteritems():
if 'host_groups' not in json_resource.keys():
json_resource['host_groups'] = {}
json_resource['host_groups'][hgk] = hg
if 'datacenter' in _status.keys():
json_resource['datacenter'] = _status['datacenter']
json_resource['timestamp'] = _status['timestamp']
try:
self.db.delete_row_eventually(self.config.db_keyspace,
self.config.db_resource_table,
'site_name', _k)
except Exception as e:
LOG.error("MUSIC error while deleting resource "
"status: " + str(e))
return False
else:
json_resource = _status
LOG.debug("store resource status = " + json.dumps(json_resource,
indent=4))
data = {
'site_name': _k,
'resource': json.dumps(json_resource)
}
try:
self.db.create_row(self.config.db_keyspace,
self.config.db_resource_table, data)
except Exception as e:
LOG.error("DB could not create row in resource table: " + str(e))
return False
return True
def get_group(self, _g_id):
"""Get valet group info of given group identifier."""
group_info = {}
row = self._get_group_by_name(_g_id)
if row is None:
return None
if len(row) > 0:
group_info["id"] = row[row.keys()[0]]['id']
group_info["level"] = row[row.keys()[0]]['level']
group_info["type"] = row[row.keys()[0]]['type']
group_info["members"] = json.loads(row[row.keys()[0]]['members'])
group_info["name"] = row[row.keys()[0]]['name']
return group_info
else:
row = self._get_group_by_id(_g_id)
if row is None:
return None
if len(row) > 0:
group_info["id"] = row[row.keys()[0]]['id']
group_info["level"] = row[row.keys()[0]]['level']
group_info["type"] = row[row.keys()[0]]['type']
group_info["members"] = json.loads(row[row.keys()[0]]['members'])
group_info["name"] = row[row.keys()[0]]['name']
return group_info
else:
return {}
def _get_group_by_name(self, _name):
"""Get valet group info of given group name."""
row = {}
try:
row = self.db.read_row(self.config.db_keyspace,
self.config.db_group_table,
'name', _name)
except Exception as e:
LOG.error("DB: while getting group info by name: " + str(e))
return None
return row
def _get_group_by_id(self, _id):
"""Get valet group info of given group id."""
row = {}
try:
row = self.db.read_row(self.config.db_keyspace,
self.config.db_group_table, 'id', _id)
except Exception as e:
LOG.error("DB: while getting group info by id: " + str(e))
return None
return row
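# Example (illustrative): get_group() accepts either a group name or a
# group id; _get_group_by_name() is tried first, then _get_group_by_id().
#
#   info = db_handler.get_group("group_a")
#   # -> {"id": ..., "name": "group_a", "type": "affinity",
#   #     "level": "host", "members": [...]}, or {} if nothing matches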

View File

@ -1,691 +0,0 @@
#
# Copyright 2014-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Music Handler."""
import json
import operator
from oslo_log import log
from valet.common.music import Music
from valet.engine.optimizer.db_connect.event import Event
LOG = log.getLogger(__name__)
def ensurekey(d, k):
return d.setdefault(k, {})
# FIXME(GJ): make MUSIC pluggable
class MusicHandler(object):
"""Music Handler Class.
This Class consists of functions that interact with the music
database for valet and return/delete/update objects within it.
"""
def __init__(self, _config):
"""Init Music Handler."""
self.config = _config
self.music = Music(
hosts=self.config.hosts, port=self.config.port,
replication_factor=self.config.replication_factor,
music_server_retries=self.config.music_server_retries)
if self.config.hosts is not None:
LOG.info("DB: music host = %s", self.config.hosts)
if self.config.replication_factor is not None:
LOG.info("DB: music replication factor = %s ",
str(self.config.replication_factor))
# FIXME(GJ): this may not need
def init_db(self):
"""Init Database.
This function initializes a database in Music by creating all the
necessary tables with the proper schemas in Music using API calls.
Return True if no exceptions are caught.
"""
LOG.info("MusicHandler.init_db: create table")
try:
self.music.create_keyspace(self.config.db_keyspace)
except Exception as e:
LOG.error("DB could not create keyspace: " + str(e))
return False
schema = {
'stack_id': 'text',
'request': 'text',
'PRIMARY KEY': '(stack_id)'
}
try:
self.music.create_table(self.config.db_keyspace,
self.config.db_request_table, schema)
except Exception as e:
LOG.error("DB could not create request table: " + str(e))
return False
schema = {
'stack_id': 'text',
'placement': 'text',
'PRIMARY KEY': '(stack_id)'
}
try:
self.music.create_table(self.config.db_keyspace,
self.config.db_response_table, schema)
except Exception as e:
LOG.error("DB could not create response table: " + str(e))
return False
schema = {
'timestamp': 'text',
'exchange': 'text',
'method': 'text',
'args': 'text',
'PRIMARY KEY': '(timestamp)'
}
try:
self.music.create_table(self.config.db_keyspace,
self.config.db_event_table, schema)
except Exception as e:
LOG.error("DB could not create event table: " + str(e))
return False
schema = {
'site_name': 'text',
'resource': 'text',
'PRIMARY KEY': '(site_name)'
}
try:
self.music.create_table(self.config.db_keyspace,
self.config.db_resource_table, schema)
except Exception as e:
LOG.error("DB could not create resource table: " + str(e))
return False
schema = {
'stack_id': 'text',
'app': 'text',
'PRIMARY KEY': '(stack_id)'
}
try:
self.music.create_table(self.config.db_keyspace,
self.config.db_app_table, schema)
except Exception as e:
LOG.error("DB could not create app table: " + str(e))
return False
schema = {
'uuid': 'text',
'h_uuid': 'text',
's_uuid': 'text',
'PRIMARY KEY': '(uuid)'
}
try:
self.music.create_table(self.config.db_keyspace,
self.config.db_uuid_table, schema)
except Exception as e:
LOG.error("DB could not create uuid table: " + str(e))
return False
return True
# TODO(GJ): evaluate the delay
def get_events(self):
"""Get Events.
This function obtains all events from the database and then
iterates through all of them to check the method and perform the
corresponding action on them. Return Event list.
"""
event_list = []
events = {}
try:
events = self.music.read_all_rows(self.config.db_keyspace,
self.config.db_event_table)
except Exception as e:
LOG.error("DB:event: " + str(e))
# FIXME(GJ): return None?
return {}
if len(events) > 0:
for _, row in events.iteritems():
event_id = row['timestamp']
exchange = row['exchange']
method = row['method']
args_data = row['args']
LOG.debug("MusicHandler.get_events: event (" +
event_id + ") is entered")
if exchange != "nova":
if self.delete_event(event_id) is False:
return None
LOG.debug(
"MusicHandler.get_events: event exchange "
"(" + exchange + ") is not supported")
continue
if method != 'object_action' and method != 'build_and_run_' \
'instance':
if self.delete_event(event_id) is False:
return None
LOG.debug("MusicHandler.get_events: event method "
"(" + method + ") is not considered")
continue
if len(args_data) == 0:
if self.delete_event(event_id) is False:
return None
LOG.debug("MusicHandler.get_events: "
"event does not have args")
continue
try:
args = json.loads(args_data)
except (ValueError, KeyError, TypeError):
LOG.warning("MusicHandler.get_events: error while "
"decoding to JSON event = " + method +
":" + event_id)
continue
# TODO(lamt) this block of code can use refactoring
if method == 'object_action':
if 'objinst' in args.keys():
objinst = args['objinst']
if 'nova_object.name' in objinst.keys():
nova_object_name = objinst['nova_object.name']
if nova_object_name == 'Instance':
if 'nova_object.changes' in objinst.keys() and \
'nova_object.data' in objinst.keys():
change_list = objinst[
'nova_object.changes']
change_data = objinst['nova_object.data']
if 'vm_state' in change_list and \
'vm_state' in change_data.keys():
if (change_data['vm_state'] ==
'deleted' or
change_data['vm_state'] ==
'active'):
e = Event(event_id)
e.exchange = exchange
e.method = method
e.args = args
event_list.append(e)
else:
msg = "unknown vm_state = %s"
LOG.warning(
msg % change_data["vm_state"])
if 'uuid' in change_data.keys():
msg = " uuid = %s"
LOG.warning(
msg % change_data['uuid'])
if not self.delete_event(event_id):
return None
else:
if not self.delete_event(event_id):
return None
else:
if self.delete_event(event_id) is False:
return None
elif nova_object_name == 'ComputeNode':
if 'nova_object.changes' in objinst.keys() and \
'nova_object.data' in objinst.keys():
e = Event(event_id)
e.exchange = exchange
e.method = method
e.args = args
event_list.append(e)
else:
if self.delete_event(event_id) is False:
return None
else:
if self.delete_event(event_id) is False:
return None
else:
if self.delete_event(event_id) is False:
return None
else:
if self.delete_event(event_id) is False:
return None
elif method == 'build_and_run_instance':
if 'filter_properties' not in args.keys():
if self.delete_event(event_id) is False:
return None
continue
# NOTE(GJ): do not check the existence of scheduler_hints
if 'instance' not in args.keys():
if self.delete_event(event_id) is False:
return None
continue
else:
instance = args['instance']
if 'nova_object.data' not in instance.keys():
if self.delete_event(event_id) is False:
return None
continue
e = Event(event_id)
e.exchange = exchange
e.method = method
e.args = args
event_list.append(e)
error_event_list = []
for e in event_list:
e.set_data()
if e.method == "object_action":
if e.object_name == 'Instance':
if e.uuid is None or e.uuid == "none" or \
e.host is None or e.host == "none" or \
e.vcpus == -1 or e.mem == -1:
error_event_list.append(e)
LOG.warning("MusicHandler.get_events: data "
"missing in instance object event")
elif e.object_name == 'ComputeNode':
if e.host is None or e.host == "none":
error_event_list.append(e)
LOG.warning("MusicHandler.get_events: data "
"missing in compute object event")
elif e.method == "build_and_run_instance":
if e.uuid is None or e.uuid == "none":
error_event_list.append(e)
LOG.warning("MusicHandler.get_events: data missing "
"in build event")
if len(error_event_list) > 0:
event_list[:] = [
e for e in event_list if e not in error_event_list]
if len(event_list) > 0:
event_list.sort(key=operator.attrgetter('event_id'))
return event_list
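    # An illustrative sketch of the event rows consumed above; field values
    # are examples only, assuming the event-table layout read by this method:
    #
    #   row = {'timestamp': '1501689506',   # doubles as the event id
    #          'exchange': 'nova',
    #          'method': 'build_and_run_instance',
    #          'args': json.dumps({'filter_properties': {},
    #                              'instance': {'nova_object.data': {}}})}
    #
    # Rows with any other exchange or method, or with empty args, are
    # deleted and skipped rather than returned.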
def delete_event(self, _event_id):
"""Return True after deleting corresponding event row in db."""
try:
self.music.delete_row_eventually(self.config.db_keyspace,
self.config.db_event_table,
'timestamp', _event_id)
except Exception as e:
LOG.error("DB: while deleting event: " + str(e))
return False
return True
def get_uuid(self, _uuid):
"""Return h_uuid and s_uuid from matching _uuid row in music db."""
h_uuid = "none"
s_uuid = "none"
row = {}
try:
row = self.music.read_row(self.config.db_keyspace,
self.config.db_uuid_table, 'uuid', _uuid)
except Exception as e:
LOG.error("DB: while reading uuid: " + str(e))
return None
if len(row) > 0:
h_uuid = row[row.keys()[0]]['h_uuid']
s_uuid = row[row.keys()[0]]['s_uuid']
return h_uuid, s_uuid
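    # Illustrative usage (a sketch; "handler" and "vm_uuid" are assumed):
    #
    #   ids = handler.get_uuid(vm_uuid)
    #   if ids is not None:
    #       h_uuid, s_uuid = ids   # both "none" when no row matched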
def put_uuid(self, _e):
"""Insert uuid, h_uuid and s_uuid from event into new row in db."""
heat_resource_uuid = "none"
heat_root_stack_id = "none"
if _e.heat_resource_uuid is not None and \
_e.heat_resource_uuid != "none":
heat_resource_uuid = _e.heat_resource_uuid
else:
heat_resource_uuid = _e.uuid
if _e.heat_root_stack_id is not None and \
_e.heat_root_stack_id != "none":
heat_root_stack_id = _e.heat_root_stack_id
else:
heat_root_stack_id = _e.uuid
data = {
'uuid': _e.uuid,
'h_uuid': heat_resource_uuid,
's_uuid': heat_root_stack_id
}
try:
self.music.create_row(self.config.db_keyspace,
self.config.db_uuid_table, data)
except Exception as e:
LOG.error("DB: while inserting uuid: " + str(e))
return False
return True
def delete_uuid(self, _k):
"""Return True after deleting row corresponding to event uuid."""
try:
self.music.delete_row_eventually(self.config.db_keyspace,
self.config.db_uuid_table, 'uuid',
_k)
except Exception as e:
LOG.error("DB: while deleting uuid: " + str(e))
return False
return True
def get_requests(self):
"""Return list of requests that consists of all rows in a db table."""
request_list = []
requests = {}
try:
requests = self.music.read_all_rows(self.config.db_keyspace,
self.config.db_request_table)
except Exception as e:
LOG.error("DB: while reading requests: " + str(e))
# FIXME(GJ): return None?
return {}
if len(requests) > 0:
LOG.info("MusicHandler.get_requests: placement request arrived")
for _, row in requests.iteritems():
LOG.info(" request_id = " + row['stack_id'])
r_list = json.loads(row['request'])
for r in r_list:
request_list.append(r)
return request_list
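    # An illustrative request row, assuming each row carries a JSON list of
    # placement requests (field values are examples only):
    #
    #   row = {'stack_id': 'req-0001',
    #          'request': json.dumps([{'stack_id': 'req-0001'}])}
    #
    # get_requests() flattens the per-row lists into a single request list.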
def put_result(self, _result):
"""Return True after putting result in db(create and delete rows)."""
for appk, app_placement in _result.iteritems():
data = {
'stack_id': appk,
'placement': json.dumps(app_placement)
}
try:
self.music.create_row(self.config.db_keyspace,
self.config.db_response_table, data)
except Exception as e:
LOG.error("MUSIC error while putting placement "
"result: " + str(e))
return False
for appk in _result.keys():
try:
self.music.delete_row_eventually(self.config.db_keyspace,
self.config.db_request_table,
'stack_id', appk)
except Exception as e:
LOG.error("MUSIC error while deleting handled "
"request: " + str(e))
return False
return True
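    # Illustrative usage (a sketch; the placement payload is an example
    # only): one row is created per stack id, then the handled request
    # rows are deleted.
    #
    #   ok = handler.put_result({stack_id: {'status': 'success'}})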
def get_resource_status(self, _k):
"""Get Row of resource related to '_k' and return resource as json."""
json_resource = {}
row = {}
try:
row = self.music.read_row(self.config.db_keyspace,
self.config.db_resource_table,
'site_name', _k)
except Exception as e:
LOG.error("MUSIC error while reading resource status: " +
str(e))
return None
if len(row) > 0:
str_resource = row[row.keys()[0]]['resource']
json_resource = json.loads(str_resource)
return json_resource
def update_resource_status(self, _k, _status):
"""Update resource to the new _status (flavors, lgs, hosts, etc)."""
row = {}
try:
row = self.music.read_row(self.config.db_keyspace,
self.config.db_resource_table,
'site_name', _k)
except Exception as e:
LOG.error("MUSIC error while reading resource status: " +
str(e))
return False
json_resource = {}
if len(row) > 0:
str_resource = row[row.keys()[0]]['resource']
json_resource = json.loads(str_resource)
if 'flavors' in _status.keys():
flavors = _status['flavors']
for fk, f in flavors.iteritems():
if fk in ensurekey(json_resource, 'flavors').keys():
del json_resource['flavors'][fk]
json_resource['flavors'][fk] = f
if 'logical_groups' in _status.keys():
logical_groups = _status['logical_groups']
for lgk, lg in logical_groups.iteritems():
keys = ensurekey(json_resource, 'logical_groups').keys()
if lgk in keys:
del json_resource['logical_groups'][lgk]
json_resource['logical_groups'][lgk] = lg
if 'hosts' in _status.keys():
hosts = _status['hosts']
for hk, h in hosts.iteritems():
if hk in ensurekey(json_resource, 'hosts').keys():
del json_resource['hosts'][hk]
json_resource['hosts'][hk] = h
if 'host_groups' in _status.keys():
                host_groups = _status['host_groups']
                for hgk, hg in host_groups.iteritems():
if hgk in ensurekey(json_resource, 'host_groups').keys():
del json_resource['host_groups'][hgk]
json_resource['host_groups'][hgk] = hg
if 'datacenter' in _status.keys():
datacenter = _status['datacenter']
del json_resource['datacenter']
json_resource['datacenter'] = datacenter
json_resource['timestamp'] = _status['timestamp']
try:
self.music.delete_row_eventually(self.config.db_keyspace,
self.config.db_resource_table,
'site_name', _k)
except Exception as e:
LOG.error("MUSIC error while deleting resource "
"status: " + str(e))
return False
else:
json_resource = _status
data = {
'site_name': _k,
'resource': json.dumps(json_resource)
}
try:
self.music.create_row(self.config.db_keyspace,
self.config.db_resource_table, data)
except Exception as e:
LOG.error("DB could not create row in resource table: " + str(e))
return False
LOG.info("DB: resource status updated")
return True
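    # An illustrative partial _status update, derived from the keys handled
    # above (values are examples only); existing entries with the same key
    # are replaced:
    #
    #   handler.update_resource_status('site1', {
    #       'hosts': {'compute-0': {}},
    #       'timestamp': 1501689506.0,
    #   })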
def add_app(self, _k, _app_data):
"""Add app to database in music and return True."""
try:
self.music.delete_row_eventually(
self.config.db_keyspace, self.config.db_app_table,
'stack_id', _k)
except Exception as e:
LOG.error("DB: while deleting app: " + str(e))
return False
if _app_data is not None:
data = {
'stack_id': _k,
'app': json.dumps(_app_data)
}
try:
self.music.create_row(self.config.db_keyspace,
self.config.db_app_table, data)
except Exception as e:
LOG.error("DB: while inserting app: " + str(e))
return False
return True
def get_app_info(self, _s_uuid):
"""Get app info for stack id and return as json object."""
json_app = {}
row = {}
try:
row = self.music.read_row(self.config.db_keyspace,
self.config.db_app_table, 'stack_id',
_s_uuid)
except Exception as e:
LOG.error("DB: while reading app info: " + str(e))
return None
if len(row) > 0:
str_app = row[row.keys()[0]]['app']
json_app = json.loads(str_app)
return json_app
# TODO(UNKNOWN): get all other VMs related to this VM
def get_vm_info(self, _s_uuid, _h_uuid, _host):
"""Return vm info connected with ids and host passed in."""
updated = False
json_app = {}
vm_info = {}
row = {}
try:
row = self.music.read_row(self.config.db_keyspace,
self.config.db_app_table, 'stack_id',
_s_uuid)
except Exception as e:
LOG.error("DB could not read row in app table: " + str(e))
return None
if len(row) > 0:
str_app = row[row.keys()[0]]['app']
json_app = json.loads(str_app)
vms = json_app["VMs"]
for vmk, vm in vms.iteritems():
if vmk == _h_uuid:
if vm["status"] != "deleted":
if vm["host"] != _host:
vm["planned_host"] = vm["host"]
vm["host"] = _host
LOG.warning("DB: conflicted placement "
"decision from Ostro")
# TODO(GY): affinity, diversity, exclusivity
# validation check
updated = True
else:
vm["status"] = "scheduled"
LOG.warning("DB: vm was deleted")
updated = True
vm_info = vm
break
else:
LOG.error("MusicHandler.get_vm_info: vm is missing "
"from stack")
else:
LOG.warning("MusicHandler.get_vm_info: not found stack for "
"update = " + _s_uuid)
if updated is True:
if self.add_app(_s_uuid, json_app) is False:
return None
return vm_info
def update_vm_info(self, _s_uuid, _h_uuid):
"""Return true if vm's heat and heat stack ids are updated in db."""
updated = False
json_app = {}
row = {}
try:
row = self.music.read_row(self.config.db_keyspace,
self.config.db_app_table, 'stack_id',
_s_uuid)
except Exception as e:
LOG.error("DB could not read row in app table: " + str(e))
return False
if len(row) > 0:
str_app = row[row.keys()[0]]['app']
json_app = json.loads(str_app)
vms = json_app["VMs"]
for vmk, vm in vms.iteritems():
if vmk == _h_uuid:
if vm["status"] != "deleted":
vm["status"] = "deleted"
LOG.warning("DB: deleted marked")
updated = True
else:
LOG.warning("DB: vm was already deleted")
break
else:
LOG.error("MusicHandler.update_vm_info: vm is missing "
"from stack")
else:
LOG.warning("MusicHandler.update_vm_info: not found "
"stack for update = " + _s_uuid)
if updated is True:
if self.add_app(_s_uuid, json_app) is False:
return False
return True

View File

@ -0,0 +1,300 @@
#
# Copyright 2014-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from valet.engine.optimizer.app_manager.placement_handler import Placement
class EventHandler(object):
'''Handler to apply events to resource status and placements.'''
def __init__(self, _placement_handler, _app_handler, _resource, _db, _logger):
self.logger = _logger
self.phandler = _placement_handler
self.ahandler = _app_handler
self.resource = _resource
self.db = _db
def handle_events(self, _event_list, _data_lock):
'''Deal with events (vm create and delete, host status).'''
_data_lock.acquire()
for e in _event_list:
if e.host is not None and e.host != "none":
if e.host not in self.resource.hosts.keys():
self.logger.warn("EVENT: host (" + e.host + ") not exists")
continue
if e.method == "build_and_run_instance":
if not self._handle_build_and_run_event(e):
_data_lock.release()
return False
elif e.method == "object_action":
if e.object_name == 'Instance':
if e.vm_state == "active":
if not self._handle_active_instance_event(e):
_data_lock.release()
return False
elif e.vm_state == "deleted":
if not self._handle_delete_instance_event(e):
_data_lock.release()
return False
else:
self.logger.warn("EVENT: unknown event vm_state = " + e.vm_state)
elif e.object_name == 'ComputeNode':
self._handle_compute_event(e)
else:
self.logger.warn("EVENT: unknown object_name = " + e.object_name)
else:
self.logger.warn("EVENT: unknown method = " + e.method)
for e in _event_list:
if not self.db.delete_event(e.event_id):
_data_lock.release()
return False
_data_lock.release()
return True
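    # Illustrative usage (a sketch): "events" would come from the database
    # handler's get_events(), and the lock guards shared resource and
    # placement state across threads.
    #
    #   import threading
    #
    #   data_lock = threading.Lock()
    #   ok = event_handler.handle_events(events, data_lock)
    #   # on False, a delete or placement update failed; the lock has
    #   # already been released on every return path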
def _handle_build_and_run_event(self, e):
'''Handle 'build-and-run' event to relate stack_id.'''
self.logger.info("EVENT: got 'build_and_run' for " + e.uuid)
stack_id = None
if e.heat_root_stack_id is not None and e.heat_root_stack_id != "none":
stack_id = e.heat_root_stack_id
else:
self.logger.warn("EVENT: stack_id is none")
orch_id = None
if e.heat_resource_uuid is not None and e.heat_resource_uuid != "none":
orch_id = e.heat_resource_uuid
else:
self.logger.warn("EVENT: orch_id is none")
if stack_id is not None and orch_id is not None:
placement = self.phandler.get_placement(e.uuid)
if placement is None:
return False
elif placement.uuid == "none":
self.logger.warn("miss 'identify' or 'replan' step?")
(vid, host_name) = self.ahandler.update_stack(stack_id, orch_id=orch_id, uuid=e.uuid)
if host_name is not None and host_name != "none":
placement = Placement(e.uuid)
placement.stack_id = stack_id
placement.host = host_name
placement.orch_id = orch_id
placement.state = "building"
placement.timestamp = time.time()
placement.status = "verified"
if not self.phandler.store_placement(e.uuid, placement):
return False
self._update_uuid(orch_id, e.uuid, host_name)
self.resource.update_topology(store=False)
else:
self.logger.warn("EVENT: unknown vm instance!")
else:
if placement.stack_id is not None and placement.stack_id != "none":
if placement.stack_id != stack_id:
self.logger.debug("recorded stack_id = " + placement.stack_id)
self.logger.warn("EVENT: stack_id(" + stack_id + ") is different!")
# FIXME(gjung): update stack_id in placement handler, resource, stack?
else:
self.logger.warn("EVENT: stack_id is missing")
return True
def _handle_active_instance_event(self, e):
'''Handle event for vm activation confirmation.'''
self.logger.info("EVENT: got instance_active for " + e.uuid)
placement = self.phandler.get_placement(e.uuid)
if placement is None:
return False
elif placement.uuid == "none":
self.logger.warn("EVENT: unknown instance!")
placement = Placement(e.uuid)
placement.host = e.host
placement.state = "created"
placement.timestamp = time.time()
placement.status = "verified"
vm_info = {}
vm_info["uuid"] = e.uuid
vm_info["stack_id"] = "none"
vm_info["orch_id"] = "none"
vm_info["name"] = "none"
vm_alloc = {}
vm_alloc["host"] = e.host
vm_alloc["vcpus"] = e.vcpus
vm_alloc["mem"] = e.mem
vm_alloc["local_volume"] = e.local_disk
if self._add_vm_to_host(vm_info, vm_alloc) is True:
self.resource.update_topology(store=False)
if not self.phandler.store_placement(e.uuid, placement):
return False
return True
if placement.host != e.host:
self.logger.warn("EVENT: vm activated in the different host!")
vm_info = {}
vm_info["uuid"] = e.uuid
vm_info["stack_id"] = placement.stack_id
vm_info["orch_id"] = placement.orch_id
vm_info["name"] = "none"
vm_alloc = {}
vm_alloc["host"] = e.host
vm_alloc["vcpus"] = e.vcpus
vm_alloc["mem"] = e.mem
vm_alloc["local_volume"] = e.local_disk
if self._add_vm_to_host(vm_info, vm_alloc) is True:
vm_alloc["host"] = placement.host
self._remove_vm_from_host(e.uuid, vm_alloc)
self._remove_vm_from_groups_of_host(e.uuid, placement.host)
self.resource.update_topology(store=False)
placement.host = e.host
        if placement.stack_id is not None and placement.stack_id != "none":
(vid, hk) = self.ahandler.update_stack(placement.stack_id, uuid=e.uuid, host=e.host)
if vid is None:
return False
new_state = None
if placement.state == "planned":
new_state = "created"
elif placement.state == "rebuild":
new_state = "rebuilt"
elif placement.state == "migrate":
new_state = "migrated"
else:
self.logger.warn("EVENT: vm is in incomplete state = " + placement.state)
new_state = "created"
curr_state = "none"
if placement.state is not None:
curr_state = placement.state
self.logger.info("EVENT: state changed from '" + curr_state + "' to '" + new_state + "'")
placement.state = new_state
if not self.phandler.store_placement(e.uuid, placement):
return False
return True
def _handle_delete_instance_event(self, e):
'''Handle event for vm deletion notification.'''
self.logger.info("EVENT: got instance_delete for " + e.uuid)
placement = self.phandler.get_placement(e.uuid)
if placement is None:
return False
elif placement.uuid == "none":
self.logger.warn("EVENT: unknown vm instance!")
return True
if placement.host != e.host:
self.logger.warn("EVENT: vm activated in the different host!")
return True
if placement.state is None or placement.state == "none" or \
placement.state in ("created", "rebuilt", "migrated"):
if placement.stack_id is not None and placement.stack_id != "none":
if not self.ahandler.delete_from_stack(placement.stack_id, uuid=e.uuid):
return False
else:
self.logger.warn("EVENT: stack_id is unknown")
if not self.phandler.delete_placement(e.uuid):
return False
vm_alloc = {}
vm_alloc["host"] = e.host
vm_alloc["vcpus"] = e.vcpus
vm_alloc["mem"] = e.mem
vm_alloc["local_volume"] = e.local_disk
self._remove_vm_from_host(e.uuid, vm_alloc)
self._remove_vm_from_groups(e.uuid, e.host)
self.resource.update_topology(store=False)
else:
self.logger.warn("EVENT: vm is incomplete state for deletion = " + placement.state)
return True
def _handle_compute_event(self, e):
'''Handle event about compute resource change.'''
self.logger.info("EVENT: got compute for " + e.host)
if self.resource.update_host_resources(e.host, e.status) is True:
self.resource.update_host_time(e.host)
self.resource.update_topology(store=False)
def _add_vm_to_host(self, _vm_info, _vm_alloc):
'''Add vm to host.'''
if self.resource.add_vm_to_host(_vm_alloc, _vm_info) is True:
self.resource.update_host_time(_vm_alloc["host"])
return True
return False
def _remove_vm_from_host(self, _uuid, _vm_alloc):
'''Remove deleted vm from host.'''
if self.resource.remove_vm_from_host(_vm_alloc, uuid=_uuid) is True:
self.resource.update_host_time(_vm_alloc["host"])
else:
self.logger.warn("vm (" + _uuid + ") is missing in host while removing")
def _remove_vm_from_groups(self, _uuid, _host_name):
'''Remove deleted vm from groups.'''
host = self.resource.hosts[_host_name]
self.resource.remove_vm_from_groups(host, uuid=_uuid)
def _remove_vm_from_groups_of_host(self, _uuid, _host_name):
'''Remove deleted vm from host of the group.'''
host = self.resource.hosts[_host_name]
self.resource.remove_vm_from_groups_of_host(host, uuid=_uuid)
def _update_uuid(self, _orch_id, _uuid, _host_name):
'''Update physical uuid of placement.'''
host = self.resource.hosts[_host_name]
if host.update_uuid(_orch_id, _uuid) is True:
self.resource.update_host_time(_host_name)
else:
self.logger.warn("fail to update uuid in host = " + host.name)
self.resource.update_uuid_in_groups(_orch_id, _uuid, host)

View File

@ -0,0 +1,86 @@
#
# Copyright 2014-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from valet.engine.optimizer.app_manager.group import LEVEL
class AvailResources(object):
def __init__(self, _level):
self.level = _level
self.avail_hosts = {}
self.candidates = {}
def set_next_avail_hosts(self, _avail_hosts, _resource_of_level):
for hk, h in _avail_hosts.iteritems():
if self.level == "cluster":
if h.cluster_name == _resource_of_level:
self.avail_hosts[hk] = h
elif self.level == "rack":
if h.rack_name == _resource_of_level:
self.avail_hosts[hk] = h
elif self.level == "host":
if h.host_name == _resource_of_level:
self.avail_hosts[hk] = h
def set_next_level(self):
'''Get the next level to search.'''
current_level_index = LEVEL.index(self.level)
next_level_index = current_level_index - 1
if next_level_index < 0:
self.level = LEVEL[0]
else:
self.level = LEVEL[next_level_index]
def set_candidates(self):
if self.level == "cluster":
for _, h in self.avail_hosts.iteritems():
self.candidates[h.cluster_name] = h
elif self.level == "rack":
for _, h in self.avail_hosts.iteritems():
self.candidates[h.rack_name] = h
elif self.level == "host":
self.candidates = self.avail_hosts
def set_candidate(self, _resource_name):
if self.level == "cluster":
for _, h in self.avail_hosts.iteritems():
if h.cluster_name == _resource_name:
self.candidates[_resource_name] = h
break
elif self.level == "rack":
for _, h in self.avail_hosts.iteritems():
if h.rack_name == _resource_name:
self.candidates[_resource_name] = h
break
elif self.level == "host":
if _resource_name in self.avail_hosts.keys():
self.candidates[_resource_name] = self.avail_hosts[_resource_name]
def get_candidate(self, _resource):
candidate = None
if self.level == "cluster":
for _, h in self.avail_hosts.iteritems():
if h.cluster_name == _resource.cluster_name:
candidate = h
break
elif self.level == "rack":
for _, h in self.avail_hosts.iteritems():
if h.rack_name == _resource.rack_name:
candidate = h
elif self.level == "host":
if _resource.host_name in self.avail_hosts.keys():
candidate = self.avail_hosts[_resource.host_name]
return candidate
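    # An illustrative walk of the search levels (a sketch, assuming LEVEL
    # orders levels from finest to coarsest, e.g. ["host", "rack",
    # "cluster"]): set_next_level() steps one level finer and clamps at
    # LEVEL[0].
    #
    #   avail = AvailResources("cluster")
    #   avail.set_next_level()   # -> "rack"
    #   avail.set_next_level()   # -> "host"
    #   avail.set_next_level()   # -> "host" (clamped)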

View File

@ -0,0 +1,304 @@
#
# Copyright 2014-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import six
import traceback
from valet.engine.resource_manager.resources.datacenter import Datacenter
class Bootstrapper(object):
'''Bootstrap valet-engine.'''
def __init__(self, _resource, _db, _logger):
self.logger = _logger
self.resource = _resource
self.db = _db
self.phandler = None
def set_handlers(self, _placement_handler):
self.phandler = _placement_handler
def load_data(self, _compute, _topology, _metadata):
'''Load all required datacenter resource information.'''
try:
resource_status = self.db.get_resource_status(self.resource.datacenter.name)
if resource_status is None:
return False
if len(resource_status) > 0:
self.resource.load_from_db(resource_status)
self.logger.info("load data from other systems (e.g., nova)")
if not _compute.set_hosts():
return False
if not _topology.set_topology():
return False
if not _metadata.set_groups():
return False
if not _metadata.set_flavors():
return False
self.resource.update_topology()
except Exception:
            self.logger.critical("bootstrap failed: " + traceback.format_exc())
            return False
        return True
def verify_pre_valet_placements(self):
'''Mark if any pre-valet placements were not correctly placed.'''
self.logger.info("verifying pre-valet placements")
for hk, host in self.resource.hosts.iteritems():
for vm_info in host.vm_list:
if "metadata" in vm_info.keys(): # unknown pre-valet placement
placement = self.phandler.get_placement(vm_info["uuid"])
if placement is None:
return False
elif placement.uuid == "none":
status = "not existing vm"
self.logger.warn("invalid placement: " + status)
placement.status = status
if not self.phandler.store_placement(vm_info["uuid"], placement):
return False
else:
if placement.status != "verified":
(status, valet_group_list) = self._verify_pre_valet_placement(hk, vm_info)
if status is None:
return False
elif status == "verified":
placement.status = status
if not self.phandler.store_placement(vm_info["uuid"], placement):
return False
if len(valet_group_list) > 0:
host = self.resource.hosts[hk]
# overwrite if vm exists
self.resource.add_vm_to_groups(host, vm_info, valet_group_list)
else:
self.logger.warn("invalid placement: " + status)
placement.status = status
if not self.phandler.store_placement(vm_info["uuid"], placement):
return False
return True
def _verify_pre_valet_placement(self, _hk, _vm_info):
'''Mark if this pre-valet placement was not correctly placed.'''
status = "verified"
valet_group_list = []
if len(_vm_info["metadata"]) == 0:
status = self._verify_exclusivity(_hk)
else:
metadata = _vm_info["metadata"]
for mk, md in metadata.iteritems():
if mk == "valet":
group_list = []
if isinstance(md, six.string_types):
try:
groups_dict = json.loads(md)
if "groups" in groups_dict.keys():
group_list = groups_dict["groups"]
except Exception:
self.logger.error("valet metadata parsing: " + traceback.format_exc())
status = "wrong valet metadata format"
return (status, [])
else:
if "groups" in md.keys():
group_list = md["groups"]
for gk in group_list:
found = False
for leveled_gk, g in self.resource.groups.iteritems():
if g.group_type in ("EX", "DIV", "AFF") and leveled_gk.split(':')[1] == gk:
group_info = self.db.get_group(gk)
if group_info is None:
return (None, [])
elif len(group_info) == 0:
break
if group_info["members"] is not None and len(group_info["members"]) > 0:
if "tenant_id" in _vm_info.keys():
t = _vm_info["tenant_id"]
if t not in group_info["members"]:
status = "tenant(" + t + ") cannot use group(" + gk + ")"
return (status, [])
valet_group_list.append(leveled_gk)
found = True
break
if not found:
self.logger.warn("unknown group(" + gk + ") was used")
if len(valet_group_list) == 0:
status = self._verify_exclusivity(_hk)
else:
for gk in valet_group_list:
group = self.resource.groups[gk]
if group.group_type == "EX" or group.group_type == "AFF":
status = self._verify_named_affinity(_hk, gk)
if status != "verified":
break
elif group.group_type == "DIV":
status = self._verify_named_diversity(_hk, gk)
if status != "verified":
break
return (status, valet_group_list)
def _verify_exclusivity(self, _hk):
'''Verify if vm was incorrectly placed in an exclusivity group.'''
host = self.resource.hosts[_hk]
for gk, g in host.memberships.iteritems():
if g.group_type == "EX" and gk.split(':')[0] == "host":
return "incorrectly placed in exclusive host"
if host.host_group is not None and host.host_group != "none" and host.host_group != "any":
rack = host.host_group
if not isinstance(rack, Datacenter):
for gk, g in rack.memberships.iteritems():
if g.group_type == "EX" and gk.split(':')[0] == "rack":
return "incorrectly placed in exclusive rack"
if rack.parent_resource is not None and \
rack.parent_resource != "none" and \
rack.parent_resource != "any":
cluster = rack.parent_resource
if not isinstance(cluster, Datacenter):
for gk, g in cluster.memberships.iteritems():
if g.group_type == "EX" and gk.split(':')[0] == "cluster":
return "incorrectly placed in exclusive cluster"
return "verified"
def _verify_named_affinity(self, _hk, _gk):
'''Verify if vm was correctly placed in an exclusivity or affinity group.'''
group = self.resource.groups[_gk]
g_id = _gk.split(':')
level = g_id[0]
group_name = g_id[1]
group_type = None
if group.group_type == "EX":
group_type = "exclusivity"
else:
group_type = "affinity"
if level == "host":
if _hk not in group.vms_per_host.keys():
return "placed in non-" + group_type + " host of group (" + group_name + ")"
elif level == "rack":
host = self.resource.hosts[_hk]
if host.host_group is not None and host.host_group != "none" and host.host_group != "any":
rack = host.host_group
if isinstance(rack, Datacenter):
return "placed in non-existing rack level " + group_type + " of group (" + group_name + ")"
else:
if rack.name not in group.vms_per_host.keys():
return "placed in non-" + group_type + " rack of group (" + group_name + ")"
else:
return "placed in non-existing rack level " + group_type + " of group (" + group_name + ")"
elif level == "cluster":
host = self.resource.hosts[_hk]
if host.host_group is not None and host.host_group != "none" and host.host_group != "any":
rack = host.host_group
if isinstance(rack, Datacenter):
return "placed in non-existing cluster level " + group_type + " of group (" + group_name + ")"
else:
if rack.parent_resource is not None and \
rack.parent_resource != "none" and \
rack.parent_resource != "any":
cluster = rack.parent_resource
if isinstance(cluster, Datacenter):
return "placed in non-existing cluster level " + group_type
else:
if cluster.name not in group.vms_per_host.keys():
return "placed in non-" + group_type + " cluster of group (" + group_name + ")"
else:
return "placed in non-existing cluster level " + group_type
else:
return "placed in non-existing cluster level " + group_type
else:
return "unknown level"
return "verified"
def _verify_named_diversity(self, _hk, _gk):
'''Verify if vm was correctly placed in a diversity group.'''
group = self.resource.groups[_gk]
g_id = _gk.split(':')
level = g_id[0]
group_name = g_id[1]
if level == "host":
if _hk in group.vms_per_host.keys():
return "incorrectly placed in diversity host of group (" + group_name + ")"
elif level == "rack":
host = self.resource.hosts[_hk]
if host.host_group is not None and host.host_group != "none" and host.host_group != "any":
rack = host.host_group
if isinstance(rack, Datacenter):
return "placed in non-existing rack level diversity of group (" + group_name + ")"
else:
if rack.name in group.vms_per_host.keys():
return "placed in diversity rack of group (" + group_name + ")"
else:
return "placed in non-existing rack level diversity of group (" + group_name + ")"
elif level == "cluster":
host = self.resource.hosts[_hk]
if host.host_group is not None and host.host_group != "none" and host.host_group != "any":
rack = host.host_group
if isinstance(rack, Datacenter):
return "placed in non-existing cluster level diversity of group (" + group_name + ")"
else:
if rack.parent_resource is not None and \
rack.parent_resource != "none" and \
rack.parent_resource != "any":
cluster = rack.parent_resource
if isinstance(cluster, Datacenter):
return "placed in non-existing cluster level diversity of group (" + group_name + ")"
else:
if cluster.name in group.vms_per_host.keys():
return "placed in diversity cluster of group (" + group_name + ")"
else:
return "placed in non-existing cluster level diversity of group (" + group_name + ")"
else:
return "placed in non-existing cluster level diversity of group (" + group_name + ")"
else:
return "unknown level"
return "verified"

View File

@ -14,416 +14,83 @@
# limitations under the License.
from oslo_log import log
from valet.engine.optimizer.app_manager.app_topology_base import LEVELS
from valet.engine.optimizer.app_manager.app_topology_base import VGroup
from valet.engine.optimizer.app_manager.app_topology_base import VM
from valet.engine.optimizer.ostro.openstack_filters \
from valet.engine.optimizer.ostro.filters.aggregate_instance_filter \
import AggregateInstanceExtraSpecsFilter
from valet.engine.optimizer.ostro.openstack_filters \
from valet.engine.optimizer.ostro.filters.az_filter \
import AvailabilityZoneFilter
from valet.engine.optimizer.ostro.openstack_filters import CoreFilter
from valet.engine.optimizer.ostro.openstack_filters import DiskFilter
from valet.engine.optimizer.ostro.openstack_filters import RamFilter
from valet.engine.optimizer.ostro.filters.cpu_filter import CPUFilter
from valet.engine.optimizer.ostro.filters.disk_filter import DiskFilter
from valet.engine.optimizer.ostro.filters.diversity_filter \
import DiversityFilter
from valet.engine.optimizer.ostro.filters.mem_filter import MemFilter
from valet.engine.optimizer.ostro.filters.named_affinity_filter \
import NamedAffinityFilter
from valet.engine.optimizer.ostro.filters.named_diversity_filter \
import NamedDiversityFilter
from valet.engine.optimizer.ostro.filters.named_exclusivity_filter \
import NamedExclusivityFilter
from valet.engine.optimizer.ostro.filters.no_exclusivity_filter \
import NoExclusivityFilter
LOG = log.getLogger(__name__)
class ConstraintSolver(object):
"""ConstraintSolver."""
"""Solver to filter out candidate hosts."""
def __init__(self):
"""Initialization."""
"""Instantiate filters to help enforce constraints."""
self.openstack_AZ = AvailabilityZoneFilter()
self.openstack_AIES = AggregateInstanceExtraSpecsFilter()
self.openstack_R = RamFilter()
self.openstack_C = CoreFilter()
self.openstack_D = DiskFilter()
self.filter_list = []
self.filter_list.append(NamedAffinityFilter())
self.filter_list.append(NamedDiversityFilter())
self.filter_list.append(DiversityFilter())
self.filter_list.append(NamedExclusivityFilter())
self.filter_list.append(NoExclusivityFilter())
self.filter_list.append(AvailabilityZoneFilter())
self.filter_list.append(AggregateInstanceExtraSpecsFilter())
self.filter_list.append(CPUFilter())
self.filter_list.append(MemFilter())
self.filter_list.append(DiskFilter())
self.status = "success"
def compute_candidate_list(self, _level, _n, _node_placements,
_avail_resources, _avail_logical_groups):
"""Compute candidate list for the given VGroup or VM."""
def get_candidate_list(self, _n, _node_placements, _avail_resources,
_avail_groups):
"""Filter candidate hosts using a list of filters."""
level = _avail_resources.level
candidate_list = []
"""When replanning."""
if _n.node.host is not None and len(_n.node.host) > 0:
for hk in _n.node.host:
for ark, ar in _avail_resources.iteritems():
if hk == ark:
candidate_list.append(ar)
else:
for _, r in _avail_resources.iteritems():
for _, r in _avail_resources.candidates.iteritems():
candidate_list.append(r)
if len(candidate_list) == 0:
self.status = "no candidate for node = " + _n.node.name
LOG.warning(self.status)
return candidate_list
else:
LOG.debug("ConstraintSolver: num of candidates = " +
str(len(candidate_list)))
"""Availability zone constraint."""
if isinstance(_n.node, VGroup) or isinstance(_n.node, VM):
if (isinstance(_n.node, VM) and _n.node.availability_zone
is not None) or (isinstance(_n.node, VGroup) and
len(_n.node.availability_zone_list) > 0):
self._constrain_availability_zone(_level, _n, candidate_list)
if len(candidate_list) == 0:
self.status = "violate availability zone constraint for " \
"node = " + _n.node.name
LOG.error("ConstraintSolver: " + self.status)
self.status = "no candidate for node = " + _n.orch_id
LOG.warn(self.status)
return candidate_list
"""Host aggregate constraint."""
if isinstance(_n.node, VGroup) or isinstance(_n.node, VM):
if len(_n.node.extra_specs_list) > 0:
self._constrain_host_aggregates(_level, _n, candidate_list)
if len(candidate_list) == 0:
self.status = "violate host aggregate constraint for " \
"node = " + _n.node.name
LOG.error("ConstraintSolver: " + self.status)
return candidate_list
LOG.debug("num of candidates = " + str(len(candidate_list)))
"""CPU capacity constraint."""
if isinstance(_n.node, VGroup) or isinstance(_n.node, VM):
self._constrain_cpu_capacity(_level, _n, candidate_list)
if len(candidate_list) == 0:
self.status = "violate cpu capacity constraint for " \
"node = " + _n.node.name
LOG.error("ConstraintSolver: " + self.status)
return candidate_list
for f in self.filter_list:
f.init_condition()
"""Memory capacity constraint."""
if isinstance(_n.node, VGroup) or isinstance(_n.node, VM):
self._constrain_mem_capacity(_level, _n, candidate_list)
if len(candidate_list) == 0:
self.status = "violate memory capacity constraint for " \
"node = " + _n.node.name
LOG.error("ConstraintSolver: " + self.status)
return candidate_list
"""Local disk capacity constraint."""
if isinstance(_n.node, VGroup) or isinstance(_n.node, VM):
self._constrain_local_disk_capacity(_level, _n, candidate_list)
if len(candidate_list) == 0:
self.status = "violate local disk capacity constraint for " \
"node = " + _n.node.name
LOG.error("ConstraintSolver: " + self.status)
return candidate_list
""" diversity constraint """
if len(_n.node.diversity_groups) > 0:
for _, diversity_id in _n.node.diversity_groups.iteritems():
if diversity_id.split(":")[0] == _level:
if diversity_id in _avail_logical_groups.keys():
self._constrain_diversity_with_others(_level,
diversity_id,
candidate_list)
if len(candidate_list) == 0:
break
if len(candidate_list) == 0:
self.status = "violate diversity constraint for " \
"node = " + _n.node.name
LOG.error("ConstraintSolver: " + self.status)
return candidate_list
else:
self._constrain_diversity(_level, _n, _node_placements,
candidate_list)
if len(candidate_list) == 0:
self.status = "violate diversity constraint for " \
"node = " + _n.node.name
LOG.error("ConstraintSolver: " + self.status)
return candidate_list
"""Exclusivity constraint."""
exclusivities = self.get_exclusivities(_n.node.exclusivity_groups,
_level)
if len(exclusivities) > 1:
self.status = "violate exclusivity constraint (more than one " \
"exclusivity) for node = " + _n.node.name
LOG.error("ConstraintSolver: " + self.status)
if not f.check_pre_condition(level, _n, _node_placements,
_avail_groups):
if f.status is not None:
self.status = f.status
LOG.error(self.status)
return []
else:
if len(exclusivities) == 1:
exclusivity_id = exclusivities[exclusivities.keys()[0]]
if exclusivity_id.split(":")[0] == _level:
self._constrain_exclusivity(_level, exclusivity_id,
candidate_list)
if len(candidate_list) == 0:
self.status = "violate exclusivity constraint for " \
"node = " + _n.node.name
LOG.error("ConstraintSolver: " + self.status)
return candidate_list
else:
self._constrain_non_exclusivity(_level, candidate_list)
if len(candidate_list) == 0:
self.status = "violate non-exclusivity constraint for " \
"node = " + _n.node.name
LOG.error("ConstraintSolver: " + self.status)
return candidate_list
continue
"""Affinity constraint."""
affinity_id = _n.get_affinity_id() # level:name, except name == "any"
if affinity_id is not None:
if affinity_id.split(":")[0] == _level:
if affinity_id in _avail_logical_groups.keys():
self._constrain_affinity(_level, affinity_id,
candidate_list)
if len(candidate_list) == 0:
self.status = "violate affinity constraint for " \
"node = " + _n.node.name
LOG.error("ConstraintSolver: " + self.status)
return candidate_list
return candidate_list
"""
Constraint modules.
"""
def _constrain_affinity(self, _level, _affinity_id, _candidate_list):
conflict_list = []
for r in _candidate_list:
if self.exist_group(_level, _affinity_id, "AFF", r) is False:
if r not in conflict_list:
conflict_list.append(r)
_candidate_list[:] = [
c for c in _candidate_list if c not in conflict_list]
def _constrain_diversity_with_others(self, _level, _diversity_id,
_candidate_list):
conflict_list = []
for r in _candidate_list:
if self.exist_group(_level, _diversity_id, "DIV", r) is True:
if r not in conflict_list:
conflict_list.append(r)
_candidate_list[:] = [
c for c in _candidate_list if c not in conflict_list]
def exist_group(self, _level, _id, _group_type, _candidate):
"""Check if group esists."""
"""Return True if there exists a group within the candidate's
membership list that matches the provided id and group type.
"""
match = False
memberships = _candidate.get_memberships(_level)
for lgk, lgr in memberships.iteritems():
if lgr.group_type == _group_type and lgk == _id:
match = True
break
return match
def _constrain_diversity(self, _level, _n, _node_placements,
_candidate_list):
conflict_list = []
for r in _candidate_list:
if self.conflict_diversity(_level, _n, _node_placements, r):
if r not in conflict_list:
conflict_list.append(r)
_candidate_list[:] = [
c for c in _candidate_list if c not in conflict_list]
def conflict_diversity(self, _level, _n, _node_placements, _candidate):
"""Return True if the candidate has a placement conflict."""
conflict = False
for v in _node_placements.keys():
diversity_level = _n.get_common_diversity(v.diversity_groups)
if diversity_level != "ANY" and \
LEVELS.index(diversity_level) >= \
LEVELS.index(_level):
if diversity_level == "host":
if _candidate.cluster_name == \
_node_placements[v].cluster_name and \
_candidate.rack_name == \
_node_placements[v].rack_name and \
_candidate.host_name == \
_node_placements[v].host_name:
conflict = True
break
elif diversity_level == "rack":
if _candidate.cluster_name == \
_node_placements[v].cluster_name and \
_candidate.rack_name == _node_placements[v].rack_name:
conflict = True
break
elif diversity_level == "cluster":
if _candidate.cluster_name == \
_node_placements[v].cluster_name:
conflict = True
break
return conflict
def _constrain_non_exclusivity(self, _level, _candidate_list):
conflict_list = []
for r in _candidate_list:
if self.conflict_exclusivity(_level, r) is True:
if r not in conflict_list:
conflict_list.append(r)
_candidate_list[:] = [
c for c in _candidate_list if c not in conflict_list]
def conflict_exclusivity(self, _level, _candidate):
"""Check for an exculsivity conflict."""
"""Check if the candidate contains an exclusivity group within its
list of memberships."""
conflict = False
memberships = _candidate.get_memberships(_level)
for mk in memberships.keys():
if memberships[mk].group_type == "EX" and \
mk.split(":")[0] == _level:
conflict = True
return conflict
def get_exclusivities(self, _exclusivity_groups, _level):
"""Return a list of filtered exclusivities."""
"""Extract and return only those exclusivities that exist at the
specified level.
"""
exclusivities = {}
for exk, level in _exclusivity_groups.iteritems():
if level.split(":")[0] == _level:
exclusivities[exk] = level
return exclusivities
def _constrain_exclusivity(self, _level, _exclusivity_id, _candidate_list):
candidate_list = self._get_exclusive_candidates(
_level, _exclusivity_id, _candidate_list)
candidate_list = f.filter_candidates(level, _n, candidate_list)
if len(candidate_list) == 0:
candidate_list = self._get_hibernated_candidates(_level,
_candidate_list)
_candidate_list[:] = [x for x in _candidate_list
if x in candidate_list]
else:
_candidate_list[:] = [x for x in _candidate_list
if x in candidate_list]
self.status = "violate {} constraint for node {} ".format(f.name, _n.orch_id)
LOG.error(self.status)
return []
def _get_exclusive_candidates(self, _level, _exclusivity_id,
_candidate_list):
candidate_list = []
for r in _candidate_list:
if self.exist_group(_level, _exclusivity_id, "EX", r):
if r not in candidate_list:
candidate_list.append(r)
LOG.debug("pass " + f.name + " with num of candidates = " + str(len(candidate_list)))
return candidate_list
def _get_hibernated_candidates(self, _level, _candidate_list):
candidate_list = []
for r in _candidate_list:
if self.check_hibernated(_level, r) is True:
if r not in candidate_list:
candidate_list.append(r)
return candidate_list
def check_hibernated(self, _level, _candidate):
"""Check if the candidate is hibernated.
Return True if the candidate has no placed VMs at the specified
level.
"""
match = False
num_of_placed_vms = _candidate.get_num_of_placed_vms(_level)
if num_of_placed_vms == 0:
match = True
return match
def _constrain_host_aggregates(self, _level, _n, _candidate_list):
conflict_list = []
for r in _candidate_list:
if self.check_host_aggregates(_level, r, _n.node) is False:
if r not in conflict_list:
conflict_list.append(r)
_candidate_list[:] = [
c for c in _candidate_list if c not in conflict_list]
def check_host_aggregates(self, _level, _candidate, _v):
"""Check if candidate passes aggregate instance extra specs.
        Return True if the candidate passes the aggregate instance extra
        specs filter.
"""
return self.openstack_AIES.host_passes(_level, _candidate, _v)
def _constrain_availability_zone(self, _level, _n, _candidate_list):
conflict_list = []
for r in _candidate_list:
if self.check_availability_zone(_level, r, _n.node) is False:
if r not in conflict_list:
conflict_list.append(r)
_candidate_list[:] = [
c for c in _candidate_list if c not in conflict_list]
def check_availability_zone(self, _level, _candidate, _v):
"""Check if the candidate passes the availability zone filter."""
return self.openstack_AZ.host_passes(_level, _candidate, _v)
def _constrain_cpu_capacity(self, _level, _n, _candidate_list):
conflict_list = []
for ch in _candidate_list:
if self.check_cpu_capacity(_level, _n.node, ch) is False:
conflict_list.append(ch)
_candidate_list[:] = [
c for c in _candidate_list if c not in conflict_list]
def check_cpu_capacity(self, _level, _v, _candidate):
"""Check if the candidate passes the core filter."""
return self.openstack_C.host_passes(_level, _candidate, _v)
def _constrain_mem_capacity(self, _level, _n, _candidate_list):
conflict_list = []
for ch in _candidate_list:
if self.check_mem_capacity(_level, _n.node, ch) is False:
conflict_list.append(ch)
_candidate_list[:] = [
c for c in _candidate_list if c not in conflict_list]
def check_mem_capacity(self, _level, _v, _candidate):
"""Check if the candidate passes the RAM filter."""
return self.openstack_R.host_passes(_level, _candidate, _v)
def _constrain_local_disk_capacity(self, _level, _n, _candidate_list):
conflict_list = []
for ch in _candidate_list:
if self.check_local_disk_capacity(_level, _n.node, ch) is False:
conflict_list.append(ch)
_candidate_list[:] = [
c for c in _candidate_list if c not in conflict_list]
def check_local_disk_capacity(self, _level, _v, _candidate):
"""Check if the candidate passes the disk filter."""
return self.openstack_D.host_passes(_level, _candidate, _v)
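    # An illustrative no-op filter (a sketch) showing the minimal interface
    # that get_candidate_list() drives; any new filter only needs these
    # three methods:
    #
    #   class NoopFilter(object):
    #       def __init__(self):
    #           self.name = "noop"
    #           self.status = None
    #
    #       def init_condition(self):
    #           self.status = None
    #
    #       def check_pre_condition(self, _level, _v, _node_placements,
    #                               _avail_groups):
    #           return True
    #
    #       def filter_candidates(self, _level, _v, _candidate_list):
    #           return list(_candidate_list)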

View File

@ -0,0 +1,104 @@
#
# Copyright 2014-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import filter_utils
import six
_SCOPE = 'aggregate_instance_extra_specs'
class AggregateInstanceExtraSpecsFilter(object):
"""AggregateInstanceExtraSpecsFilter works with InstanceType records."""
def __init__(self):
self.name = "aggregate-instance-extra-specs"
self.status = None
def init_condition(self):
self.status = None
def check_pre_condition(self, _level, _v, _node_placements, _avail_groups):
if len(_v.extra_specs_list) > 0:
return True
else:
return False
def filter_candidates(self, _level, _v, _candidate_list):
candidate_list = []
for c in _candidate_list:
if self._check_candidate(_level, _v, c):
candidate_list.append(c)
return candidate_list
def _check_candidate(self, _level, _v, _candidate):
"""Check given candidate host if instance's extra specs matches to metadata."""
extra_specs_list = []
for extra_specs in _v.extra_specs_list:
if "valet" not in extra_specs.keys() and "host_aggregates" not in extra_specs.keys():
extra_specs_list.append(extra_specs)
if len(extra_specs_list) == 0:
return True
metadatas = filter_utils.aggregate_metadata_get_by_host(_level, _candidate)
matched_group_list = []
for extra_specs in extra_specs_list:
for lgk, metadata in metadatas.iteritems():
if self._match_metadata(_candidate.get_resource_name(_level), lgk, extra_specs, metadata):
matched_group_list.append(lgk)
break
else:
return False
for extra_specs in _v.extra_specs_list:
if "host_aggregates" in extra_specs.keys():
extra_specs["host_aggregates"] = matched_group_list
break
else:
host_aggregate_extra_specs = {}
host_aggregate_extra_specs["host_aggregates"] = matched_group_list
_v.extra_specs_list.append(host_aggregate_extra_specs)
return True
def _match_metadata(self, _h_name, _lg_name, _extra_specs, _metadata):
for key, req in six.iteritems(_extra_specs):
# Either not scope format, or aggregate_instance_extra_specs scope
scope = key.split(':', 1)
if len(scope) > 1:
if scope[0] != _SCOPE:
continue
else:
del scope[0]
key = scope[0]
if key == "host_aggregates":
continue
aggregate_vals = _metadata.get(key, None)
if not aggregate_vals:
return False
for aggregate_val in aggregate_vals:
if filter_utils.match(aggregate_val, req):
break
else:
return False
return True
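    # An illustrative extra spec (a sketch; key and value are examples
    # only): with the aggregate_instance_extra_specs scope, the candidate
    # must belong to an aggregate whose metadata satisfies the requirement.
    #
    #   extra_specs = {'aggregate_instance_extra_specs:ssd': 'true'}
    #   # passes only candidates in an aggregate whose metadata maps
    #   # 'ssd' to a value matching 'true'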

View File

@ -0,0 +1,71 @@
#
# Copyright 2014-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import filter_utils
from valet.engine.optimizer.app_manager.group import Group
from valet.engine.optimizer.app_manager.vm import VM
class AvailabilityZoneFilter(object):
""" Filters Hosts by availability zone.
Works with aggregate metadata availability zones, using the key
'availability_zone'
Note: in theory a compute node can be part of multiple availability_zones
"""
def __init__(self):
self.name = "availability-zone"
self.status = None
def init_condition(self):
self.status = None
def check_pre_condition(self, _level, _v, _node_placements, _avail_groups):
if (isinstance(_v, VM) and _v.availability_zone is not None) or \
(isinstance(_v, Group) and len(_v.availability_zone_list) > 0):
return True
else:
return False
def filter_candidates(self, _level, _v, _candidate_list):
candidate_list = []
for c in _candidate_list:
if self._check_candidate(_level, _v, c):
candidate_list.append(c)
return candidate_list
def _check_candidate(self, _level, _v, _candidate):
az_request_list = []
if isinstance(_v, VM):
az_request_list.append(_v.availability_zone)
else:
for az in _v.availability_zone_list:
az_request_list.append(az)
if len(az_request_list) == 0:
return True
availability_zone_list = filter_utils.availability_zone_get_by_host(_level, _candidate)
for azr in az_request_list:
if azr not in availability_zone_list:
return False
return True
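    # An illustrative check (a sketch; the zone name is an example only):
    # a VM pinned to zone "nova" passes only candidates whose memberships
    # at this level include the "nova" availability zone group.
    #
    #   vm.availability_zone = "nova"
    #   # _check_candidate() is True iff "nova" appears in
    #   # filter_utils.availability_zone_get_by_host(_level, candidate)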

View File

@ -0,0 +1,53 @@
#
# Copyright 2014-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class CPUFilter(object):
def __init__(self):
self.name = "cpu"
self.status = None
def init_condition(self):
self.status = None
def check_pre_condition(self, _level, _v, _node_placements, _avail_groups):
return True
def filter_candidates(self, _level, _v, _candidate_list):
candidate_list = []
for c in _candidate_list:
if self._check_candidate(_level, _v, c):
candidate_list.append(c)
return candidate_list
def _check_candidate(self, _level, _v, _candidate):
"""Return True if host has sufficient CPU cores."""
(vCPUs, avail_vCPUs) = _candidate.get_vCPUs(_level)
instance_vCPUs = _v.vCPUs
# Do not allow an instance to overcommit against itself, only against other instances.
if instance_vCPUs > vCPUs:
return False
if avail_vCPUs < instance_vCPUs:
return False
return True
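    # Illustrative arithmetic (a sketch; numbers are examples only) for a
    # candidate with 16 total vCPUs of which 4 are available:
    #
    #   a request for  4 vCPUs passes,
    #   a request for  8 vCPUs fails (8 > 4 available),
    #   a request for 32 vCPUs fails even on an empty host (32 > 16 total).
    #
    # DiskFilter and MemFilter below apply the same pattern to local disk
    # and RAM.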

View File

@ -0,0 +1,48 @@
#
# Copyright 2014-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class DiskFilter(object):
def __init__(self):
self.name = "disk"
self.status = None
def init_condition(self):
self.status = None
def check_pre_condition(self, _level, _v, _node_placements, _avail_groups):
return True
def filter_candidates(self, _level, _v, _candidate_list):
candidate_list = []
for c in _candidate_list:
if self._check_candidate(_level, _v, c):
candidate_list.append(c)
return candidate_list
def _check_candidate(self, _level, _v, _candidate):
"""Filter based on disk usage."""
requested_disk = _v.local_volume_size
(_, usable_disk) = _candidate.get_local_disk(_level)
        if usable_disk < requested_disk:
return False
return True

View File

@ -0,0 +1,73 @@
#
# Copyright 2014-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from valet.engine.optimizer.app_manager.group import LEVEL, Group
from valet.engine.optimizer.ostro.search_helper import check_vm_grouping
class DiversityFilter(object):
def __init__(self):
self.name = "diversity"
self.node_placements = None
self.status = None
def init_condition(self):
self.node_placements = None
self.status = None
def check_pre_condition(self, _level, _v, _node_placements, _avail_groups):
if len(_v.diversity_groups) > 0:
self.node_placements = _node_placements
return True
else:
return False
def filter_candidates(self, _level, _v, _candidate_list):
candidate_list = []
for c in _candidate_list:
if self._check_candidate(_level, _v, c):
candidate_list.append(c)
return candidate_list
def _check_candidate(self, _level, _v, _candidate):
"""Filter based on diversity groups."""
for v in self.node_placements.keys():
if isinstance(v, Group):
if check_vm_grouping(v, _v.orch_id) is True:
continue
diversity_level = _v.get_common_diversity(v.diversity_groups)
if diversity_level != "ANY" and LEVEL.index(diversity_level) >= LEVEL.index(_level):
if diversity_level == "host":
if _candidate.cluster_name == self.node_placements[v].cluster_name and \
_candidate.rack_name == self.node_placements[v].rack_name and \
_candidate.host_name == self.node_placements[v].host_name:
return False
elif diversity_level == "rack":
if _candidate.cluster_name == self.node_placements[v].cluster_name and \
_candidate.rack_name == self.node_placements[v].rack_name:
return False
elif diversity_level == "cluster":
if _candidate.cluster_name == self.node_placements[v].cluster_name:
return False
return True
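    # An illustrative rejection (a sketch): for a rack-level diversity
    # group, a candidate sharing both cluster_name and rack_name with an
    # already-placed member is filtered out, even when searching at the
    # host level, since LEVEL.index("rack") >= LEVEL.index("host").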

View File

@ -73,16 +73,15 @@ def match(value, req):
def aggregate_metadata_get_by_host(_level, _host, _key=None):
"""Return a dict of metadata for a specific host."""
"""Base dict on a metadata key. If the key is not provided,
    return a dict of all metadata.
    """
"""Returns a dict of all metadata based on a metadata key for a specific
host. If the key is not provided, returns a dict of all metadata.
"""
metadatas = {}
logical_groups = _host.get_memberships(_level)
groups = _host.get_memberships(_level)
for lgk, lg in logical_groups.iteritems():
for lgk, lg in groups.iteritems():
if lg.group_type == "AGGR":
if _key is None or _key in lg.metadata:
metadata = collections.defaultdict(set)
@ -99,8 +98,8 @@ def availability_zone_get_by_host(_level, _host):
"""Return a list of availability zones for a specific host."""
availability_zone_list = []
logical_groups = _host.get_memberships(_level)
for lgk, lg in logical_groups.iteritems():
groups = _host.get_memberships(_level)
for lgk, lg in groups.iteritems():
if lg.group_type == "AZ":
availability_zone_list.append(lgk)

View File

@ -0,0 +1,52 @@
#
# Copyright 2014-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class MemFilter(object):
def __init__(self):
self.name = "mem"
self.status = None
def init_condition(self):
self.status = None
def check_pre_condition(self, _level, _v, _node_placements, _avail_groups):
return True
def filter_candidates(self, _level, _v, _candidate_list):
candidate_list = []
for c in _candidate_list:
if self._check_candidate(_level, _v, c):
candidate_list.append(c)
return candidate_list
def _check_candidate(self, _level, _v, _candidate):
"""Only return hosts with sufficient available RAM."""
requested_ram = _v.mem # MB
(total_ram, usable_ram) = _candidate.get_mem(_level)
# Do not allow an instance to overcommit against itself, only against other instances.
        if total_ram < requested_ram:
            return False
        if usable_ram < requested_ram:
            return False
return True
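
A hedged usage sketch of the filter protocol implemented by MemFilter
above; Host and Vm here are hypothetical stand-ins exposing only the
attributes the filter touches (get_mem and mem):

    class Host(object):
        def __init__(self, name, total_mb, usable_mb):
            self.host_name = name
            self._mem = (total_mb, usable_mb)

        def get_mem(self, _level):
            return self._mem

    class Vm(object):
        def __init__(self, mem):
            self.mem = mem  # MB

    vm = Vm(4096)
    f = MemFilter()
    f.init_condition()
    if f.check_pre_condition("host", vm, {}, {}):
        survivors = f.filter_candidates(
            "host", vm, [Host("h1", 8192, 2048), Host("h2", 8192, 6144)])
        # only h2 has enough usable RAM for a 4096 MB request
        assert [h.host_name for h in survivors] == ["h2"]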


@ -0,0 +1,63 @@
#
# Copyright 2014-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from valet.engine.optimizer.app_manager.group import Group
class NamedAffinityFilter(object):
def __init__(self):
self.name = "named-affinity"
self.affinity_id = None
self.status = None
def init_condition(self):
self.affinity_id = None
self.status = None
def check_pre_condition(self, _level, _v, _node_placements, _avail_groups):
if isinstance(_v, Group):
affinity_id = _v.get_affinity_id() # level:name, except name == "any"
if affinity_id is not None:
# NOTE(gjung): do not depend on _level, unlike exclusivity
if affinity_id in _avail_groups.keys():
self.affinity_id = affinity_id
if self.affinity_id is not None:
return True
else:
return False
def filter_candidates(self, _level, _v, _candidate_list):
candidate_list = []
for c in _candidate_list:
if self._check_candidate(_level, c):
candidate_list.append(c)
return candidate_list
def _check_candidate(self, _level, _candidate):
"""Filter based on named affinity group."""
# NOTE(gjung): do not depend on _level, unlike exclusivity
memberships = _candidate.get_all_memberships(_level)
for lgk, lgr in memberships.iteritems():
if lgr.group_type == "AFF" and lgk == self.affinity_id:
return True
return False
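
For reference, the "level:name" convention assumed by the pre-condition
above (the concrete level and group name here are made up):

    affinity_id = "host:db-tier"
    level, name = affinity_id.split(":", 1)
    assert level in ("host", "rack", "cluster")
    assert name != "any"  # a group named "any" carries no named affinity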


@ -0,0 +1,61 @@
#
# Copyright 2014-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class NamedDiversityFilter(object):
def __init__(self):
self.name = "named-diversity"
self.diversity_list = []
self.status = None
def init_condition(self):
self.diversity_list = []
self.status = None
def check_pre_condition(self, _level, _v, _node_placements, _avail_groups):
if len(_v.diversity_groups) > 0:
for _, diversity_id in _v.diversity_groups.iteritems():
if diversity_id.split(":")[0] == _level:
if diversity_id in _avail_groups.keys():
self.diversity_list.append(diversity_id)
if len(self.diversity_list) > 0:
return True
else:
return False
def filter_candidates(self, _level, _v, _candidate_list):
candidate_list = []
for c in _candidate_list:
if self._check_candidate(_level, c):
candidate_list.append(c)
return candidate_list
def _check_candidate(self, _level, _candidate):
"""Filter based on named diversity groups."""
for diversity_id in self.diversity_list:
memberships = _candidate.get_memberships(_level)
for lgk, lgr in memberships.iteritems():
if lgr.group_type == "DIV" and lgk == diversity_id:
return False
return True
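
A hedged sketch of the pre-condition above: diversity_groups maps an
internal key to a "level:name" id, and only ids at the current search
level that are also known in _avail_groups are enforced (keys and names
here are illustrative):

    diversity_groups = {"g1": "rack:web-spread", "g2": "host:db-spread"}
    avail_groups = {"rack:web-spread": object()}

    diversity_list = [div_id for div_id in diversity_groups.values()
                      if div_id.split(":")[0] == "rack"
                      and div_id in avail_groups]
    assert diversity_list == ["rack:web-spread"]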


@ -0,0 +1,82 @@
#
# Copyright 2014-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class NamedExclusivityFilter(object):
def __init__(self):
self.name = "named-exclusivity"
self.exclusivity_id = None
self.status = None
def init_condition(self):
self.exclusivity_id = None
self.status = None
def check_pre_condition(self, _level, _v, _node_placements, _avail_groups):
exclusivities = _v.get_exclusivities(_level)
if len(exclusivities) > 1:
self.status = "multiple exclusivities for node = " + _v.orch_id
return False
if len(exclusivities) == 1:
exclusivity_id = exclusivities[exclusivities.keys()[0]]
# NOTE(gjung): may miss a host that is claimed for the named exclusivity
if exclusivity_id.split(":")[0] == _level:
self.exclusivity_id = exclusivity_id
if self.exclusivity_id is not None:
return True
else:
return False
def filter_candidates(self, _level, _v, _candidate_list):
candidate_list = []
candidate_list = self._get_candidates(_level, _candidate_list)
return candidate_list
def _get_candidates(self, _level, _candidate_list):
candidate_list = []
for c in _candidate_list:
if self._check_exclusive_candidate(_level, c) is True or \
self._check_empty(_level, c) is True:
candidate_list.append(c)
return candidate_list
def _check_exclusive_candidate(self, _level, _candidate):
# NOTE(gjung): may miss a host that is claimed for the named exclusivity
memberships = _candidate.get_memberships(_level)
for lgk, lgr in memberships.iteritems():
if lgr.group_type == "EX" and lgk == self.exclusivity_id:
return True
return False
def _check_empty(self, _level, _candidate):
num_of_placed_vms = _candidate.get_num_of_placed_vms(_level)
if num_of_placed_vms == 0:
return True
return False
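
A minimal sketch of the acceptance rule implemented above: a candidate
survives when it already carries the exclusivity group ("EX") or hosts
no vms at all; Membership is a hypothetical stand-in:

    import collections

    Membership = collections.namedtuple("Membership", "group_type")

    def acceptable(memberships, num_placed_vms, exclusivity_id):
        claimed = any(m.group_type == "EX" and key == exclusivity_id
                      for key, m in memberships.items())
        return claimed or num_placed_vms == 0

    assert acceptable({"host:solo": Membership("EX")}, 3, "host:solo")
    assert acceptable({}, 0, "host:solo")
    assert not acceptable({}, 2, "host:solo")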


@ -0,0 +1,51 @@
#
# Copyright 2014-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class NoExclusivityFilter(object):
def __init__(self):
self.name = "no-exclusivity"
self.status = None
def init_condition(self):
self.status = None
def check_pre_condition(self, _level, _v, _node_placements, _avail_groups):
exclusivities = _v.get_exclusivities(_level)
if len(exclusivities) == 0:
return True
else:
return False
def filter_candidates(self, _level, _v, _candidate_list):
candidate_list = []
for c in _candidate_list:
if self._check_candidate(_level, c):
candidate_list.append(c)
return candidate_list
def _check_candidate(self, _level, _candidate):
memberships = _candidate.get_memberships(_level)
for mk in memberships.keys():
if memberships[mk].group_type == "EX" and mk.split(":")[0] == _level:
return False
return True


@ -1,195 +0,0 @@
#
# Copyright 2014-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import six
from valet.engine.optimizer.app_manager.app_topology_base import VM
from valet.engine.optimizer.ostro import openstack_utils
_SCOPE = 'aggregate_instance_extra_specs'
# FIXME(GJ): make extensible
class AggregateInstanceExtraSpecsFilter(object):
"""AggregateInstanceExtraSpecsFilter works with InstanceType records."""
# Aggregate data and instance type does not change within a request
run_filter_once_per_request = True
def __init__(self):
"""Initialization."""
def host_passes(self, _level, _host, _v):
"""Return a list of hosts that can create instance_type."""
"""Check that the extra specs associated with the instance type match
the metadata provided by aggregates. If not present return False."""
# If 'extra_specs' is not present or extra_specs are empty then we
# need not proceed further
extra_specs_list = []
for extra_specs in _v.extra_specs_list:
if "host_aggregates" not in extra_specs.keys():
extra_specs_list.append(extra_specs)
if len(extra_specs_list) == 0:
return True
metadatas = openstack_utils.aggregate_metadata_get_by_host(_level,
_host)
matched_logical_group_list = []
for extra_specs in extra_specs_list:
for lgk, metadata in metadatas.iteritems():
if self._match_metadata(_host.get_resource_name(_level), lgk,
extra_specs, metadata) is True:
matched_logical_group_list.append(lgk)
break
else:
return False
for extra_specs in _v.extra_specs_list:
if "host_aggregates" in extra_specs.keys():
extra_specs["host_aggregates"] = matched_logical_group_list
break
else:
host_aggregate_extra_specs = {}
host_aggregate_extra_specs["host_aggregates"] = \
matched_logical_group_list
_v.extra_specs_list.append(host_aggregate_extra_specs)
return True
def _match_metadata(self, _h_name, _lg_name, _extra_specs, _metadata):
for key, req in six.iteritems(_extra_specs):
# Either not scope format, or aggregate_instance_extra_specs scope
scope = key.split(':', 1)
if len(scope) > 1:
if scope[0] != _SCOPE:
continue
else:
del scope[0]
key = scope[0]
if key == "host_aggregates":
continue
aggregate_vals = _metadata.get(key, None)
if not aggregate_vals:
return False
for aggregate_val in aggregate_vals:
if openstack_utils.match(aggregate_val, req):
break
else:
return False
return True
# NOTE: originally, OpenStack used the metadata of host_aggregate
class AvailabilityZoneFilter(object):
"""AvailabilityZoneFilter filters Hosts by availability zone."""
"""Work with aggregate metadata availability zones, using the key
'availability_zone'
Note: in theory a compute node can be part of multiple availability_zones
"""
# Availability zones do not change within a request
run_filter_once_per_request = True
def __init__(self):
"""Initialization."""
def host_passes(self, _level, _host, _v):
"""Return True if all availalibility zones in _v exist in the host."""
az_request_list = []
if isinstance(_v, VM):
az_request_list.append(_v.availability_zone)
else:
for az in _v.availability_zone_list:
az_request_list.append(az)
if len(az_request_list) == 0:
return True
availability_zone_list = \
openstack_utils.availability_zone_get_by_host(_level, _host)
for azr in az_request_list:
if azr not in availability_zone_list:
return False
return True
class RamFilter(object):
"""RamFilter."""
def __init__(self):
"""Initialization."""
def host_passes(self, _level, _host, _v):
"""Return True if host has sufficient available RAM."""
requested_ram = _v.mem # MB
(total_ram, usable_ram) = _host.get_mem(_level)
# Do not allow an instance to overcommit against itself, only against
# other instances.
if not total_ram >= requested_ram:
return False
if not usable_ram >= requested_ram:
return False
return True
class CoreFilter(object):
"""CoreFilter."""
def __init__(self):
"""Initialization."""
def host_passes(self, _level, _host, _v):
"""Return True if host has sufficient CPU cores."""
(vCPUs, avail_vCPUs) = _host.get_vCPUs(_level)
instance_vCPUs = _v.vCPUs
# Do not allow an instance to overcommit against itself, only against
# other instances.
if instance_vCPUs > vCPUs:
return False
if avail_vCPUs < instance_vCPUs:
return False
return True
class DiskFilter(object):
"""DiskFilter."""
def __init__(self):
"""Initialization."""
def host_passes(self, _level, _host, _v):
"""Filter based on disk usage."""
requested_disk = _v.local_volume_size
(_, usable_disk) = _host.get_local_disk(_level)
if not usable_disk >= requested_disk:
return False
return True


@ -14,179 +14,285 @@
# limitations under the License.
from oslo_log import log
from valet.engine.optimizer.app_manager.app_topology_base import VGroup
from valet.engine.optimizer.app_manager.app_topology_base import VM
from valet.engine.optimizer.app_manager.group import Group
from valet.engine.optimizer.app_manager.vm import VM
from valet.engine.optimizer.ostro.search import Search
LOG = log.getLogger(__name__)
# FIXME(GJ): make search algorithm pluggable
# NOTE(GJ): do not deal with Volume placements at this version
class Optimizer(object):
"""Optimizer."""
def __init__(self, _resource):
"""Initialization."""
self.resource = _resource
"""Optimizer to compute the optimal placements."""
def __init__(self):
self.resource = None
self.search = Search()
self.status = "success"
def plan(self, _app_topology):
"""Scheduling placements of given app."""
def place(self, _app_topology):
"""Perform a replan, migration, or create operation."""
"""Return a placement map for VMs, Volumes, and VGroups."""
success = False
self.resource = _app_topology.resource
uuid_map = None
place_type = None
if _app_topology.action != "ping" and \
_app_topology.action != "identify":
_app_topology.set_weight()
_app_topology.set_optimization_priority()
if len(_app_topology.exclusion_list_map) > 0:
place_type = "migration"
if _app_topology.action == "create":
if self.search.plan(_app_topology) is True:
LOG.debug("done search")
if len(_app_topology.candidate_list_map) > 0: # ad-hoc
self._update_placement_states(_app_topology)
LOG.debug("done update states")
if _app_topology.status == "success":
self._update_placement_hosts(_app_topology)
LOG.debug("done update hosts")
self._update_resource_status(_app_topology)
LOG.debug("done update resource status")
else:
if ((len(_app_topology.old_vm_map) > 0 or
len(_app_topology.planned_vm_map) > 0) and
len(_app_topology.candidate_list_map) > 0):
place_type = "replan"
if _app_topology.status == "success":
_app_topology.status = "failed"
elif _app_topology.action == "update":
if self.search.re_plan(_app_topology) is True:
LOG.debug("done search")
self._update_placement_states(_app_topology)
if _app_topology.status == "success":
LOG.debug("done update states")
self._update_placement_hosts(_app_topology)
LOG.debug("done update hosts")
self._delete_old_placements(_app_topology.old_vm_map)
self._update_resource_status(_app_topology)
LOG.debug("done update resource status")
else:
place_type = "create"
if _app_topology.status == "success":
_app_topology.status = "failed"
if place_type == "migration":
vm_id = _app_topology.exclusion_list_map.keys()[0]
candidate_host_list = []
for hk in self.resource.hosts.keys():
if hk not in _app_topology.exclusion_list_map[vm_id]:
candidate_host_list.append(hk)
_app_topology.candidate_list_map[vm_id] = candidate_host_list
elif _app_topology.action == "replan":
orch_id = _app_topology.id_map.keys()[0]
host_name = _app_topology.get_placement_host(orch_id)
if place_type == "replan" or place_type == "migration":
success = self.search.re_place_nodes(_app_topology, self.resource)
if success is True:
if len(_app_topology.old_vm_map) > 0:
uuid_map = self._delete_old_vms(_app_topology.old_vm_map)
self.resource.update_topology(store=False)
if host_name != "none" and \
host_name in _app_topology.candidate_list_map[orch_id]:
LOG.warn("vm is already placed in one of candidate hosts")
if not _app_topology.update_placement_state(orch_id,
host=host_name):
LOG.error(_app_topology.status)
else:
success = self.search.place_nodes(_app_topology, self.resource)
LOG.debug("done update state")
if success is True:
placement_map = {}
for v in self.search.node_placements.keys():
node_placement = self.search.node_placements[v]
uuid = _app_topology.get_placement_uuid(orch_id)
host = self.resource.hosts[host_name]
if not host.exist_vm(uuid=uuid):
self._update_uuid(orch_id, uuid, host_name)
LOG.debug("done update uuid in host")
elif self.search.re_plan(_app_topology) is True:
LOG.debug("done search")
self._update_placement_states(_app_topology)
if _app_topology.status == "success":
LOG.debug("done update states")
self._update_placement_hosts(_app_topology)
LOG.debug("done update hosts")
self._delete_old_placements(_app_topology.old_vm_map)
self._update_resource_status(_app_topology)
LOG.debug("done update resource status")
else:
# FIXME(gjung): if 'replan' fails, remove all pending vms?
if _app_topology.status == "success":
_app_topology.status = "failed"
elif _app_topology.action == "identify":
if not _app_topology.update_placement_state(_app_topology.id_map.keys()[0]):
LOG.error(_app_topology.status)
else:
LOG.debug("done update state")
orch_id = _app_topology.id_map.keys()[0]
uuid = _app_topology.get_placement_uuid(orch_id)
host_name = _app_topology.get_placement_host(orch_id)
self._update_uuid(orch_id, uuid, host_name)
LOG.debug("done update uuid in host")
elif _app_topology.action == "migrate":
if self.search.re_plan(_app_topology) is True:
self._update_placement_states(_app_topology)
if _app_topology.status == "success":
self._update_placement_hosts(_app_topology)
self._delete_old_placements(_app_topology.old_vm_map)
self._update_resource_status(_app_topology)
else:
if _app_topology.status == "success":
_app_topology.status = "failed"
def _update_placement_states(self, _app_topology):
"""Update state of each placement."""
for v, p in self.search.node_placements.iteritems():
if isinstance(v, VM):
placement_map[v] = node_placement.host_name
elif isinstance(v, VGroup):
if not _app_topology.update_placement_state(v.orch_id,
host=p.host_name):
LOG.error(_app_topology.status)
break
def _update_placement_hosts(self, _app_topology):
"""Update stack with assigned hosts."""
for v, p in self.search.node_placements.iteritems():
if isinstance(v, VM):
host = p.host_name
_app_topology.update_placement_vm_host(v.orch_id, host)
LOG.debug(" vm: " + v.orch_id + " placed in " + host)
elif isinstance(v, Group):
host = None
if v.level == "host":
placement_map[v] = node_placement.host_name
host = p.host_name
elif v.level == "rack":
placement_map[v] = node_placement.rack_name
host = p.rack_name
elif v.level == "cluster":
placement_map[v] = node_placement.cluster_name
host = p.cluster_name
_app_topology.update_placement_group_host(v.orch_id, host)
LOG.debug(" affinity: " + v.orch_id + " placed in " + host)
LOG.debug(v.name + " placed in " + placement_map[v])
def _delete_old_placements(self, _old_placements):
"""Delete old placements from host and groups."""
self._update_resource_status(uuid_map)
for _v_id, vm_alloc in _old_placements.iteritems():
self.resource.remove_vm_from_host(vm_alloc, orch_id=_v_id,
uuid=_v_id)
self.resource.update_host_time(vm_alloc["host"])
return placement_map
host = self.resource.hosts[vm_alloc["host"]]
self.resource.remove_vm_from_groups(host, orch_id=_v_id,
uuid=_v_id)
else:
self.status = self.search.status
return None
self.resource.update_topology(store=False)
def _delete_old_vms(self, _old_vm_map):
uuid_map = {}
def _update_resource_status(self, _app_topology):
"""Update resource status based on placements."""
for h_uuid, info in _old_vm_map.iteritems():
uuid = self.resource.get_uuid(h_uuid, info[0])
if uuid is not None:
uuid_map[h_uuid] = uuid
self.resource.remove_vm_by_h_uuid_from_host(
info[0], h_uuid, info[1], info[2], info[3])
self.resource.update_host_time(info[0])
host = self.resource.hosts[info[0]]
self.resource.remove_vm_by_h_uuid_from_logical_groups(host, h_uuid)
return uuid_map
def _update_resource_status(self, _uuid_map):
for v, np in self.search.node_placements.iteritems():
uuid = "none"
if _uuid_map is not None:
if v.uuid in _uuid_map.keys():
uuid = _uuid_map[v.uuid]
if isinstance(v, VM):
vm_info = {}
vm_info["stack_id"] = _app_topology.app_id
vm_info["orch_id"] = v.orch_id
vm_info["uuid"] = _app_topology.get_placement_uuid(v.orch_id)
vm_info["name"] = v.name
self.resource.add_vm_to_host(np.host_name,
(v.uuid, v.name, uuid),
v.vCPUs, v.mem, v.local_volume_size)
self._update_logical_grouping(
v, self.search.avail_hosts[np.host_name], uuid)
vm_alloc = {}
vm_alloc["host"] = np.host_name
vm_alloc["vcpus"] = v.vCPUs
vm_alloc["mem"] = v.mem
vm_alloc["local_volume"] = v.local_volume_size
if self.resource.add_vm_to_host(vm_alloc, vm_info) is True:
self.resource.update_host_time(np.host_name)
def _update_logical_grouping(self, _v, _avail_host, _uuid):
for lgk, lg in _avail_host.host_memberships.iteritems():
if lg.group_type == "EX" or lg.group_type == "AFF" or \
self._update_grouping(v,
self.search.avail_hosts[np.host_name],
vm_info)
self.resource.update_topology(store=False)
def _update_grouping(self, _v, _host, _vm_info):
"""Update group status in resource."""
for lgk, lg in _host.host_memberships.iteritems():
if lg.group_type == "EX" or \
lg.group_type == "AFF" or \
lg.group_type == "DIV":
lg_name = lgk.split(":")
if lg_name[0] == "host" and lg_name[1] != "any":
self.resource.add_logical_group(_avail_host.host_name,
self.resource.add_group(_host.host_name,
lgk, lg.group_type)
if _avail_host.rack_name != "any":
for lgk, lg in _avail_host.rack_memberships.iteritems():
if lg.group_type == "EX" or lg.group_type == "AFF" or \
if _host.rack_name != "any":
for lgk, lg in _host.rack_memberships.iteritems():
if lg.group_type == "EX" or \
lg.group_type == "AFF" or \
lg.group_type == "DIV":
lg_name = lgk.split(":")
if lg_name[0] == "rack" and lg_name[1] != "any":
self.resource.add_logical_group(_avail_host.rack_name,
self.resource.add_group(_host.rack_name,
lgk, lg.group_type)
if _avail_host.cluster_name != "any":
for lgk, lg in _avail_host.cluster_memberships.iteritems():
if lg.group_type == "EX" or lg.group_type == "AFF" or \
if _host.cluster_name != "any":
for lgk, lg in _host.cluster_memberships.iteritems():
if lg.group_type == "EX" or \
lg.group_type == "AFF" or \
lg.group_type == "DIV":
lg_name = lgk.split(":")
if lg_name[0] == "cluster" and lg_name[1] != "any":
self.resource.add_logical_group(
_avail_host.cluster_name, lgk, lg.group_type)
self.resource.add_group(_host.cluster_name,
lgk, lg.group_type)
vm_logical_groups = []
self._collect_logical_groups_of_vm(_v, vm_logical_groups)
vm_groups = []
self._collect_groups_of_vm(_v, vm_groups)
host = self.resource.hosts[_avail_host.host_name]
self.resource.add_vm_to_logical_groups(
host, (_v.uuid, _v.name, _uuid), vm_logical_groups)
host = self.resource.hosts[_host.host_name]
self.resource.add_vm_to_groups(host, _vm_info, vm_groups)
def _collect_groups_of_vm(self, _v, _vm_groups):
"""Collect all groups of the vm of its parent (affinity)."""
def _collect_logical_groups_of_vm(self, _v, _vm_logical_groups):
if isinstance(_v, VM):
for es in _v.extra_specs_list:
if "host_aggregates" in es.keys():
lg_list = es["host_aggregates"]
for lgk in lg_list:
if lgk not in _vm_logical_groups:
_vm_logical_groups.append(lgk)
if lgk not in _vm_groups:
_vm_groups.append(lgk)
if _v.availability_zone is not None:
az = _v.availability_zone.split(":")[0]
if az not in _vm_logical_groups:
_vm_logical_groups.append(az)
if az not in _vm_groups:
_vm_groups.append(az)
for _, level in _v.exclusivity_groups.iteritems():
if level not in _vm_logical_groups:
_vm_logical_groups.append(level)
for _, g in _v.exclusivity_groups.iteritems():
if g not in _vm_groups:
_vm_groups.append(g)
for _, level in _v.diversity_groups.iteritems():
if level not in _vm_logical_groups:
_vm_logical_groups.append(level)
for _, g in _v.diversity_groups.iteritems():
if g not in _vm_groups:
_vm_groups.append(g)
if isinstance(_v, VGroup):
if isinstance(_v, Group):
name = _v.level + ":" + _v.name
if name not in _vm_logical_groups:
_vm_logical_groups.append(name)
if name not in _vm_groups:
_vm_groups.append(name)
if _v.survgroup is not None:
self._collect_logical_groups_of_vm(
_v.survgroup, _vm_logical_groups)
if _v.surgroup is not None:
self._collect_groups_of_vm(_v.surgroup, _vm_groups)
def _update_uuid(self, _orch_id, _uuid, _host_name):
"""Update physical uuid of placement in host."""
host = self.resource.hosts[_host_name]
if host.update_uuid(_orch_id, _uuid) is True:
self.resource.update_host_time(_host_name)
else:
LOG.warn("fail to update uuid in host = " + host.name)
self.resource.update_uuid_in_groups(_orch_id, _uuid, host)
self.resource.update_topology(store=False)
def _delete_placement_in_host(self, _orch_id, _vm_alloc):
self.resource.remove_vm_from_host(_vm_alloc, orch_id=_orch_id)
self.resource.update_host_time(_vm_alloc["host"])
host = self.resource.hosts[_vm_alloc["host"]]
self.resource.remove_vm_from_groups(host, orch_id=_orch_id)
self.resource.update_topology(store=False)
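
For reference, the two dict shapes threaded through the resource updates
above (_update_resource_status and _delete_placement_in_host), as
inferred from this diff; all values are illustrative:

    vm_info = {
        "stack_id": "stack-uuid",   # id of the owning Heat stack
        "orch_id": "server_0",      # orchestration id of the resource
        "uuid": "none",             # physical uuid, "none" until known
        "name": "my-server",
    }
    vm_alloc = {
        "host": "compute-01",
        "vcpus": 2,
        "mem": 4096,                # MB
        "local_volume": 40,
    }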


@ -12,19 +12,23 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import time
import traceback
from oslo_config import cfg
from oslo_log import log
from valet.engine.listener.listener_manager import ListenerManager
from valet.engine.optimizer.app_manager.app_handler import AppHandler
from valet.engine.optimizer.app_manager.app_topology_base import VM
from valet.engine.optimizer.db_connect.music_handler import MusicHandler
from valet.engine.optimizer.app_manager.placement_handler \
import PlacementHandler
from valet.engine.optimizer.db_connect.db_handler import DBHandler
from valet.engine.optimizer.event_handler.event_handler import EventHandler
from valet.engine.optimizer.ostro.bootstrapper import Bootstrapper
from valet.engine.optimizer.ostro.optimizer import Optimizer
from valet.engine.resource_manager.compute_manager import ComputeManager
from valet.engine.resource_manager.metadata_manager import MetadataManager
from valet.engine.resource_manager.resource import Resource
from valet.engine.resource_manager.topology_manager import TopologyManager
@ -33,203 +37,165 @@ LOG = log.getLogger(__name__)
class Ostro(object):
"""Valet Engine."""
"""Main class for placement scheduling."""
def __init__(self, _config):
"""Initialization."""
self.config = _config
self.db = MusicHandler(self.config)
if self.db.init_db() is False:
LOG.error("error while initializing MUSIC database")
self.resource = Resource(self.db, self.config)
self.app_handler = AppHandler(self.resource, self.db, self.config)
self.optimizer = Optimizer(self.resource)
self.end_of_process = False
self.batch_store_trigger = 10 # sec
self.data_lock = threading.Lock()
self.thread_list = []
self.topology = TopologyManager(
1, "Topology", self.resource,
self.db = DBHandler(self.config)
self.resource = Resource(self.db, self.config)
self.compute = ComputeManager(1, "Compute", self.resource,
self.data_lock, self.config)
self.compute = ComputeManager(
2, "Compute", self.resource,
self.topology = TopologyManager(2, "Topology", self.resource,
self.data_lock, self.config)
self.metadata = MetadataManager(3, "Metadata", self.resource,
self.data_lock, self.config)
self.listener = ListenerManager(4, "Listener", CONF)
self.listener = ListenerManager(3, "Listener", CONF)
self.phandler = PlacementHandler(self.db)
self.ahandler = AppHandler(self.phandler, self.metadata, self.resource,
self.db, self.config)
self.status = "success"
self.end_of_process = False
self.compute.set_handlers(self.phandler, self.ahandler)
self.batch_store_trigger = 10 # sec
self.optimizer = Optimizer()
self.ehandler = EventHandler(self.phandler, self.ahandler,
self.resource, self.db)
self.bootstrapper = Bootstrapper(self.resource, self.db)
self.bootstrapper.set_handlers(self.phandler)
def bootstrap(self):
"""Load all required datacenter resource information."""
if not self.bootstrapper.load_data(self.compute, self.topology, self.metadata):
return False
if not self.bootstrapper.verify_pre_valet_placements():
return False
return True
def run_ostro(self):
LOG.info("start Ostro ......")
"""Run main valet-engine (ostro) loop."""
LOG.info("start ostro ......")
self.topology.start()
self.compute.start()
self.topology.start()
self.metadata.start()
self.listener.start()
self.thread_list.append(self.topology)
self.thread_list.append(self.compute)
self.thread_list.append(self.topology)
self.thread_list.append(self.metadata)
self.thread_list.append(self.listener)
while self.end_of_process is False:
request_list = self.db.get_requests()
if request_list is None:
break
if len(request_list) > 0:
if self.place_app(request_list) is False:
if self._handle_requests(request_list) is False:
break
else:
event_list = self.db.get_events()
if event_list is None:
break
if len(event_list) > 0:
if self.handle_events(event_list) is False:
if self.ehandler.handle_events(event_list,
self.data_lock) is False:
break
else:
time_diff = time.time() - self.resource.curr_db_timestamp
if (self.resource.resource_updated and
time_diff >= self.batch_store_trigger):
now_time = (time.time() - self.resource.current_timestamp)
if now_time >= self.batch_store_trigger:
self.data_lock.acquire()
if self.resource.store_topology_updates() is False:
self.data_lock.release()
break
self.resource.resource_updated = False
self.data_lock.release()
else:
time.sleep(0.1)
self.topology.end_of_process = True
self.compute.end_of_process = True
self.topology.end_of_process = True
self.metadata.end_of_process = True
for t in self.thread_list:
t.join()
LOG.info("exit Ostro")
LOG.info("exit ostro")
def stop_ostro(self):
"""Stop main engine process."""
"""Stop process of retrieving and handling events and requests from
the db. Stop topology and compute processes.
def _handle_requests(self, _req_list):
"""Deal with all requests.
Request types are 'query', 'create', 'replan', 'identify', 'update',
'migrate', 'ping'.
"""
self.end_of_process = True
while len(self.thread_list) > 0:
time.sleep(1)
for t in self.thread_list:
if not t.is_alive():
self.thread_list.remove(t)
def bootstrap(self):
"""Start bootstrap and update the engine's resource topology."""
LOG.info("Ostro.bootstrap: start bootstrap")
try:
resource_status = self.db.get_resource_status(
self.resource.datacenter.name)
if resource_status is None:
LOG.error("failed to read from table: %s" %
self.config.db_resource_table)
return False
if len(resource_status) > 0:
LOG.info("bootstrap from DB")
if not self.resource.bootstrap_from_db(resource_status):
LOG.error("failed to parse bootstrap data!")
LOG.info("bootstrap from OpenStack")
if not self._set_hosts():
return False
if not self._set_flavors():
return False
if not self._set_topology():
return False
self.resource.update_topology()
except Exception:
LOG.critical("Ostro.bootstrap failed: ",
traceback.format_exc())
LOG.info("done bootstrap")
return True
def _set_topology(self):
if not self.topology.set_topology():
LOG.error("failed to read datacenter topology")
return False
LOG.info("done topology bootstrap")
return True
def _set_hosts(self):
if not self.compute.set_hosts():
LOG.error("failed to read hosts from OpenStack (Nova)")
return False
LOG.info("done hosts & groups bootstrap")
return True
def _set_flavors(self):
if not self.compute.set_flavors():
LOG.error("failed to read flavors from OpenStack (Nova)")
return False
LOG.info("done flavors bootstrap")
return True
# TODO(GJ): evaluate delay
def place_app(self, _app_data):
for req in _app_data:
for req in _req_list:
if req["action"] == "query":
LOG.info("start query")
query_result = self._query(req)
result = self._get_json_results("query", "ok",
self.status, query_result)
if query_result is None:
LOG.error("valet-engine exits due to the error")
return False
result = self._get_json_query_result(req["stack_id"],
query_result)
if not self.db.put_result(result):
return False
LOG.info("done query")
else:
LOG.info("start app placement")
# FIXME(gjung): history check does not work due to update,
# ad-hoc, and replan with the same key
# result = None
# (decision_key, old_decision) = \
# self.ahandler.check_history(req)
result = None
(decision_key, old_decision) = self.app_handler.check_history(
req)
if old_decision is None:
placement_map = self._place_app(req)
if placement_map is None:
result = self._get_json_results(
"placement", "error", self.status, placement_map)
else:
result = self._get_json_results(
"placement", "ok", "success", placement_map)
if decision_key is not None:
self.app_handler.put_history(decision_key, result)
else:
LOG.info("decision(%s) already made" % decision_key)
result = old_decision
# if old_decision is None:
app_topology = self._plan_app(req)
if app_topology is None:
LOG.error("valet-engine exits due to the error")
return False
LOG.info("plan result status: " + app_topology.status)
result = self._get_json_result(app_topology)
# if decision_key is not None:
# self.ahandler.record_history(decision_key, result)
# else:
# LOG.warn("decision(" + decision_key + ") already made")
# result = old_decision
if app_topology.action in ("ping", "create", "replan",
"update", "migrate"):
if not self.db.put_result(result):
return False
LOG.info("done app placement")
if not self.db.delete_requests(result):
return False
return True
def _query(self, _q):
"""Get placements information of valet group (affinity, diversity,
exclusivity).
"""
LOG.info("got query")
query_result = {}
query_result["result"] = None
query_result["status"] = "ok"
if "type" in _q.keys():
if _q["type"] == "group_vms":
@ -237,477 +203,159 @@ class Ostro(object):
params = _q["parameters"]
if "group_name" in params.keys():
self.data_lock.acquire()
vm_list = self._get_vms_from_logical_group(
params["group_name"])
placement_list = self._get_placements_from_group(params["group_name"])
self.data_lock.release()
query_result[_q["stack_id"]] = vm_list
query_result["result"] = placement_list
else:
self.status = "unknown paramenter in query"
LOG.warning("unknown paramenter in query")
query_result[_q["stack_id"]] = None
query_result["status"] = "unknown paramenter in query"
else:
self.status = "no paramenter in query"
LOG.warning("no parameters in query")
query_result[_q["stack_id"]] = None
elif _q["type"] == "all_groups":
query_result["status"] = "no paramenter in query"
elif _q["type"] == "invalid_placements":
self.data_lock.acquire()
query_result[_q["stack_id"]] = self._get_logical_groups()
result = self._get_invalid_placements()
if result is None:
self.data_lock.release()
return None
query_result["result"] = result
self.data_lock.release()
else:
self.status = "unknown query type"
LOG.warning("unknown query type")
query_result[_q["stack_id"]] = None
query_result["status"] = "unknown query type"
else:
self.status = "unknown type in query"
LOG.warning("no type in query")
query_result[_q["stack_id"]] = None
query_result["status"] = "no type in query"
if query_result["status"] != "ok":
LOG.warn(query_result["status"])
query_result["result"] = None
return query_result
def _get_vms_from_logical_group(self, _group_name):
vm_list = []
def _get_placements_from_group(self, _group_name):
"""Get all placements information of given valet group."""
vm_id_list = []
for lgk, lg in self.resource.logical_groups.iteritems():
if lg.group_type == "EX" or lg.group_type == "AFF" or \
placement_list = []
vm_info_list = []
for lgk, lg in self.resource.groups.iteritems():
if lg.group_type == "EX" or \
lg.group_type == "AFF" or \
lg.group_type == "DIV":
lg_id = lgk.split(":")
if lg_id[1] == _group_name:
vm_id_list = lg.vm_list
vm_info_list = lg.vm_list
break
for vm_id in vm_id_list:
if vm_id[2] != "none": # if physical_uuid != 'none'
vm_list.append(vm_id[2])
for vm_info in vm_info_list:
if vm_info["uuid"] != "none":
placement_list.append(vm_info["uuid"])
else:
LOG.warning("found pending vms in this group while query")
return vm_list
return placement_list
def _get_logical_groups(self):
logical_groups = {}
def _get_invalid_placements(self):
"""Get all invalid placements."""
for lgk, lg in self.resource.logical_groups.iteritems():
logical_groups[lgk] = lg.get_json_info()
return logical_groups
def _place_app(self, _app):
"""Set application topology."""
app_topology = self.app_handler.add_app(_app)
if app_topology is None:
self.status = self.app_handler.status
LOG.error("Ostro._place_app: error while register"
"requested apps: " + self.app_handler.status)
if not self.bootstrapper.verify_pre_valet_placements():
return None
"""Check and set vm flavor information."""
for _, vm in app_topology.vms.iteritems():
if self._set_vm_flavor_information(vm) is False:
self.status = "fail to set flavor information"
LOG.error(self.status)
return None
for _, vg in app_topology.vgroups.iteritems():
if self._set_vm_flavor_information(vg) is False:
self.status = "fail to set flavor information in a group"
LOG.error(self.status)
return None
vms = {}
self.data_lock.acquire()
placement_list = self.phandler.get_placements()
"""Set weights for optimization."""
app_topology.set_weight()
app_topology.set_optimization_priority()
for p in placement_list:
if p["status"] is not None and p["status"] != "verified":
status = {}
status["status"] = p["status"]
vms[p["uuid"]] = status
"""Perform search for optimal placement of app topology."""
placement_map = self.optimizer.place(app_topology)
if placement_map is None:
self.status = self.optimizer.status
self.data_lock.release()
return None
return vms
# Update resource and app information
if len(placement_map) > 0:
self.resource.update_topology(store=False)
self.app_handler.add_placement(
placement_map, app_topology, self.resource.current_timestamp)
def _plan_app(self, _app):
"""Deal with app placement request.
if (len(app_topology.exclusion_list_map) > 0 and
len(app_topology.planned_vm_map) > 0):
for vk in app_topology.planned_vm_map.keys():
if vk in placement_map.keys():
del placement_map[vk]
self.data_lock.release()
return placement_map
def _set_vm_flavor_information(self, _v):
if isinstance(_v, VM):
return self._set_vm_flavor_properties(_v)
else: # affinity group
for _, sg in _v.subvgroups.iteritems():
if self._set_vm_flavor_information(sg) is False:
return False
return True
def _set_vm_flavor_properties(self, _vm):
flavor = self.resource.get_flavor(_vm.flavor)
if flavor is None:
LOG.warning("Ostro._set_vm_flavor_properties: does not exist "
"flavor (" + _vm.flavor + ") and try to refetch")
# Reset flavor resource and try again
if self._set_flavors() is False:
return False
flavor = self.resource.get_flavor(_vm.flavor)
if flavor is None:
return False
_vm.vCPUs = flavor.vCPUs
_vm.mem = flavor.mem_cap
_vm.local_volume_size = flavor.disk_cap
if len(flavor.extra_specs) > 0:
extra_specs = {}
for mk, mv in flavor.extra_specs.iteritems():
extra_specs[mk] = mv
_vm.extra_specs_list.append(extra_specs)
return True
# TODO(GJ): evaluate the delay
def handle_events(self, _event_list):
"""Handle events in the event list."""
"""Update the engine's resource topology based on the properties of
each event in the event list.
Validate the request, extract info, search placements, and store/cache results.
"""
self.data_lock.acquire()
resource_updated = False
for e in _event_list:
if e.host is not None and e.host != "none":
if self._check_host(e.host) is False:
LOG.warning("Ostro.handle_events: host (" + e.host +
") related to this event not exists")
continue
if e.method == "build_and_run_instance":
# VM is created (from stack)
LOG.info("Ostro.handle_events: got build_and_run "
"event for %s" % e.uuid)
if self.db.put_uuid(e) is False:
app_topology = self.ahandler.set_app(_app)
if app_topology is None:
self.data_lock.release()
return False
elif e.method == "object_action":
if e.object_name == 'Instance':
# VM became active or deleted
# h_uuid, stack_id
orch_id = self.db.get_uuid(e.uuid)
if orch_id is None:
return None
elif app_topology.status != "success":
self.data_lock.release()
return False
return app_topology
if e.vm_state == "active":
LOG.info("Ostro.handle_events: got instance_"
"active event for " + e.uuid)
vm_info = self.app_handler.get_vm_info(
orch_id[1], orch_id[0], e.host)
if vm_info is None:
LOG.error("Ostro.handle_events: error "
"while getting app info "
"from MUSIC")
self.optimizer.plan(app_topology)
if app_topology.status != "success":
self.data_lock.release()
return False
return app_topology
if len(vm_info) == 0:
# Stack not found because vm is created by the
# other stack
LOG.warning("EVENT: no vm_info found in app "
"placement record")
self._add_vm_to_host(
e.uuid, orch_id[0], e.host, e.vcpus,
e.mem, e.local_disk)
else:
if ("planned_host" in vm_info.keys() and
vm_info["planned_host"] != e.host):
# VM is activated in the different host
LOG.warning("EVENT: vm activated in the "
"different host")
self._add_vm_to_host(
e.uuid, orch_id[0], e.host, e.vcpus,
e.mem, e.local_disk)
self._remove_vm_from_host(
e.uuid, orch_id[0],
vm_info["planned_host"],
float(vm_info["cpus"]),
float(vm_info["mem"]),
float(vm_info["local_volume"]))
self._remove_vm_from_logical_groups(
e.uuid, orch_id[0],
vm_info["planned_host"])
else:
# Found vm in the planned host,
# Possibly the vm deleted in the host while
# batch cleanup
if not self._check_h_uuid(orch_id[0], e.host):
LOG.warning("EVENT: planned vm was "
"deleted")
if self._check_uuid(e.uuid, e.host):
self._update_h_uuid_in_host(orch_id[0],
e.uuid,
e.host)
self._update_h_uuid_in_logical_groups(
orch_id[0], e.uuid, e.host)
else:
LOG.info(
"EVENT: vm activated as planned")
self._update_uuid_in_host(
orch_id[0], e.uuid, e.host)
self._update_uuid_in_logical_groups(
orch_id[0], e.uuid, e.host)
resource_updated = True
elif e.vm_state == "deleted":
LOG.info("EVENT: got instance_delete for %s" %
e.uuid)
self._remove_vm_from_host(
e.uuid, orch_id[0], e.host, e.vcpus,
e.mem, e.local_disk)
self._remove_vm_from_logical_groups(
e.uuid, orch_id[0], e.host)
if not self.app_handler.update_vm_info(
orch_id[1], orch_id[0]):
LOG.error("EVENT: error while updating "
"app in MUSIC")
if not self.ahandler.store_app(app_topology):
self.data_lock.release()
return False
resource_updated = True
else:
LOG.warning("Ostro.handle_events: unknown vm_"
"state = " + e.vm_state)
elif e.object_name == 'ComputeNode':
# Host resource is updated
LOG.debug("Ostro.handle_events: got compute event")
elif e.object_name == 'ComputeNode':
# Host resource is updated
LOG.info("EVENT: got compute for " + e.host)
# NOTE: what if host is disabled?
if self.resource.update_host_resources(
e.host, e.status, e.vcpus, e.vcpus_used, e.mem,
e.free_mem, e.local_disk, e.free_local_disk,
e.disk_available_least) is True:
self.resource.update_host_time(e.host)
resource_updated = True
else:
LOG.warning("Ostro.handle_events: unknown object_"
"name = " + e.object_name)
else:
LOG.warning("Ostro.handle_events: unknown event "
"method = " + e.method)
if resource_updated is True:
self.resource.update_topology(store=False)
for e in _event_list:
if self.db.delete_event(e.event_id) is False:
self.data_lock.release()
return False
if e.method == "object_action":
if e.object_name == 'Instance':
if e.vm_state == "deleted":
if self.db.delete_uuid(e.uuid) is False:
self.data_lock.release()
return False
return None
self.data_lock.release()
return True
return app_topology
def _add_vm_to_host(self, _uuid, _h_uuid, _host_name, _vcpus, _mem,
_local_disk):
existing_vm = False
if self._check_uuid(_uuid, _host_name) is True:
existing_vm = True
else:
if self._check_h_uuid(_h_uuid, _host_name) is True:
existing_vm = True
def _get_json_query_result(self, _stack_id, _result):
"""Set query result format as JSON."""
if existing_vm is False:
vm_id = None
if _h_uuid is None:
vm_id = ("none", "none", _uuid)
else:
vm_id = (_h_uuid, "none", _uuid)
self.resource.add_vm_to_host(_host_name, vm_id, _vcpus, _mem,
_local_disk)
self.resource.update_host_time(_host_name)
def _remove_vm_from_host(self, _uuid, _h_uuid, _host_name, _vcpus, _mem,
_local_disk):
if self._check_h_uuid(_h_uuid, _host_name) is True:
self.resource.remove_vm_by_h_uuid_from_host(_host_name, _h_uuid,
_vcpus, _mem,
_local_disk)
self.resource.update_host_time(_host_name)
else:
if self._check_uuid(_uuid, _host_name) is True:
self.resource.remove_vm_by_uuid_from_host(_host_name, _uuid,
_vcpus, _mem,
_local_disk)
self.resource.update_host_time(_host_name)
else:
LOG.warning("vm (%s) is missing while removing" % _uuid)
def _remove_vm_from_logical_groups(self, _uuid, _h_uuid, _host_name):
host = self.resource.hosts[_host_name]
if _h_uuid is not None and _h_uuid != "none":
self.resource.remove_vm_by_h_uuid_from_logical_groups(
host, _h_uuid)
else:
self.resource.remove_vm_by_uuid_from_logical_groups(host, _uuid)
def _check_host(self, _host_name):
exist = False
for hk in self.resource.hosts.keys():
if hk == _host_name:
exist = True
break
return exist
def _check_h_uuid(self, _h_uuid, _host_name):
if _h_uuid is None or _h_uuid == "none":
return False
host = self.resource.hosts[_host_name]
return host.exist_vm_by_h_uuid(_h_uuid)
def _check_uuid(self, _uuid, _host_name):
if _uuid is None or _uuid == "none":
return False
host = self.resource.hosts[_host_name]
return host.exist_vm_by_uuid(_uuid)
def _update_uuid_in_host(self, _h_uuid, _uuid, _host_name):
host = self.resource.hosts[_host_name]
if host.update_uuid(_h_uuid, _uuid) is True:
self.resource.update_host_time(_host_name)
else:
LOG.warning("Ostro._update_uuid_in_host: fail to update uuid "
"in host = %s" % host.name)
def _update_h_uuid_in_host(self, _h_uuid, _uuid, _host_name):
host = self.resource.hosts[_host_name]
if host.update_h_uuid(_h_uuid, _uuid) is True:
self.resource.update_host_time(_host_name)
def _update_uuid_in_logical_groups(self, _h_uuid, _uuid, _host_name):
host = self.resource.hosts[_host_name]
self.resource.update_uuid_in_logical_groups(_h_uuid, _uuid, host)
def _update_h_uuid_in_logical_groups(self, _h_uuid, _uuid, _host_name):
host = self.resource.hosts[_host_name]
self.resource.update_h_uuid_in_logical_groups(_h_uuid, _uuid, host)
def _get_json_results(self, _request_type, _status_type, _status_message,
_map):
result = {}
result[_stack_id] = {}
if _request_type == "query":
for qk, qr in _map.iteritems():
query_result = {}
result[_stack_id]["action"] = "query"
result[_stack_id]["stack_id"] = _stack_id
query_status = {}
if qr is None:
if _result["status"] != "ok":
query_status['type'] = "error"
query_status['message'] = _status_message
query_status['message'] = _result["status"]
else:
query_status['type'] = "ok"
query_status['message'] = "success"
result[_stack_id]['status'] = query_status
query_result['status'] = query_status
if qr is not None:
query_result['resources'] = qr
result[qk] = query_result
else:
if _status_type != "error":
applications = {}
for v in _map.keys():
if isinstance(v, VM):
resources = None
if v.app_uuid in applications.keys():
resources = applications[v.app_uuid]
else:
resources = {}
applications[v.app_uuid] = resources
host = _map[v]
resource_property = {"host": host}
properties = {"properties": resource_property}
resources[v.uuid] = properties
for appk, app_resources in applications.iteritems():
app_result = {}
app_status = {}
app_status['type'] = _status_type
app_status['message'] = _status_message
app_result['status'] = app_status
app_result['resources'] = app_resources
result[appk] = app_result
for appk, app in self.app_handler.apps.iteritems():
if app.request_type == "ping":
app_result = {}
app_status = {}
app_status['type'] = _status_type
app_status['message'] = "ping"
app_result['status'] = app_status
app_result['resources'] = {
"ip": self.config.ip, "id": self.config.priority}
result[appk] = app_result
else:
for appk in self.app_handler.apps.keys():
app_result = {}
app_status = {}
app_status['type'] = _status_type
app_status['message'] = _status_message
app_result['status'] = app_status
app_result['resources'] = {}
result[appk] = app_result
if _result["result"] is not None:
result[_stack_id]['resources'] = _result["result"]
return result
def _get_json_result(self, _app_topology):
"""Set request result format as JSON."""
result = {}
result[_app_topology.app_id] = {}
result[_app_topology.app_id]["action"] = _app_topology.action
result[_app_topology.app_id]["stack_id"] = _app_topology.app_id
if _app_topology.action == "ping":
app_status = {}
if _app_topology.status != "success":
app_status['type'] = "error"
app_status['message'] = _app_topology.status
result[_app_topology.app_id]['status'] = app_status
result[_app_topology.app_id]['resources'] = {}
else:
app_status['type'] = "ok"
app_status['message'] = _app_topology.status
result[_app_topology.app_id]['status'] = app_status
result[_app_topology.app_id]['resources'] = {"ip": self.config.ip, "id": self.config.priority}
elif _app_topology.action in ("create", "replan", "update", "migrate"):
app_status = {}
if _app_topology.status != "success":
app_status['type'] = "error"
app_status['message'] = _app_topology.status
result[_app_topology.app_id]['status'] = app_status
result[_app_topology.app_id]['resources'] = {}
else:
app_status['type'] = "ok"
app_status['message'] = _app_topology.status
result[_app_topology.app_id]['status'] = app_status
resources = {}
for rk, r in _app_topology.stack["placements"].iteritems():
if r["type"] == "OS::Nova::Server":
resources[rk] = {"properties": {"host": r["properties"]["host"]}}
result[_app_topology.app_id]['resources'] = resources
return result
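
An illustrative result payload for a successful "create" request,
following _get_json_result above (stack id, resource key, and host name
are made up):

    {
        "stack-uuid": {
            "action": "create",
            "stack_id": "stack-uuid",
            "status": {"type": "ok", "message": "success"},
            "resources": {
                "server_0": {"properties": {"host": "compute-01"}}
            }
        }
    }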


@ -12,35 +12,46 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from valet.engine.optimizer.app_manager.app_topology_base import LEVELS
from valet.engine.optimizer.app_manager.app_topology_base import VGroup
class GroupResource(object):
"""Container for all groups."""
def __init__(self):
self.name = None
self.group_type = "AGGR"
self.metadata = {}
self.num_of_placed_vms = 0
# key = host (host or rack), value = num_of_placed_vms
self.num_of_placed_vms_per_host = {}
class Resource(object):
"""Resource."""
def __init__(self):
"""Initialization."""
# level of placement
self.level = None
self.host_name = None
self.host_memberships = {} # all mapped logical groups to host
self.host_vCPUs = 0 # original total vCPUs before overcommit
self.host_avail_vCPUs = 0 # remaining vCPUs after overcommit
self.host_mem = 0 # original total mem cap before overcommit
self.host_avail_mem = 0 # remaining mem cap after
# all mapped groups to host
self.host_memberships = {}
# original total vCPUs before overcommit
self.host_vCPUs = 0
# remaining vCPUs after overcommit
self.host_avail_vCPUs = 0
# original total mem cap before overcommit
self.host_mem = 0
# remaining mem cap after
self.host_avail_mem = 0
# original total local disk cap before overcommit
self.host_local_disk = 0
# remaining local disk cap after overcommit
self.host_avail_local_disk = 0
# the number of vms currently placed in this host
self.host_num_of_placed_vms = 0
self.rack_name = None # where this host is located
# where this host is located
self.rack_name = None
self.rack_memberships = {}
self.rack_vCPUs = 0
self.rack_avail_vCPUs = 0
@ -62,7 +73,11 @@ class Resource(object):
self.cluster_avail_local_disk = 0
self.cluster_num_of_placed_vms = 0
self.sort_base = 0 # order to place
# level of placement
self.level = None
# order to place
self.sort_base = 0
def get_common_placement(self, _resource):
"""Get common placement level."""
@ -109,6 +124,27 @@ class Resource(object):
return memberships
def get_all_memberships(self, _level):
memberships = {}
if _level == "cluster":
for mk, m in self.cluster_memberships.iteritems():
memberships[mk] = m
for mk, m in self.rack_memberships.iteritems():
memberships[mk] = m
for mk, m in self.host_memberships.iteritems():
memberships[mk] = m
elif _level == "rack":
for mk, m in self.rack_memberships.iteritems():
memberships[mk] = m
for mk, m in self.host_memberships.iteritems():
memberships[mk] = m
elif _level == "host":
for mk, m in self.host_memberships.iteritems():
memberships[mk] = m
return memberships
def get_num_of_placed_vms(self, _level):
"""Get the number of placed vms of this resource at a given level."""
num_of_vms = 0
@ -209,53 +245,3 @@ class Resource(object):
avail_mem = self.host_avail_mem
return (mem, avail_mem)
class LogicalGroupResource(object):
"""LogicalGroupResource."""
def __init__(self):
"""Initialization."""
self.name = None
self.group_type = "AGGR"
self.metadata = {}
self.num_of_placed_vms = 0
# key = host (i.e., id of host or rack), value = num_of_placed_vms
self.num_of_placed_vms_per_host = {}
class Node(object):
"""Node."""
def __init__(self):
self.node = None # VM or VGroup
self.sort_base = -1
def get_common_diversity(self, _diversity_groups):
"""Return the common level of the given diversity groups."""
common_level = "ANY"
for dk in self.node.diversity_groups.keys():
if dk in _diversity_groups.keys():
level = self.node.diversity_groups[dk].split(":")[0]
if common_level != "ANY":
if LEVELS.index(level) > LEVELS.index(common_level):
common_level = level
else:
common_level = level
return common_level
def get_affinity_id(self):
"""Return the affinity id."""
aff_id = None
if isinstance(self.node, VGroup) and \
self.node.vgroup_type == "AFF" and \
self.node.name != "any":
aff_id = self.node.level + ":" + self.node.name
return aff_id

File diff suppressed because it is too large


@ -0,0 +1,109 @@
#
# Copyright 2014-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from valet.engine.optimizer.app_manager.group import Group, LEVEL
from valet.engine.optimizer.app_manager.vm import VM
def get_group_of_vm(_vmk, _groups):
'''Get the group where the vm is located.'''
group = None
for gk, g in _groups.iteritems():
if check_vm_grouping(g, _vmk) is True:
group = g
break
return group
def check_vm_grouping(_vg, _vmk):
'''Check recursively if vm is located in the group.'''
exist = False
for sgk, sg in _vg.subgroups.iteritems():
if isinstance(sg, VM):
if sgk == _vmk:
exist = True
break
elif isinstance(sg, Group):
if check_vm_grouping(sg, _vmk) is True:
exist = True
break
return exist
def get_child_vms(_vg, _vm_list):
for sgk, sg in _vg.subgroups.iteritems():
if isinstance(sg, VM):
_vm_list.append(sgk)
else:
get_child_vms(sg, _vm_list)
def get_node_resource_of_level(_n, _level, _avail_hosts):
'''Get the name of the resource at the given level for the planned vm or affinity group.'''
resource_name = None
if isinstance(_n, VM):
resource_name = get_resource_of_level(_n.host, _level, _avail_hosts)
elif isinstance(_n, Group):
if _n.level == "host":
resource_name = get_resource_of_level(_n.host, _level, _avail_hosts)
elif _n.level == "rack":
if _level == "rack":
resource_name = _n.host
elif _level == "cluster":
for _, ah in _avail_hosts.iteritems():
if ah.rack_name == _n.host:
resource_name = ah.cluster_name
break
elif _n.level == "cluster":
if _level == "cluster":
resource_name = _n.host
return resource_name
def get_resource_of_level(_host_name, _level, _avail_hosts):
'''Get the resource name at the given level for the host.'''
resource_name = None
if _level == "host":
resource_name = _host_name
elif _level == "rack":
if _host_name in _avail_hosts.keys():
resource_name = _avail_hosts[_host_name].rack_name
elif _level == "cluster":
if _host_name in _avail_hosts.keys():
resource_name = _avail_hosts[_host_name].cluster_name
return resource_name
def get_next_placements(_n, _level):
'''Get vms and groups to be handled in the next level search.'''
vms = {}
groups = {}
if isinstance(_n, Group):
if LEVEL.index(_n.level) < LEVEL.index(_level):
groups[_n.orch_id] = _n
else:
for _, sg in _n.subgroups.iteritems():
if isinstance(sg, VM):
vms[sg.orch_id] = sg
elif isinstance(sg, Group):
groups[sg.orch_id] = sg
else:
vms[_n.orch_id] = _n
return (vms, groups)
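
A hedged sketch of the recursion used by get_child_vms above, with
hypothetical stand-ins for VM and Group (only the attributes the helper
reads are stubbed):

    class FakeVM(object):
        def __init__(self, orch_id):
            self.orch_id = orch_id

    class FakeGroup(object):
        def __init__(self, orch_id, level, subgroups):
            self.orch_id = orch_id
            self.level = level
            self.subgroups = subgroups  # orch_id -> FakeVM or FakeGroup

    def child_vm_ids(group):
        # Mirrors get_child_vms: walk subgroups, collecting vm orch_ids.
        ids = []
        for sg in group.subgroups.values():
            if isinstance(sg, FakeVM):
                ids.append(sg.orch_id)
            else:
                ids.extend(child_vm_ids(sg))
        return ids

    inner = FakeGroup("g-inner", "host", {"vm2": FakeVM("vm2")})
    outer = FakeGroup("g-outer", "rack",
                      {"vm1": FakeVM("vm1"), "g-inner": inner})
    assert sorted(child_vm_ids(outer)) == ["vm1", "vm2"]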


@ -17,7 +17,6 @@
"""Valet Engine Server Configuration."""
import os
from oslo_config import cfg
from valet.engine.conf import init_engine
@ -31,17 +30,9 @@ class Config(object):
def __init__(self, *default_config_files):
init_engine(default_config_files=default_config_files)
# System parameters
self.root_loc = os.path.dirname(CONF.default_config_files[0])
self.mode = None
self.command = 'status'
self.process = None
self.control_loc = None
self.api_protocol = 'http://'
self.db_keyspace = None
@ -51,12 +42,12 @@ class Config(object):
self.db_resource_table = None
self.db_app_table = None
self.db_uuid_table = None
self.db_group_table = None
self.replication_factor = 3
self.hosts = ['localhost']
self.port = 8080
self.ip = None
self.priority = 0
# Logging parameters
@ -79,6 +70,7 @@ class Config(object):
self.topology_trigger_freq = 0
self.compute_trigger_freq = 0
self.metadata_trigger_freq = 0
self.update_batch_wait = 0
self.default_cpu_allocation_ratio = 1
@ -94,27 +86,6 @@ class Config(object):
self.user_name = None
self.pw = None
# Simulation parameters
self.sim_cfg_loc = None
self.num_of_hosts_per_rack = 0
self.num_of_racks = 0
self.num_of_spine_switches = 0
self.num_of_aggregates = 0
self.aggregated_ratio = 0
self.cpus_per_host = 0
self.mem_per_host = 0
self.disk_per_host = 0
self.bandwidth_of_spine = 0
self.bandwidth_of_rack = 0
self.bandwidth_of_host = 0
self.num_of_basic_flavors = 0
self.base_flavor_cpus = 0
self.base_flavor_mem = 0
self.base_flavor_disk = 0
# Music HA paramater
self.music_server_retries = 3
@ -124,46 +95,28 @@ class Config(object):
if status != "success":
return status
self.sim_cfg_loc = self.root_loc + self.sim_cfg_loc
self.process = self.process
self.logging_loc = self.logging_loc
self.resource_log_loc = self.logging_loc
self.app_log_loc = self.logging_loc
self.eval_log_loc = self.logging_loc
if self.mode.startswith("live") is False:
status = self._set_simulation()
if status != "success":
return status
return "success"
def _init_system(self):
self.command = CONF.command
self.mode = CONF.engine.mode
self.priority = CONF.engine.priority
self.logger_name = CONF.engine.logger_name
self.logging_level = CONF.engine.logging_level
self.logging_loc = CONF.engine.logging_dir
self.resource_log_loc = CONF.engine.logging_dir + 'resources'
self.app_log_loc = CONF.engine.logging_dir + 'app'
self.eval_log_loc = CONF.engine.logging_dir
self.max_log_size = CONF.engine.max_log_size
self.max_num_of_logs = CONF.engine.max_num_of_logs
self.process = CONF.engine.pid
self.datacenter_name = CONF.engine.datacenter_name
self.default_cpu_allocation_ratio = \
@ -176,86 +129,40 @@ class Config(object):
CONF.engine.default_disk_allocation_ratio
self.static_cpu_standby_ratio = CONF.engine.static_cpu_standby_ratio
self.static_mem_standby_ratio = CONF.engine.static_mem_standby_ratio
self.static_local_disk_standby_ratio = \
CONF.engine.static_local_disk_standby_ratio
self.topology_trigger_freq = CONF.engine.topology_trigger_frequency
self.compute_trigger_freq = CONF.engine.compute_trigger_frequency
self.metadata_trigger_freq = CONF.engine.metadata_trigger_frequency
self.update_batch_wait = CONF.engine.update_batch_wait
self.db_keyspace = CONF.music.keyspace
self.db_request_table = CONF.music.request_table
self.db_response_table = CONF.music.response_table
self.db_event_table = CONF.music.event_table
self.db_resource_table = CONF.music.resource_table
self.db_app_table = CONF.music.app_table
self.db_uuid_table = CONF.music.uuid_table
self.db_group_table = CONF.music.group_table
self.music_server_retries = CONF.music.music_server_retries
self.replication_factor = CONF.music.replication_factor
self.hosts = CONF.music.hosts
self.port = CONF.music.port
self.music_server_retries = CONF.music.music_server_retries
self.priority = CONF.engine.priority
self.ip = CONF.engine.ip
self.num_of_region_chars = CONF.engine.num_of_region_chars
self.rack_code_list = CONF.engine.rack_code_list
self.node_code_list = CONF.engine.node_code_list
self.sim_cfg_loc = CONF.engine.sim_cfg_loc
self.project_name = CONF.identity.project_name
self.user_name = CONF.identity.username
self.pw = CONF.identity.password
return "success"
def _set_simulation(self):
self.num_of_spine_switches = CONF.engine.num_of_spine_switches
self.num_of_hosts_per_rack = CONF.engine.num_of_hosts_per_rack
self.num_of_racks = CONF.engine.num_of_racks
self.num_of_aggregates = CONF.engine.num_of_aggregates
self.aggregated_ratio = CONF.engine.aggregated_ratio
self.cpus_per_host = CONF.engine.cpus_per_host
self.mem_per_host = CONF.engine.mem_per_host
self.disk_per_host = CONF.engine.disk_per_host
self.bandwidth_of_spine = CONF.engine.bandwidth_of_spine
self.bandwidth_of_rack = CONF.engine.bandwidth_of_rack
self.bandwidth_of_host = CONF.engine.bandwidth_of_host
self.num_of_basic_flavors = CONF.engine.num_of_basic_flavors
self.base_flavor_cpus = CONF.engine.base_flavor_cpus
self.base_flavor_mem = CONF.engine.base_flavor_mem
self.base_flavor_disk = CONF.engine.base_flavor_disk
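
Config is essentially a flattening of oslo.config options onto plain attributes. A minimal, self-contained sketch of that pattern (the option names and defaults here are hypothetical, and they are registered directly rather than through init_engine):

from oslo_config import cfg

CONF = cfg.CONF

# Hypothetical subset of the engine options consumed above.
engine_opts = [
    cfg.StrOpt('mode', default='live'),
    cfg.IntOpt('update_batch_wait', default=600),
]
CONF.register_opts(engine_opts, group='engine')


class MiniConfig(object):
    def __init__(self):
        # Copy registered options onto flat attributes, as Config does.
        self.mode = CONF.engine.mode
        self.update_batch_wait = CONF.engine.update_batch_wait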

View File

@ -16,13 +16,15 @@
"""Base."""
import mock
from oslo_config import fixture as fixture_config
from oslotest.base import BaseTestCase
from oslotest import base
from valet import api
from valet.tests.functional.valet_validator.common import init
class Base(BaseTestCase):
class Base(base.BaseTestCase):
"""Test case base class for all unit tests."""
def __init__(self, *args, **kwds):
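
Switching to "from oslotest import base" follows the OpenStack hacking guideline of importing modules rather than names; the base class is then referenced through its module, as in this hypothetical example:

from oslotest import base


class ExampleTest(base.BaseTestCase):
    """Reference BaseTestCase through the imported module."""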

View File

@ -1,74 +0,0 @@
#
# Copyright 2014-2017 AT&T Intellectual Property
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test Topology."""
from oslo_log import log
from valet.engine.resource_manager.naming import Naming
from valet.tests.base import Base
LOG = log.getLogger(__name__)
class TestNaming(Base):
"""Unit Tests for valet.engine.resource_manager.naming."""
def setUp(self):
"""Setup TestNaming Test Class."""
super(TestNaming, self).setUp()
self.topo = Naming(Config(), LOG)
def test_simple_topology(self):
"""Validate simple topology (region, rack, node_type and status)."""
(full_rack_name, status) = \
self.topo._set_layout_by_name("pdk15r05c001")
self.validate_test(full_rack_name == "pdk15r05")
self.validate_test(status == "success")
def test_domain_topology(self):
"""Test Domain Topology."""
(full_rack_name, status) = \
self.topo._set_layout_by_name("ihk01r01c001.emea.att.com")
self.validate_test(full_rack_name == "ihk01r01")
self.validate_test(status == "success")
def test_unhappy_topology_r(self):
"""Test unhappy topology, region/rack/node none, invalid status 0."""
(full_rack_name, status) = \
self.topo._set_layout_by_name("pdk1505c001")
self.validate_test(full_rack_name == "none")
self.validate_test(status == "invalid rack_char = c. "
"missing rack_char = r")
def test_unhappy_topology_c(self):
"""Test unhappy topology with values none and 1 invalid status."""
(full_rack_name, status) = \
self.topo._set_layout_by_name("pdk15r05001")
self.validate_test(full_rack_name == "none")
self.validate_test(status == "incorrect format of rack "
"name = ")
# TODO(UNKNOWN): add validation to topology for region
class Config(object):
"""Config for topology."""
num_of_region_chars = 3
rack_code_list = "r"
node_code_list = "a,c,u,f,o,p,s"
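
For context, the deleted tests encode the host-name layout below. This reconstruction of the rack-name split is inferred from the test fixtures alone, not from the Naming implementation:

def split_rack_name(host_name, region_chars=3, rack_code="r"):
    """Hypothetical re-creation of the split the tests expect."""
    idx = host_name.find(rack_code, region_chars)
    if idx < 0:
        return "none"
    # Full rack name = everything through the rack code and its digits,
    # e.g. "pdk15" + "r" + "05" -> "pdk15r05".
    end = idx + 1
    while end < len(host_name) and host_name[end].isdigit():
        end += 1
    return host_name[:end]

print(split_rack_name("pdk15r05c001"))               # pdk15r05
print(split_rack_name("ihk01r01c001.emea.att.com"))  # ihk01r01
print(split_rack_name("pdk1505c001"))                # none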

View File

@ -12,7 +12,9 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
import unittest
from valet.engine.optimizer.ostro.search import Search
from valet.tests.base import Base
@ -27,6 +29,7 @@ class TestSearch(Base):
        self.search = Search()

    @unittest.skip("Method was removed")
    def test_copy_resource_status(self):
        """Test Copy Resource Status."""
        self.search.copy_resource_status(mock.MagicMock())

View File

@ -15,13 +15,13 @@
import uuid
from valet.api.db.models import music as models
from valet.api.db.models.music import groups
def group(name="mock_group", description="mock group", type="affinity",
level="host", members='["test_tenant_id"]'):
"""Boilerplate for creating a group"""
group = models.groups.Group(name=name, description=description, type=type,
group = groups.Group(name=name, description=description, type=type,
level=level, members=members, _insert=False)
group.id = str(uuid.uuid4())
return group
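
A sketch of how this fixture might be exercised, assuming it is importable from the test helpers (the test body is hypothetical):

import unittest


class TestGroupFixture(unittest.TestCase):
    def test_group_defaults(self):
        g = group()  # fixture defined above
        self.assertEqual("mock_group", g.name)
        self.assertEqual("affinity", g.type)
        self.assertEqual("host", g.level)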