Matching flavor with id

Fix parallel running

Change-Id: I015649e7287cf1ebb254639c24a2e355c6d03fd1

parent ce6b9b9871
commit b3c3bdb6fa
@@ -41,9 +41,9 @@ class Ostro(object):
 
         self.db = MusicHandler(self.config, self.logger)
         if self.db.init_db() is False:
-            self.logger.error("Ostro: error while initializing MUSIC database")
+            self.logger.error("error while initializing MUSIC database")
         else:
-            self.logger.debug("Ostro: done init music")
+            self.logger.debug("done init music")
 
         self.resource = Resource(self.db, self.config, self.logger)
         self.logger.debug("done init resource")
@@ -111,7 +111,7 @@ class Ostro(object):
         for t in self.thread_list:
             t.join()
 
-        self.logger.info("Ostro: exit Ostro")
+        self.logger.info("exit Ostro")
 
     def stop_ostro(self):
         """Stop main engine process."""
@@ -134,13 +134,13 @@ class Ostro(object):
             resource_status = self.db.get_resource_status(
                 self.resource.datacenter.name)
             if resource_status is None:
-                self.logger.error("Ostro: failed to read from table: " + self.config.db_resource_table)
+                self.logger.error("failed to read from table: " + self.config.db_resource_table)
                 return False
 
             if len(resource_status) > 0:
-                self.logger.info("Ostro: bootstrap from db")
+                self.logger.info("bootstrap from db")
                 if not self.resource.bootstrap_from_db(resource_status):
-                    self.logger.error("Ostro: failed to parse bootstrap data!")
+                    self.logger.error("failed to parse bootstrap data!")
 
             self.logger.info("read bootstrap data from OpenStack")
             if not self._set_hosts():
@@ -158,7 +158,7 @@ class Ostro(object):
             self.logger.critical("Ostro.bootstrap failed: " +
                                  traceback.format_exc())
 
-        self.logger.info("Ostro: done bootstrap")
+        self.logger.info("done bootstrap")
 
         return True
 
@@ -168,7 +168,7 @@ class Ostro(object):
             self.logger.error("failed to read datacenter topology")
             return False
 
-        self.logger.debug("done topology bootstrap")
+        self.logger.info("done topology bootstrap")
         return True
 
     def _set_hosts(self):
@@ -177,17 +177,16 @@ class Ostro(object):
             self.logger.error("failed to read hosts from OpenStack (Nova)")
             return False
 
-        self.logger.debug("done hosts & groups bootstrap")
+        self.logger.info("done hosts & groups bootstrap")
         return True
 
     def _set_flavors(self):
-        self.logger.debug("start flavors bootstrap")
         if not self.compute.set_flavors():
             self.status = "OpenStack (Nova) internal error"
             self.logger.error("failed to read flavors from OpenStack (Nova)")
             return False
 
-        self.logger.debug("done flavors bootstrap")
+        self.logger.info("done flavors bootstrap")
         return True
 
     def place_app(self, _app_data):
@@ -196,7 +195,7 @@ class Ostro(object):
 
         for req in _app_data:
             if req["action"] == "query":
-                self.logger.info("Ostro: start query")
+                self.logger.info("start query")
 
                 query_result = self._query(req)
                 result = self._get_json_results("query", "ok",
@@ -205,9 +204,9 @@ class Ostro(object):
                 if self.db.put_result(result) is False:
                     return False
 
-                self.logger.info("Ostro: done query")
+                self.logger.info("done query")
             else:
-                self.logger.info("Ostro: start app placement")
+                self.logger.info("start app placement")
 
                 result = None
                 placement_map = self._place_app(req)
@@ -222,10 +221,10 @@ class Ostro(object):
                 if self.db.put_result(result) is False:
                     return False
 
-                self.logger.info("Ostro: done app placement")
+                self.logger.info("done app placement")
 
         end_time = time.time()
-        self.logger.info("Ostro: total decision delay of request = " + str(end_time - start_time) + " sec")
+        self.logger.info("total decision delay of request = " + str(end_time - start_time) + " sec")
 
         return True
 
@@ -243,11 +242,11 @@ class Ostro(object):
                         query_result[_q["stack_id"]] = vm_list
                     else:
                         self.status = "unknown paramenter in query"
-                        self.logger.warn("Ostro: unknown paramenter in query")
+                        self.logger.warn("unknown paramenter in query")
                         query_result[_q["stack_id"]] = None
                 else:
                     self.status = "no paramenter in query"
-                    self.logger.warn("Ostro: no parameters in query")
+                    self.logger.warn("no parameters in query")
                     query_result[_q["stack_id"]] = None
             elif _q["type"] == "all_groups":
                 self.data_lock.acquire()
@@ -255,11 +254,11 @@ class Ostro(object):
                 self.data_lock.release()
             else:
                 self.status = "unknown query type"
-                self.logger.warn("Ostro: unknown query type")
+                self.logger.warn("unknown query type")
                 query_result[_q["stack_id"]] = None
         else:
             self.status = "unknown type in query"
-            self.logger.warn("Ostro: no type in query")
+            self.logger.warn("no type in query")
             query_result[_q["stack_id"]] = None
 
         return query_result
@@ -302,12 +301,12 @@ class Ostro(object):
         for _, vm in app_topology.vms.iteritems():
             if self._set_vm_flavor_information(vm) is False:
                 self.status = "fail to set flavor information"
-                self.logger.error("Ostro: failed to set flavor information ")
+                self.logger.error("failed to set flavor information ")
                 return None
         for _, vg in app_topology.vgroups.iteritems():
             if self._set_vm_flavor_information(vg) is False:
                 self.status = "fail to set flavor information in a group"
-                self.logger.error("Ostro: failed to set flavor information in a group")
+                self.logger.error("failed to set flavor information in a group")
                 return None
 
         self.data_lock.acquire()
@@ -359,7 +358,9 @@ class Ostro(object):
             """Reset flavor resource and try again."""
             if self._set_flavors() is False:
                 return False
 
+            self.resource.update_topology()
+
             flavor = self.resource.get_flavor(_vm.flavor)
             if flavor is None:
                 return False
@@ -534,15 +535,23 @@ class Ostro(object):
 
     def _add_vm_to_host(self, _uuid, _h_uuid, _host_name, _vcpus, _mem,
                         _local_disk):
-        vm_id = None
-        if _h_uuid is None:
-            vm_id = ("none", "none", _uuid)
+        existing_vm = False
+        if self._check_uuid(_uuid, _host_name) is True:
+            existing_vm = True
         else:
-            vm_id = (_h_uuid, "none", _uuid)
+            if self._check_h_uuid(_h_uuid, _host_name) is True:
+                existing_vm = True
 
-        self.resource.add_vm_to_host(_host_name, vm_id, _vcpus, _mem,
-                                     _local_disk)
-        self.resource.update_host_time(_host_name)
+        if existing_vm is False:
+            vm_id = None
+            if _h_uuid is None:
+                vm_id = ("none", "none", _uuid)
+            else:
+                vm_id = (_h_uuid, "none", _uuid)
+
+            self.resource.add_vm_to_host(_host_name, vm_id, _vcpus, _mem,
+                                         _local_disk)
+            self.resource.update_host_time(_host_name)
 
     def _remove_vm_from_host(self, _uuid, _h_uuid, _host_name, _vcpus, _mem,
                              _local_disk):
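Note: the reworked _add_vm_to_host above registers a VM with the resource tracker only when the host does not already know it, checking both the physical UUID and the orchestration UUID. Below is a minimal standalone sketch of that guard, condensing the two _check_* calls into one expression; everything except the add_vm_to_host/update_host_time calls and the id tuple shape is an illustrative stand-in, not the engine's API.

def add_vm_if_new(resource, host_name, uuid, h_uuid, vcpus, mem, local_disk,
                  check_uuid, check_h_uuid):
    """Sketch: register a VM on a host only if it is not already tracked."""
    # A VM counts as existing when either identifier is already on the host.
    existing_vm = check_uuid(uuid, host_name) or check_h_uuid(h_uuid, host_name)

    if not existing_vm:
        # Same id tuple the engine builds: (orchestration id, "none", uuid).
        vm_id = ("none", "none", uuid) if h_uuid is None else (h_uuid, "none", uuid)
        resource.add_vm_to_host(host_name, vm_id, vcpus, mem, local_disk)
        resource.update_host_time(host_name)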
@@ -97,9 +97,6 @@ class Compute(object):
                 if host.name not in logical_group.vms_per_host.keys():
                     logical_group.vms_per_host[host.name] = []
 
-                self.logger.info("adding Host LogicalGroup: " +
-                                 str(host.__dict__))
-
                 _hosts[host.name] = host
 
         except (ValueError, KeyError, TypeError):
@@ -126,9 +123,6 @@ class Compute(object):
                 metadata[mk] = a.metadata.get(mk)
             aggregate.metadata = metadata
 
-            self.logger.info("adding aggregate LogicalGroup: " +
-                             str(aggregate.__dict__))
-
             _logical_groups[aggregate.name] = aggregate
 
             for hn in a.hosts:
@@ -287,7 +281,7 @@ class Compute(object):
 
             flavor.disk_cap = root_gb + ephemeral_gb + swap_mb / float(1024)
 
-            self.logger.info("adding flavor " + str(flavor.__dict__))
+            # self.logger.info("adding flavor " + str(flavor.__dict__))
 
             _flavors[flavor.name] = flavor
 
@@ -384,7 +384,7 @@ class ComputeManager(threading.Thread):
 
         status = compute.set_flavors(flavors)
         if status != "success":
-            self.logger.error("ComputeManager: " + status)
+            self.logger.error(status)
             return False
 
         self.data_lock.acquire()
@@ -1031,12 +1031,20 @@ class Resource(object):
                           str(host.local_disk_cap) + ", avail_local_disk = " +
                           str(host.avail_local_disk_cap))
 
-    def get_flavor(self, _name):
+    def get_flavor(self, _id):
         """Return flavor according to name passed in."""
         flavor = None
 
-        if _name in self.flavors.keys():
-            if self.flavors[_name].status == "enabled":
-                flavor = self.flavors[_name]
+        if _id in self.flavors.keys():
+            flavor = self.flavors[_id]
+        else:
+            for _, f in self.flavors.iteritems():
+                if f.flavor_id == _id:
+                    flavor = f
+                    break
+
+        if flavor is not None:
+            if flavor.status != "enabled":
+                flavor = None
 
         return flavor
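Note: get_flavor now accepts either a flavor name (the key in self.flavors) or a Nova flavor id, and a match is discarded unless the flavor's status is "enabled". A self-contained sketch of that lookup order follows; the Flavor record here is a stand-in for illustration, not the engine's class.

class Flavor(object):
    """Stand-in for the engine's flavor record."""
    def __init__(self, name, flavor_id, status="enabled"):
        self.name = name
        self.flavor_id = flavor_id
        self.status = status


def get_flavor(flavors, key):
    """Match by name first, then fall back to the flavor id."""
    flavor = flavors.get(key)
    if flavor is None:
        for f in flavors.values():
            if f.flavor_id == key:
                flavor = f
                break
    # Disabled flavors are treated as missing.
    if flavor is not None and flavor.status != "enabled":
        flavor = None
    return flavor


# Both lookups resolve to the same flavor object.
flavors = {"m1.small": Flavor("m1.small", "2")}
assert get_flavor(flavors, "m1.small") is get_flavor(flavors, "2")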
@@ -44,4 +44,5 @@ opt_valet = \
     [
         cfg.IntOpt('TRIES_TO_CREATE', default=5),
         cfg.IntOpt('PAUSE', default=5),
+        cfg.IntOpt('TRIES_TO_SHOW_SERVER', default=10),
     ]
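Note: TRIES_TO_SHOW_SERVER is a new integer option in the valet group; the analyzer below reads it as CONF.valet.TRIES_TO_SHOW_SERVER. A minimal sketch of registering and reading such an option with plain oslo.config is shown here; the tempest plugin wires the group up elsewhere, so this standalone registration is illustrative only.

from oslo_config import cfg

CONF = cfg.CONF
CONF.register_group(cfg.OptGroup(name='valet'))
CONF.register_opts([cfg.IntOpt('TRIES_TO_SHOW_SERVER', default=10)],
                   group='valet')

# Falls back to the default (10) unless overridden by a config file.
print(CONF.valet.TRIES_TO_SHOW_SERVER)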
@@ -18,6 +18,7 @@
 from collections import defaultdict
 import os
 from tempest import config
+import time
 import traceback
 
 CONF = config.CONF
@@ -37,6 +38,7 @@ class Analyzer(object):
         self.resource_name = {}
         self.instance_on_server = {}
         self.group_instance_name = {}
+        self.tries = CONF.valet.TRIES_TO_SHOW_SERVER
 
     def check(self, resources):
         """Checking if all instances are on the Appropriate hosts and racks."""
@@ -96,10 +98,19 @@ class Analyzer(object):
         servers_list = self.nova_client.list_servers()
 
         for i in range(len(servers_list["servers"])):
-            server = \
-                self.nova_client.show_server(servers_list["servers"][i]["id"])
-            self.instance_on_server[servers_list["servers"][i]["name"]] = \
-                server["server"]["OS-EXT-SRV-ATTR:host"]
+            self.log.log_debug("show_server %s from list %s " %
+                               (servers_list["servers"][i]["id"], servers_list["servers"]))
+            try:
+                server = \
+                    self.nova_client.show_server(servers_list["servers"][i]["id"])
+                self.instance_on_server[servers_list["servers"][i]["name"]] = \
+                    server["server"]["OS-EXT-SRV-ATTR:host"]
+            except Exception:
+                self.log.log_error("Exception trying to show_server: %s" % traceback.format_exc())
+                if self.tries > 0:
+                    time.sleep(CONF.valet.PAUSE)
+                    self.init_servers_list()
+                    self.tries -= 1
 
     def get_instance_name(self, res_name):
         """Return instance name (resource name)."""
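Note: the new init_servers_list wraps each show_server call in a try/except; on failure it logs the traceback, sleeps CONF.valet.PAUSE seconds, and re-runs the listing until self.tries is exhausted, which lets parallel test runs tolerate servers that are still settling. The same bounded-retry budget is sketched below in isolation with generic names; the analyzer re-enters init_servers_list recursively rather than looping, but the budget works the same way.

import time


def call_with_retries(fn, tries, pause_seconds):
    """Call fn, retrying on any exception until the retry budget runs out."""
    while True:
        try:
            return fn()
        except Exception:
            if tries <= 0:
                raise
            tries -= 1
            time.sleep(pause_seconds)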