Merge "Replace LOG.warn with LOG.warning"

Zuul 2017-12-29 01:07:23 +00:00 committed by Gerrit Code Review
commit 281951c54a
8 changed files with 52 additions and 51 deletions
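
Background: LOG here is an oslo.log logger, which wraps Python's standard logging module; there Logger.warn survives only as a deprecated alias of Logger.warning, and Python 3 emits a DeprecationWarning when the alias is called. Hence this mechanical rename. A minimal sketch of the behavior being avoided, using a plain stdlib logger (the logger name "demo" is illustrative, not part of this change):

    import logging
    import warnings

    logging.basicConfig(level=logging.WARNING)
    LOG = logging.getLogger("demo")

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        LOG.warn("old spelling")     # deprecated alias; triggers DeprecationWarning
        LOG.warning("new spelling")  # the supported method

    # Prints the deprecation message recorded for the warn() call.
    print([str(w.message) for w in caught])

Running a test suite with python -Wd (or -W error::DeprecationWarning) surfaces any LOG.warn call sites that a sweep like this one missed.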

@@ -315,8 +315,8 @@ class AppHandler(object):
             if placement is None:
                 return None
             elif placement.uuid == "none":
-                LOG.warn("vm (" + rk + ") in original stack missing. "
-                         "Perhaps it was deleted?")
+                LOG.warning("vm (" + rk + ") in original stack "
+                            "missing. Perhaps it was deleted?")
                 if rk in _app_topology.stack["placements"].keys():
                     del _app_topology.stack["placements"][rk]
@@ -329,10 +329,10 @@ class AppHandler(object):
                 if placement.stack_id is None or \
                         placement.stack_id == "none":
-                    LOG.warn("stack id in valet record is unknown")
+                    LOG.warning("stack id in valet record is unknown")
                 else:
-                    LOG.warn("stack id in valet record is "
-                             "different")
+                    LOG.warning("stack id in valet record is "
+                                "different")

                 curr_state = None
                 if placement.state is None or \
@@ -372,8 +372,8 @@ class AppHandler(object):
                     return _app_topology
             else:
-                LOG.warn("vm (" + rk + ") in original stack does not have"
-                         " uuid")
+                LOG.warning("vm (" + rk + ") in original stack does not have"
+                            " uuid")

         if old_groups is not None and len(old_groups) > 0:
             for gk, g in old_groups.iteritems():
@@ -512,8 +512,8 @@ class AppHandler(object):
         flavor = self.resource.get_flavor(_flavor_name)
         if flavor is None:
-            LOG.warn("not exist flavor (" + _flavor_name + ") and try to "
-                     "refetch")
+            LOG.warning("not exist flavor (" + _flavor_name + ") and try to "
+                        "refetch")
             if not self.metadata.set_flavors():
                 status = "failed to read flavors from nova"

@@ -94,8 +94,8 @@ class DBHandler(object):
                 try:
                     args = json.loads(args_data)
                 except (ValueError, KeyError, TypeError):
-                    LOG.warn("DB: while decoding to json event = " + method +
-                             ":" + event_id)
+                    LOG.warning("DB: while decoding to json event = " + method +
+                                ":" + event_id)
                     continue

                 # TODO(lamt) this block of code can use refactoring
@@ -191,11 +191,11 @@ class DBHandler(object):
                         e.host is None or e.host == "none" or \
                         e.vcpus == -1 or e.mem == -1:
                     error_event_list.append(e)
-                    LOG.warn("DB: data missing in instance object event")
+                    LOG.warning("DB: data missing in instance object event")
             elif e.object_name == 'ComputeNode':
                 if e.host is None or e.host == "none":
                     error_event_list.append(e)
-                    LOG.warn("DB: data missing in compute object event")
+                    LOG.warning("DB: data missing in compute object event")
             elif e.method == "build_and_run_instance":
                 if e.uuid is None or e.uuid == "none":
                     error_event_list.append(e)

@@ -68,7 +68,7 @@ class ConstraintSolver(object):
         if len(candidate_list) == 0:
             self.status = "no candidate for node = " + _n.orch_id
-            LOG.warn(self.status)
+            LOG.warning(self.status)
             return candidate_list

         LOG.debug("num of candidates = " + str(len(candidate_list)))

@@ -80,7 +80,7 @@ class Optimizer(object):
             if host_name != "none" and \
                     host_name in _app_topology.candidate_list_map[orch_id]:
-                LOG.warn("vm is already placed in one of candidate hosts")
+                LOG.warning("vm is already placed in one of candidate hosts")

             if not _app_topology.update_placement_state(orch_id,
                                                         host=host_name):
@@ -282,7 +282,7 @@ class Optimizer(object):
         if host.update_uuid(_orch_id, _uuid) is True:
             self.resource.update_host_time(_host_name)
         else:
-            LOG.warn("fail to update uuid in host = " + host.name)
+            LOG.warning("fail to update uuid in host = " + host.name)

         self.resource.update_uuid_in_groups(_orch_id, _uuid, host)

@@ -173,7 +173,7 @@ class Ostro(object):
         # if decision_key is not None:
         #     self.ahandler.record_history(decision_key, result)
         # else:
-        #     LOG.warn("decision(" + decision_key + ") already made")
+        #     LOG.warning("decision(" + decision_key + ") already made")
         #     result = old_decision

         if app_topology.action in ("ping", "create", "replan",
@@ -224,7 +224,7 @@ class Ostro(object):
             query_result["status"] = "no type in query"

         if query_result["status"] != "ok":
-            LOG.warn(query_result["status"])
+            LOG.warning(query_result["status"])
             query_result["result"] = None

         return query_result

@@ -171,7 +171,7 @@ class Search(object):
             hk = self.app_topology.planned_vm_map[vk]
             if hk not in self.avail_hosts.keys():
                 # if prior host is not available
-                LOG.warn("host (" + hk + ") is not available")
+                LOG.warning("host (" + hk + ") is not available")
                 continue
             if vm.host is None or vm.host == "none":
                 vm.host = hk
@@ -186,12 +186,12 @@ class Search(object):
             hk = self.app_topology.planned_vm_map[vk]
             if hk not in self.avail_hosts.keys():
                 # if prior host is not available
-                LOG.warn("host (" + hk + ") is not available")
+                LOG.warning("host (" + hk + ") is not available")
                 continue
             if g.host is None or g.host == "none":
                 resource_name = search_helper.get_resource_of_level(hk, g.level, self.avail_hosts)
                 if resource_name is None:
-                    LOG.warn("host {} is not available".format(resource_name))
+                    LOG.warning("host {} is not available".format(resource_name))
                     continue
                 g.host = resource_name
             node = None
@@ -305,7 +305,7 @@ class Search(object):
         for lgk, lg in self.resource.groups.iteritems():
             if lg.status != "enabled" or \
                     (lg.group_type in ("AFF", "EX", "DIV") and len(lg.vm_list) == 0):
-                LOG.warn("group (" + lg.name + ") disabled")
+                LOG.warning("group (" + lg.name + ") disabled")
                 continue

             lgr = GroupResource()
@@ -601,7 +601,7 @@ class Search(object):
                 if resource_name not in resource_list:
                     resource_list.append(resource_name)
             else:
-                LOG.warn("cannot find candidate resource while replanning")
+                LOG.warning("cannot find candidate resource while replanning")

         for rk in resource_list:
             _avail_resources.set_candidate(rk)
@@ -648,7 +648,7 @@ class Search(object):
                 break
         else:
             if planned_resource is None:
-                LOG.warn("rollback candidate = " + resource_name)
+                LOG.warning("rollback candidate = " + resource_name)
                 self._rollback_resources(_n)
                 self._rollback_node_placement(_n)

                 if len(candidate_list) > 0 and \
@@ -660,7 +660,7 @@ class Search(object):
         if best_resource is None and len(candidate_list) == 0:
             if self.app_topology.status == "success":
                 self.app_topology.status = "no available hosts"
-            LOG.warn(self.app_topology.status)
+            LOG.warning(self.app_topology.status)

         return best_resource

@@ -78,7 +78,7 @@ class ComputeManager(threading.Thread):
     def _run(self):
         """Run this batch job."""
         if self.set_hosts() is not True:
-            LOG.warn("fail to set hosts from nova")
+            LOG.warning("fail to set hosts from nova")

     def set_hosts(self):
         """Check any inconsistency and perform garbage collection if necessary.
@@ -262,7 +262,7 @@ class ComputeManager(threading.Thread):
             else:
                 if hk != placement.host:
-                    LOG.warn("PANIC: placed in different host")
+                    LOG.warning("PANIC: placed in different host")

                 vm_info = _hosts[hk].get_vm_info(uuid=vk)
                 vm_info["stack_id"] = placement.stack_id
@@ -275,7 +275,7 @@ class ComputeManager(threading.Thread):
                     rhost.vm_list.append(vm_info)

                     inconsistent_hosts[hk] = rhost
-                    LOG.warn("host (" + rhost.name + ") updated (vm added)")
+                    LOG.warning("host (" + rhost.name + ") updated (vm added)")

                     # FIXME(gjung): add to corresponding groups with
                     # verification?
@@ -283,8 +283,8 @@ class ComputeManager(threading.Thread):
                 if placement.host in self.resource.hosts.keys():
                     old_rhost = self.resource.hosts[placement.host]
                     if old_rhost.remove_vm(uuid=vk) is True:
-                        LOG.warn("host (" + old_rhost.name +
-                                 ") updated (vm removed)")
+                        LOG.warning("host (" + old_rhost.name + ") "
+                                    "updated (vm removed)")

                         inconsistent_hosts[placement.host] = old_rhost
@@ -309,7 +309,7 @@ class ComputeManager(threading.Thread):
                 new_state = "created"

             if placement.state not in ("created", "rebuilt", "migrated"):
-                LOG.warn("vm is incomplete state = " + placement.state)
+                LOG.warning("vm is incomplete state = " + placement.state)

                 if (placement.state == "planned" or
                         placement.state == "building"):
@@ -332,7 +332,7 @@ class ComputeManager(threading.Thread):
             for vm_info in rhost.vm_list:
                 if vm_info["uuid"] is None or vm_info["uuid"] == "none":
-                    LOG.warn("host (" + rhost.name + ") pending vm removed")
+                    LOG.warning("host (" + rhost.name + ") pending vm removed")

                     deletion_list.append(vm_info)
@@ -348,8 +348,8 @@ class ComputeManager(threading.Thread):
                         return None

                 if vm_info["uuid"] not in _vm_locations.keys():
-                    LOG.warn("vm is missing with state = " +
-                             placement.state)
+                    LOG.warning("vm is missing with state = " +
+                                placement.state)

                     deletion_list.append(vm_info)
@@ -363,10 +363,11 @@ class ComputeManager(threading.Thread):
                         return None

                 elif _vm_locations[vm_info["uuid"]] != rk:
-                    LOG.warn("placed in different host")
+                    LOG.warning("placed in different host")

                     if rhost.remove_vm(uuid=vm_info["uuid"]) is True:
-                        LOG.warn("host (" + rk + ") updated (vm removed)")
+                        LOG.warning("host (" + rk + ") updated (vm "
+                                    "removed)")

                         inconsistent_hosts[rk] = rhost
@@ -376,7 +377,7 @@ class ComputeManager(threading.Thread):
                     # FIXME(gjung): placement.status?

             if len(deletion_list) > 0:
-                LOG.warn("host (" + rhost.name + ") updated (vms removed)")
+                LOG.warning("host (" + rhost.name + ") updated (vms removed)")

                 inconsistent_hosts[rk] = rhost
@@ -388,7 +389,7 @@ class ComputeManager(threading.Thread):
                         rhost, orch_id=vm_info["orch_id"])
                 else:
                     if not rhost.remove_vm(uuid=vm_info["uuid"]):
-                        LOG.warn("fail to remove vm from host")
+                        LOG.warning("fail to remove vm from host")

                     self.resource.remove_vm_from_groups(
                         rhost, uuid=vm_info["uuid"])

@@ -76,7 +76,7 @@ class Resource(object):
                 self.groups[lgk] = group

             if len(self.groups) == 0:
-                LOG.warn("no groups in db record")
+                LOG.warning("no groups in db record")

         flavors = _resource_status.get("flavors")
         if flavors:
@@ -92,7 +92,7 @@ class Resource(object):
                 self.flavors[fk] = flavor

             if len(self.flavors) == 0:
-                LOG.warn("no flavors in db record")
+                LOG.warning("no flavors in db record")

         hosts = _resource_status.get("hosts")
         if hosts:
@@ -122,7 +122,7 @@ class Resource(object):
                 self.hosts[hk] = host

             if len(self.hosts) == 0:
-                LOG.warn("no hosts in db record")
+                LOG.warning("no hosts in db record")

         host_groups = _resource_status.get("host_groups")
         if host_groups:
@@ -149,7 +149,7 @@ class Resource(object):
                 self.host_groups[hgk] = host_group

             if len(self.host_groups) == 0:
-                LOG.warn("no host_groups (rack)")
+                LOG.warning("no host_groups (rack)")

         dc = _resource_status.get("datacenter")
         if dc:
@@ -179,7 +179,7 @@ class Resource(object):
                     self.datacenter.resources[ck] = self.hosts[ck]

             if len(self.datacenter.resources) == 0:
-                LOG.warn("fail loading datacenter")
+                LOG.warning("fail loading datacenter")

             hgs = _resource_status.get("host_groups")
             if hgs:
@@ -406,7 +406,7 @@ class Resource(object):
         host = self.hosts[_vm_alloc["host"]]

         if host.exist_vm(orch_id=_vm_info["orch_id"], uuid=_vm_info["uuid"]):
-            LOG.warn("vm already exists in the host")
+            LOG.warning("vm already exists in the host")
             # host.remove_vm(orch_id=_vm_info["orch_id"],
             #                uuid=_vm_info["uuid"])
@@ -440,7 +440,7 @@ class Resource(object):
             host.disk_available_least += _vm_alloc["local_volume"]
             return True
         else:
-            LOG.warn("vm to be removed not exist")
+            LOG.warning("vm to be removed not exist")
             return False

     def update_host_resources(self, _hn, _st):
@@ -448,7 +448,7 @@ class Resource(object):
         host = self.hosts[_hn]
         if host.status != _st:
             host.status = _st
-            LOG.warn("host(" + _hn + ") status changed")
+            LOG.warning("host(" + _hn + ") status changed")
             return True
         else:
             return False
@@ -488,7 +488,7 @@ class Resource(object):
                 else:
                     success = False
             else:
-                LOG.warn("host not found while adding group")
+                LOG.warning("host not found while adding group")
                 return False

         return success
@@ -505,7 +505,7 @@ class Resource(object):
                 if lg.add_vm(_vm_info, _host.name) is True:
                     lg.last_update = time.time()
                 else:
-                    LOG.warn("vm already exists in group")
+                    LOG.warning("vm already exists in group")
             elif isinstance(_host, HostGroup):
                 if lg.group_type == "EX" or \
                         lg.group_type == "AFF" or lg.group_type == "DIV":
@@ -513,9 +513,9 @@ class Resource(object):
                     if lg.add_vm(_vm_info, _host.name) is True:
                         lg.last_update = time.time()
                     else:
-                        LOG.warn("vm already exists in group")
+                        LOG.warning("vm already exists in group")
             else:
-                LOG.warn("group not found while adding vm")
+                LOG.warning("group not found while adding vm")

         if isinstance(_host, Host) and _host.host_group is not None:
             self.add_vm_to_groups(_host.host_group, _vm_info, _groups_of_vm)
@@ -567,7 +567,7 @@ class Resource(object):
         for lgk in _host.memberships.keys():
             if lgk not in self.groups.keys():
-                LOG.warn("group (" + lgk + ") already removed")
+                LOG.warning("group (" + lgk + ") already removed")
                 continue

             lg = self.groups[lgk]