Valet on Stable Newton Devstack
Changes required for Valet to function properly when running on the stable
Newton version of devstack.

Task: #4670
Story: #2001035
Change-Id: I5d79fb636e9fe1db446ba0d7e749c59db5b10318
This commit is contained in:
parent 4dcf7970e4
commit 0edc752c46
@@ -99,10 +99,9 @@ class ValetFilter(filters.BaseHostFilter):
         cfg.CONF.register_opts(opts, group=opt_group)

     # TODO(JD): Factor out common code between this and the cinder filter
-    def filter_all(self, filter_obj_list, filter_properties):
+    def filter_all(self, filter_obj_list, request_spec):
         '''Filter all hosts in one swell foop'''

         hints_key = 'scheduler_hints'
         orch_id_key = 'heat_resource_uuid'

         ad_hoc = False
@@ -113,10 +112,9 @@ class ValetFilter(filters.BaseHostFilter):
         failure_mode = opt[self.opt_failure_mode_str]

         # Get the resource_id (physical id) and host candidates
-        request_spec = filter_properties.get('request_spec')
-        instance_properties = request_spec.get('instance_properties')
-        res_id = instance_properties.get('uuid')
+        res_id = request_spec.instance_uuid
         hosts = [obj.host for obj in filter_obj_list]
+        hints = request_spec.scheduler_hints

         # TODO(JD): If we can't reach Valet at all, we may opt to fail
         # TODO(JD): all hosts depending on a TBD config flag.
@@ -128,6 +126,7 @@ class ValetFilter(filters.BaseHostFilter):
             self._authorize()
         except Exception as ex:
             failed = ex

         if failed:
             msg = _LW("Failed to filter the hosts, failure mode is %s")
             LOG.warn(msg % failure_mode)
@@ -136,16 +135,14 @@ class ValetFilter(filters.BaseHostFilter):
                 yield_all = True
             else:
                 LOG.error(failed)
-        # if not filter_properties.get(hints_key, {}).has_key(orch_id_key):
-        elif orch_id_key not in filter_properties.get(hints_key, {}):
+        elif orch_id_key not in hints:
             msg = _LW("Valet: Heat Stack Lifecycle Scheduler Hints not found. "
                       "Performing ad-hoc placement.")
             LOG.info(msg)
             ad_hoc = True

             # We'll need the flavor.
-            instance_type = filter_properties.get('instance_type')
-            flavor = instance_type.get('name')
+            flavor = request_spec.flavor.flavorid

             # Because this wasn't orchestrated, there's no stack.
             # We're going to compose a resource as if there as one.
@@ -163,7 +160,7 @@ class ValetFilter(filters.BaseHostFilter):

             # Only add the AZ if it was expressly defined
             res_properties = resources[res_id]["properties"]
-            a_zone = instance_properties.get('availability_zone')
+            a_zone = request_spec.availability_zone
             if a_zone:
                 res_properties["availability_zone"] = a_zone

@@ -216,7 +213,7 @@ class ValetFilter(filters.BaseHostFilter):
             else:
                 yield_all = False
         else:
-            orch_id = filter_properties[hints_key][orch_id_key]
+            orch_id = hints[orch_id_key]

             count = 0
             response = None
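The hunks above adapt ValetFilter.filter_all() to the RequestSpec object that the Newton-era nova scheduler passes in place of the old filter_properties dict, so the instance UUID, scheduler hints, flavor, and availability zone are read as attributes rather than nested dict lookups. A minimal sketch of that access pattern, using a hypothetical namedtuple stand-in rather than nova's real RequestSpec class:

# Hedged sketch: a stand-in for nova's RequestSpec, only to illustrate the
# attribute-style access the filter now relies on. The stand-in class and
# sample values are hypothetical; the field names mirror the diff above.
from collections import namedtuple

Flavor = namedtuple('Flavor', ['flavorid'])
RequestSpecStub = namedtuple(
    'RequestSpecStub',
    ['instance_uuid', 'scheduler_hints', 'flavor', 'availability_zone'])

spec = RequestSpecStub(
    instance_uuid='8c5a2ab7-0000-0000-0000-000000000000',
    scheduler_hints={'heat_resource_uuid': 'stack-resource-id'},
    flavor=Flavor(flavorid='m1.small'),
    availability_zone='nova')

res_id = spec.instance_uuid      # was instance_properties.get('uuid')
hints = spec.scheduler_hints     # was filter_properties.get(hints_key, {})
flavor = spec.flavor.flavorid    # was instance_type.get('name')
a_zone = spec.availability_zone  # was instance_properties.get('availability_zone')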
@@ -26,7 +26,7 @@ def _messaging_notifier_from_config(config):
     transport = messaging.get_notification_transport(cfg.CONF, transport_url)
     notifier = messaging.Notifier(transport, driver='messaging',
                                   publisher_id='valet',
-                                  topic='notifications', retry=10)
+                                  topics=['notifications'], retry=10)
     return notifier
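This hunk follows oslo.messaging's move from the deprecated singular topic keyword to topics, which takes a list of topic names. A minimal sketch of the same construction against newer oslo.messaging, assuming the transport URL is already available through cfg.CONF; the actual emission is left commented out:

# Hedged sketch of building a Notifier with the list-valued topics argument;
# 'valet' and 'notifications' mirror the diff above, the event name is made up.
from oslo_config import cfg
import oslo_messaging as messaging

transport = messaging.get_notification_transport(cfg.CONF)
notifier = messaging.Notifier(transport,
                              driver='messaging',
                              publisher_id='valet',
                              topics=['notifications'],  # list, not topic='...'
                              retry=10)
# notifier.info({}, 'valet.example.event', {'detail': 'payload'})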
@@ -17,10 +17,14 @@

 from abc import ABCMeta
 from abc import abstractmethod
+from importlib import import_module
 import inspect
+import os
+import pkgutil
+import uuid

 from pecan import conf
 import six
-import uuid

 from valet import api
 from valet.api.common.i18n import _
@@ -29,12 +33,13 @@ from valet.common.music import Music

 def get_class(kls):
     """Returns a class given a fully qualified class name"""
-    parts = kls.split('.')
-    module = ".".join(parts[:-1])
-    mod = __import__(module)
-    for comp in parts[1:]:
-        mod = getattr(mod, comp)
-    return mod
+    pkg_path = os.path.dirname(__file__)
+    for loader, mod_name, is_pkg in pkgutil.iter_modules([pkg_path]):
+        mod = import_module('valet.api.db.models.music.' + mod_name)
+        cls = getattr(mod, kls, None)
+        if cls:
+            return cls
+    return None


 class abstractclassmethod(classmethod):  # pylint: disable=C0103,R0903
@@ -200,8 +205,7 @@ class Query(object):
         if inspect.isclass(model):
             self.model = model
         elif isinstance(model, basestring):
-            self.model = get_class(
-                'valet.api.db.models.music.placements.' + model)
+            self.model = get_class(model)
         assert inspect.isclass(self.model)

     def __kwargs(self):
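The rewritten get_class() no longer walks a dotted path with __import__; it scans the sibling modules of the package with pkgutil.iter_modules(), imports each one with import_module(), and returns the first module attribute matching the requested class name. A generic sketch of that lookup pattern, with a placeholder package name rather than Valet's real module path:

# Hedged sketch of a pkgutil-based class lookup like the one above.
# find_class() and 'my_package.models' are placeholders, not Valet code.
from importlib import import_module
import pkgutil


def find_class(package_name, package_path, class_name):
    """Return the first class named class_name found in the package's modules."""
    for _loader, mod_name, _is_pkg in pkgutil.iter_modules([package_path]):
        mod = import_module(package_name + '.' + mod_name)
        cls = getattr(mod, class_name, None)
        if cls:
            return cls
    return None

# Typical call site, mirroring the diff: the search path is the directory of
# the calling package, e.g.
#   find_class('my_package.models', os.path.dirname(__file__), 'Placement')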
@@ -17,12 +17,12 @@ from pecan.commands.base import BaseCommand

 from valet import api
 from valet.api.common.i18n import _
 from valet.api.db import models
-from valet.api.db.models import Event
-from valet.api.db.models import Group
-from valet.api.db.models import Placement
-from valet.api.db.models import PlacementRequest
-from valet.api.db.models import PlacementResult
-from valet.api.db.models import Plan
+from valet.api.db.models.music.groups import Group
+from valet.api.db.models.music.ostro import Event
+from valet.api.db.models.music.ostro import PlacementRequest
+from valet.api.db.models.music.ostro import PlacementResult
+from valet.api.db.models.music.placements import Placement
+from valet.api.db.models.music.plans import Plan
 from valet.common.conf import get_logger
 from valet.common.conf import init_conf

@@ -35,8 +35,6 @@ class PopulateCommand(BaseCommand):
         super(PopulateCommand, self).run(args)
         try:
             init_conf("populate.log")
-            # cfg.CONF.log_file = "populate.log"
-            # cfg.CONF.use_stderr = True
             LOG = api.LOG = get_logger("populate")
             LOG.info(_("Loading environment"))
             self.load_app()
@@ -65,12 +65,12 @@ class ListenerManager(threading.Thread):
             self.MUSIC = {'engine': engine,
                           'keyspace': self.config.music.keyspace}
             self.listener_logger.debug(
-                'Storing in music on %s, keyspace %s' %
-                (self.config.music.host, self.config.music.keyspace))
+                'Storing in music on %s, keyspace %s',
+                self.config.music.hosts, self.config.music.keyspace)

-            self.listener_logger.debug('Connecting to %s, with %s' %
-                                       (self.config.messaging.host,
-                                        self.config.messaging.username))
+            self.listener_logger.debug('Connecting to %s, with %s',
+                                       self.config.messaging.host,
+                                       self.config.messaging.username)
             credentials = pika.PlainCredentials(self.config.messaging.username,
                                                 self.config.messaging.password)
             parameters = pika.ConnectionParameters(self.config.messaging.host,
@@ -103,7 +103,7 @@ class ListenerManager(threading.Thread):
             # Bind the queue to the selected exchange
             channel.queue_bind(exchange=exchange_name, queue=queue_name,
                                routing_key=binding_key)
-            self.listener_logger.info('Channel is bound,listening on%s '
+            self.listener_logger.info('Channel is bound,listening on %s '
                                       'exchange %s',
                                       self.config.messaging.host,
                                       self.config.events_listener.exchange)
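The logging changes in this listener code (and the similar ones in the Resource and TopologyManager hunks that follow) share one pattern: pass the format arguments to the logger instead of %-formatting the message yourself, so interpolation is deferred until the record is actually handled. A small illustration with the standard logging module; the logger name and values are arbitrary:

# Hedged illustration of lazy logger formatting, as applied throughout this commit.
import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger('valet.example')
host, keyspace = 'music-node-1', 'valet_keyspace'

# Eager: the message string is built even when DEBUG is disabled.
logger.debug('Storing in music on %s, keyspace %s' % (host, keyspace))

# Lazy: arguments are formatted only when the record is emitted.
logger.debug('Storing in music on %s, keyspace %s', host, keyspace)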
@@ -64,6 +64,7 @@ class Resource(object):
     def bootstrap_from_db(self, _resource_status):
         """Return True if bootsrap resource from database successful."""
         try:
+            self.logger.info("Resource status from DB = %s", _resource_status)
             logical_groups = _resource_status.get("logical_groups")
             if logical_groups:
                 for lgk, lg in logical_groups.iteritems():
@@ -376,11 +377,11 @@ class Resource(object):
                 self.logger.debug(" vms")
                 debug_msg = " orch_id = %s uuid = %s"
                 for v in lg.vm_list:
-                    self.logger.debug(debug_msg % (v[0], v[2]))
+                    self.logger.debug(debug_msg, v[0], v[2])
                 self.logger.debug(" hosts")
                 for h, v in lg.vms_per_host.iteritems():
-                    self.logger.debug(" host = %s" % h)
-                    self.logger.debug(" vms = %s" %
+                    self.logger.debug(" host = %s", h)
+                    self.logger.debug(" vms = %s",
                                       str(len(lg.vms_per_host[h])))
                 host = None
                 if h in self.hosts.keys():
@@ -508,9 +509,8 @@ class Resource(object):

         if host.status != _st:
             host.status = _st
-            self.logger.warn(
-                "Resource.update_host_resources: host(%s) status changed" %
-                _hn)
+            self.logger.info(
+                "Resource.update_host_resources: host(%s) status changed", _hn)
             updated = True

         # FIXME(GJ): should check cpu, memm and disk here?
@@ -578,7 +578,7 @@ class Resource(object):
         for lgk in _host.memberships.keys():
             if lgk not in self.logical_groups.keys():
                 self.logger.warn("logical group (%s) missing while "
-                                 "removing %s" % (lgk, _h_uuid))
+                                 "removing %s", lgk, _h_uuid)
                 continue
             lg = self.logical_groups[lgk]

@@ -618,7 +618,7 @@ class Resource(object):
         for lgk in _host.memberships.keys():
             if lgk not in self.logical_groups.keys():
                 self.logger.warn("logical group (%s) missing while "
-                                 "removing %s" % (lgk, _uuid))
+                                 "removing %s", lgk, _uuid)
                 continue
             lg = self.logical_groups[lgk]
@@ -190,6 +190,10 @@ class HostGroup(object):
         for ck in self.child_resources.keys():
             child_list.append(ck)

+        parent_name = None
+        if self.parent_resource:
+            parent_name = self.parent_resource.name
+
         return {'status': self.status,
                 'host_type': self.host_type,
                 'membership_list': membership_list,
@@ -202,7 +206,7 @@ class HostGroup(object):
                 'local_disk': self.local_disk_cap,
                 'original_local_disk': self.original_local_disk_cap,
                 'avail_local_disk': self.avail_local_disk_cap,
-                'parent': self.parent_resource.name,
+                'parent': parent_name,
                 'children': child_list,
                 'vm_list': self.vm_list,
                 'last_update': self.last_update}
@@ -104,7 +104,7 @@ class TopologyManager(threading.Thread):

             new_host.last_update = time.time()

-            self.logger.warn("TopologyManager: new host (" +
+            self.logger.info("TopologyManager: new host (" +
                              new_host.name + ") added from configuration")
             updated = True

@@ -116,7 +116,7 @@ class TopologyManager(threading.Thread):

             host.last_update = time.time()

-            self.logger.warn("TopologyManager: host (" +
+            self.logger.info("TopologyManager: host (" +
                              host.name + ") removed from configuration")
             updated = True

@@ -127,7 +127,7 @@ class TopologyManager(threading.Thread):

             new_host_group.last_update = time.time()

-            self.logger.warn("TopologyManager: new host_group (" +
+            self.logger.info("TopologyManager: new host_group (" +
                              new_host_group.name + ") added")
             updated = True

@@ -138,7 +138,7 @@ class TopologyManager(threading.Thread):

             host_group.last_update = time.time()

-            self.logger.warn("TopologyManager: host_group (" +
+            self.logger.info("TopologyManager: host_group (" +
                              host_group.name + ") disabled")
             updated = True

@@ -191,7 +191,7 @@ class TopologyManager(threading.Thread):
         if "infra" not in _rhost.tag:
             _rhost.tag.append("infra")
             updated = True
-            self.logger.warn("TopologyManager: host (" + _rhost.name +
+            self.logger.info("TopologyManager: host (" + _rhost.name +
                              ") updated (tag)")

         if (_rhost.host_group is None or
@@ -203,7 +203,7 @@ class TopologyManager(threading.Thread):
             else:
                 _rhost.host_group = self.resource.datacenter
             updated = True
-            self.logger.warn("TopologyManager: host (" + _rhost.name +
+            self.logger.info("TopologyManager: host (" + _rhost.name +
                              ") updated (host_group)")

         return updated
@@ -214,22 +214,23 @@ class TopologyManager(threading.Thread):
         if _hg.host_type != _rhg.host_type:
             _rhg.host_type = _hg.host_type
             updated = True
-            self.logger.warn("TopologyManager: host_group (" + _rhg.name +
+            self.logger.info("TopologyManager: host_group (" + _rhg.name +
                              ") updated (hosting type)")

         if _rhg.status == "disabled":
             _rhg.status = "enabled"
             updated = True
-            self.logger.warn("TopologyManager: host_group (" + _rhg.name +
+            self.logger.info("TopologyManager: host_group (" + _rhg.name +
                              ") updated (enabled)")

         if _hg.parent_resource != _rhg.parent_resource:
             if _hg.parent_resource.name in self.resource.host_groups.keys():
                 _rhg.parent_resource = \
                     self.resource.host_groups[_hg.parent_resource.name]
             else:
                 _rhg.parent_resource = self.resource.datacenter
             updated = True
-            self.logger.warn("TopologyManager: host_group (" + _rhg.name +
+            self.logger.info("TopologyManager: host_group (" + _rhg.name +
                              ") updated (parent host_group)")

         for rk in _hg.child_resources.keys():
@@ -244,7 +245,7 @@ class TopologyManager(threading.Thread):
             elif _rhg.host_type == "cluster":
                 _rhg.child_resources[rk] = self.resource.host_groups[rk]
             updated = True
-            self.logger.warn("TopologyManager: host_group (" + _rhg.name +
+            self.logger.info("TopologyManager: host_group (" + _rhg.name +
                              ") updated (new child host)")

         for rrk in _rhg.child_resources.keys():
@@ -256,7 +257,7 @@ class TopologyManager(threading.Thread):
             if exist is False:
                 del _rhg.child_resources[rrk]
                 updated = True
-                self.logger.warn("TopologyManager: host_group (" + _rhg.name +
+                self.logger.info("TopologyManager: host_group (" + _rhg.name +
                                  ") updated (child host removed)")

         return updated
@@ -268,7 +269,7 @@ class TopologyManager(threading.Thread):
             if rc not in self.resource.datacenter.region_code_list:
                 self.resource.datacenter.region_code_list.append(rc)
                 updated = True
-                self.logger.warn("TopologyManager: datacenter updated "
+                self.logger.info("TopologyManager: datacenter updated "
                                  "(new region code, " + rc + ")")

         code_list = self.resource.datacenter.region_code_list
@@ -278,7 +279,7 @@ class TopologyManager(threading.Thread):
         if alen != blen:
             updated = True
             self.resource.datacenter.region_code_list = code_list
-            self.logger.warn("datacenter updated (region code removed)")
+            self.logger.info("datacenter updated (region code removed)")

         for rk in _datacenter.resources.keys():
             exist = False
@@ -295,7 +296,7 @@ class TopologyManager(threading.Thread):
                 self.resource.datacenter.resources[rk] = \
                     self.resource.hosts[rk]
                 updated = True
-                self.logger.warn("TopologyManager: datacenter updated "
+                self.logger.info("TopologyManager: datacenter updated "
                                  "(new resource)")

         for rrk in self.resource.datacenter.resources.keys():
@@ -307,7 +308,7 @@ class TopologyManager(threading.Thread):
             if exist is False:
                 del self.resource.datacenter.resources[rrk]
                 updated = True
-                self.logger.warn("TopologyManager: datacenter updated "
+                self.logger.info("TopologyManager: datacenter updated "
                                  "(resource removed)")

         return updated