Resolve affinity group with flavor id

Fix tempest exclusivity test
default_ram_allocation_ratio to float
cpu & disk ratio to float (incorrect types for conf options)
valet-engine does not restart after manual kill

Change-Id: I3eae2bb2144a5857dae17b99fb868b57116cef05
parent 6185beead9
commit fc0d8fc3fc
@@ -4,7 +4,7 @@ if [ -z $VALET_KEYSPACE ]; then
     echo "ERR: VALET_KEYSPACE is not defined."
     exit
 else
-    sed -ie "s/#VALET_KEYSPACE#/${VALET_KEYSPACE}/g" ./populate.cql
+    sed -i.bak "s/#VALET_KEYSPACE#/${VALET_KEYSPACE}/g" ./populate.cql
 fi
 
 if [ -z $CASSANDRA_BIN ]; then
@@ -26,7 +26,7 @@ def terminate_thread(thread):
     if not thread.isAlive():
         return
 
-    print('valet watcher thread: notifier thread is alive... - kill it...')
+    # print('valet watcher thread: notifier thread is alive... - kill it...')
     exc = ctypes.py_object(SystemExit)
     res = ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(thread.ident), exc)
     if res == 0:
@@ -36,4 +36,4 @@ def terminate_thread(thread):
         # and you should call it again with exc=NULL to revert the effect
         ctypes.pythonapi.PyThreadState_SetAsyncExc(thread.ident, None)
         raise SystemError("PyThreadState_SetAsyncExc failed")
-    print('valet watcher thread exits')
+    # print('valet watcher thread exits')
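For context, terminate_thread() relies on CPython's PyThreadState_SetAsyncExc, which queues an exception to be raised inside another thread. A minimal standalone sketch of the mechanism (CPython only; the worker function and names are illustrative, not from the Valet code):

    import ctypes
    import threading
    import time

    def raise_in_thread(thread, exc_type=SystemExit):
        # Ask the interpreter to raise exc_type inside the target thread.
        res = ctypes.pythonapi.PyThreadState_SetAsyncExc(
            ctypes.c_long(thread.ident), ctypes.py_object(exc_type))
        if res == 0:
            raise ValueError("invalid thread id")
        elif res > 1:
            # Affected more than one thread: undo and bail out.
            ctypes.pythonapi.PyThreadState_SetAsyncExc(thread.ident, None)
            raise SystemError("PyThreadState_SetAsyncExc failed")

    def worker():
        while True:
            time.sleep(0.1)

    t = threading.Thread(target=worker)
    t.daemon = True
    t.start()
    raise_in_thread(t)
    t.join(timeout=2)

The exception is delivered asynchronously, only when the target thread next executes Python bytecode, so a thread blocked in a long C call will not die immediately.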
@@ -211,18 +211,13 @@ class Query(object):
 
     def __rows_to_objects(self, rows):
         """Convert query response rows to objects"""
-        try:
-            results = []
-            pk_name = self.model.pk_name()  # pylint: disable=E1101
-            for __, row in rows.iteritems():  # pylint: disable=W0612
-                the_id = row.pop(pk_name)
-                result = self.model(_insert=False, **row)
-                setattr(result, pk_name, the_id)
-                results.append(result)
-        except Exception:
-            import traceback
-            print(traceback.format_exc())
-
+        results = []
+        pk_name = self.model.pk_name()  # pylint: disable=E1101
+        for __, row in rows.iteritems():  # pylint: disable=W0612
+            the_id = row.pop(pk_name)
+            result = self.model(_insert=False, **row)
+            setattr(result, pk_name, the_id)
+            results.append(result)
        return Results(results)
 
     def all(self):
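Dropping the catch-all except here is deliberate: the old version printed the traceback and then fell through to return Results(results), silently handing callers a partial result set. A small illustration of that failure mode (hypothetical data, not Valet's model class):

    def rows_to_objects_old(rows):
        results = []
        try:
            for the_id, row in rows.items():
                results.append((the_id, row["flavor"]))  # KeyError on bad row
        except Exception:
            pass  # swallowed: caller cannot tell partial from complete
        return results

    rows = {1: {"flavor": "m1.small"}, 2: {}}
    print(rows_to_objects_old(rows))  # [(1, 'm1.small')] - row 2 silently lost

With the try/except removed, the exception now propagates to the caller, which can log it with full context instead of receiving truncated results.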
@@ -76,12 +76,7 @@ class ErrorsController(object):
         response.status = 401
         response.content_type = 'text/plain'
         LOG.error('unauthorized')
-        import traceback
-        traceback.print_stack()
-        LOG.error(self.__class__)
-        LOG.error(kw)
         response.body = _('Authentication required')
-        LOG.error(response.body)
         return response
 
     @expose('json')
@@ -37,13 +37,7 @@ class ConnectionError(Exception):
 
 
 def print_verbose(verbose, url, headers, body, rest_cmd, timeout):
-    """Print verbose data."""
-    # TODO(Chris Martin): Replace prints with logs
-    if verbose:
-        print("Sending Request:\nurl: %s\nheaders: "
-              "%s\nbody: %s\ncmd: %s\ntimeout: %d\n"
-              % (url, headers, body,
-                 rest_cmd.__name__ if rest_cmd is not None else None, timeout))
+    pass
 
 
 def pretty_print_json(json_thing, sort=True, indents=4):
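The function is stubbed out to a no-op rather than deleted, so existing call sites keep working. The removed TODO pointed at the eventual fix; one way it could be satisfied later, sketched with the standard logging module (illustrative, not part of this commit):

    import logging

    LOG = logging.getLogger(__name__)

    def print_verbose(verbose, url, headers, body, rest_cmd, timeout):
        # Route the same fields through the logger at DEBUG level
        # instead of printing to stdout.
        if verbose:
            LOG.debug("Sending Request: url=%s headers=%s body=%s cmd=%s timeout=%d",
                      url, headers, body,
                      rest_cmd.__name__ if rest_cmd is not None else None, timeout)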
@@ -31,7 +31,8 @@ ostro_cli_opts = [
 engine_group = cfg.OptGroup(name='engine', title='Valet Engine conf')
 engine_opts = [
     cfg.StrOpt('pid', default='/var/run/valet/ostro-daemon.pid'),
-    cfg.StrOpt('mode', default='live', help='sim will let Ostro simulate datacenter, while live will let it handle a real datacenter'),
+    cfg.StrOpt('mode', default='live', help='sim will let Ostro simulate datacenter, '
+                                            'while live will let it handle a real datacenter'),
     cfg.StrOpt('sim_cfg_loc', default='/etc/valet/engine/ostro_sim.cfg'),
     cfg.BoolOpt('network_control', default=False, help='whether network controller (i.e., Tegu) has been deployed'),
     cfg.StrOpt('network_control_url', default='http://network_control:29444/tegu/api'),
@@ -56,18 +57,19 @@ engine_opts = [
                help='Set trigger time or frequency for checking datacenter topology (i.e., call AIC Formation)'),
     cfg.IntOpt('topology_trigger_frequency', default=3600,
                help='Set trigger time or frequency for checking datacenter topology (i.e., call AIC Formation)'),
-    cfg.IntOpt('default_cpu_allocation_ratio', default=16, help='Set default overbooking ratios. '
-               'Note that each compute node can have its own ratios'),
-    cfg.IntOpt('default_ram_allocation_ratio', default=1.5, help='Set default overbooking ratios. '
-               'Note that each compute node can have its own ratios'),
-    cfg.IntOpt('default_disk_allocation_ratio', default=1, help='Set default overbooking ratios. '
-               'Note that each compute node can have its own ratios'),
-    cfg.IntOpt('static_cpu_standby_ratio', default=20, help='unused percentages of resources (i.e. standby) '
+    cfg.FloatOpt('default_cpu_allocation_ratio', default=16, help='Set default overbooking ratios. Note that '
+                 'each compute node can have its own ratios'),
+    cfg.FloatOpt('default_ram_allocation_ratio', default=1.5, help='Set default overbooking ratios. Note that '
+                 'each compute node can have its own ratios'),
+    cfg.FloatOpt('default_disk_allocation_ratio', default=1, help='Set default overbooking ratios. Note that '
+                 'each compute node can have its own ratios'),
+    cfg.FloatOpt('static_cpu_standby_ratio', default=20, help='unused percentages of resources (i.e. standby) '
                  'that are set aside for applications workload spikes.'),
-    cfg.IntOpt('static_mem_standby_ratio', default=20, help='unused percentages of resources (i.e. standby) '
+    cfg.FloatOpt('static_mem_standby_ratio', default=20, help='unused percentages of resources (i.e. standby) '
                  'that are set aside for applications workload spikes.'),
-    cfg.IntOpt('static_local_disk_standby_ratio', default=20, help='unused percentages of resources (i.e. standby) '
-               'that are set aside for applications workload spikes.'),
+    cfg.FloatOpt('static_local_disk_standby_ratio', default=20, help='unused percentages of resources (i.e. standby) '
+                 'that are set aside for applications workload '
+                 'spikes.'),
 ] + logger_conf("engine")
 
 listener_group = cfg.OptGroup(name='events_listener',
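The Int-to-Float switch is the "incorrect types for conf options" fix from the commit message: oslo.config validates option types, and default_ram_allocation_ratio was declared as an IntOpt while carrying the float default 1.5; operators also legitimately configure fractional overbooking ratios. A minimal standalone sketch of the corrected declaration, using only the public oslo.config API:

    from oslo_config import cfg

    engine_opts = [
        # A float default on an IntOpt is a type error; FloatOpt accepts
        # both integral and fractional ratios.
        cfg.FloatOpt('default_ram_allocation_ratio', default=1.5),
    ]

    conf = cfg.ConfigOpts()
    conf.register_opts(engine_opts, group='engine')
    conf([])  # parse an empty command line so values become readable
    print(conf.engine.default_ram_allocation_ratio)  # 1.5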
@@ -14,7 +14,6 @@
 # limitations under the License.
-
 """App Topology Parser.
 
 - Restrictions of nested groups: EX in EX, EX in DIV, DIV in EX, DIV in DIV
 - VM/group cannot exist in multiple EX groups
 - Nested group's level cannot be higher than nesting group
@@ -26,6 +25,7 @@
 OS::Heat::ResourceGroup
 """
 
+import six
 from valet.engine.optimizer.app_manager.app_topology_base \
     import VGroup, VGroupLink, VM, VMLink, LEVELS
 
@@ -92,7 +92,11 @@ class Parser(object):
                 else:
                     vm.name = vm.uuid
 
-                vm.flavor = r["properties"]["flavor"]
+                flavor_id = r["properties"]["flavor"]
+                if isinstance(flavor_id, six.string_types):
+                    vm.flavor = flavor_id
+                else:
+                    vm.flavor = str(flavor_id)
 
                 if "availability_zone" in r["properties"].keys():
                     az = r["properties"]["availability_zone"]
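This is the change the commit title refers to: a Heat template can supply flavor either as a name (a string) or as a bare numeric id, and the later flavor lookups key on strings, so the value is normalized here. A standalone sketch of the same normalization (values are illustrative):

    import six

    def normalize_flavor(flavor_id):
        # Keep strings as-is; coerce ids parsed out of the template
        # as ints/floats into their string form.
        if isinstance(flavor_id, six.string_types):
            return flavor_id
        return str(flavor_id)

    print(normalize_flavor("m1.small"))  # 'm1.small'
    print(normalize_flavor(42))          # '42'

On Python 2, six.string_types covers both str and unicode, which matters because YAML/JSON-parsed templates typically yield unicode strings.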
@@ -564,6 +564,8 @@ class MusicHandler(object):
             self.logger.error("DB: " + str(e))
             return False
 
+        self.logger.info("DB: resource status updated")
+
         return True
 
     def update_resource_log_index(self, _k, _index):
@@ -140,11 +140,11 @@ class Ostro(object):
             return False
 
         if len(resource_status) > 0:
-            self.logger.info("bootstrap from db")
+            self.logger.info("bootstrap from DB")
             if not self.resource.bootstrap_from_db(resource_status):
                 self.logger.error("failed to parse bootstrap data!")
 
-        self.logger.info("read bootstrap data from OpenStack")
+        self.logger.info("bootstrap from OpenStack")
         if not self._set_hosts():
             return False
 
@@ -303,12 +303,12 @@ class Ostro(object):
         for _, vm in app_topology.vms.iteritems():
             if self._set_vm_flavor_information(vm) is False:
                 self.status = "fail to set flavor information"
-                self.logger.error("failed to set flavor information ")
+                self.logger.error(self.status)
                 return None
         for _, vg in app_topology.vgroups.iteritems():
             if self._set_vm_flavor_information(vg) is False:
                 self.status = "fail to set flavor information in a group"
-                self.logger.error("failed to set flavor information in a group")
+                self.logger.error(self.status)
                 return None
 
         self.data_lock.acquire()
@@ -341,12 +341,12 @@ class Ostro(object):
 
     def _set_vm_flavor_information(self, _v):
         if isinstance(_v, VM):
-            if self._set_vm_flavor_properties(_v) is False:
-                return False
+            return self._set_vm_flavor_properties(_v)
         else:  # affinity group
             for _, sg in _v.subvgroups.iteritems():
                 if self._set_vm_flavor_information(sg) is False:
                     return False
+            return True
 
     def _set_vm_flavor_properties(self, _vm):
         flavor = self.resource.get_flavor(_vm.flavor)
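The added return True fixes a subtle bug: the affinity-group branch previously fell off the end of the function and implicitly returned None, and the callers above test the result with "is False", so a group that was processed successfully looked indistinguishable from one that was never checked. A tiny demonstration of the pitfall:

    def check_old(ok):
        if not ok:
            return False
        # no explicit return on success -> returns None

    result = check_old(True)
    print(result is False)  # False: the 'is False' guard passes...
    print(bool(result))     # False: ...but any truthiness check would fail

Returning an explicit True/False from every path removes the ambiguity for both styles of caller.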
@@ -403,12 +403,13 @@ class ComputeManager(threading.Thread):
                                  fk + ") added")
 
         for rfk in self.resource.flavors.keys():
+            rf = self.resource.flavors[rfk]
             if rfk not in _flavors.keys():
-                self.resource.flavors[rfk].status = "disabled"
+                rf.status = "disabled"
 
-                self.resource.flavors[rfk].last_update = time.time()
-                self.logger.warn("ComputeManager: flavor (" +
-                                 rfk + ") removed")
+                rf.last_update = time.time()
+                self.logger.warn("ComputeManager: flavor (" + rfk + ":" +
+                                 rf.flavor_id + ") removed")
 
         for fk in _flavors.keys():
             f = _flavors[fk]
@@ -416,8 +417,8 @@ class ComputeManager(threading.Thread):
 
             if self._check_flavor_spec_update(f, rf) is True:
                 rf.last_update = time.time()
-                self.logger.warn("ComputeManager: flavor (" +
-                                 fk + ") spec updated")
+                self.logger.warn("ComputeManager: flavor (" + fk + ":" +
+                                 rf.flavor_id + ") spec updated")
 
     def _check_flavor_spec_update(self, _f, _rf):
         spec_updated = False
@@ -672,31 +672,31 @@ class Resource(object):
 
         hs.last_update = time.time()
 
+    # called from handle_events
     def update_host_resources(self, _hn, _st, _vcpus, _vcpus_used, _mem, _fmem,
                               _ldisk, _fldisk, _avail_least):
-        """Return True if status or compute resources avail on host changed."""
         updated = False
 
         host = self.hosts[_hn]
 
         if host.status != _st:
             host.status = _st
-            self.logger.debug("Resource.update_host_resources: host status "
-                              "changed")
+            self.logger.debug("Resource.update_host_resources: host(" + _hn +
+                              ") status changed")
             updated = True
 
         if host.original_vCPUs != _vcpus or \
                 host.vCPUs_used != _vcpus_used:
-            self.logger.debug("Resource.update_host_resources: host cpu "
-                              "changed")
+            self.logger.debug("Resource.update_host_resources: host(" + _hn +
+                              ") cpu changed")
             host.original_vCPUs = _vcpus
             host.vCPUs_used = _vcpus_used
             updated = True
 
         if host.free_mem_mb != _fmem or \
                 host.original_mem_cap != _mem:
-            self.logger.debug("Resource.update_host_resources: host mem "
-                              "changed")
+            self.logger.debug("Resource.update_host_resources: host(" + _hn +
+                              ") mem changed")
             host.free_mem_mb = _fmem
             host.original_mem_cap = _mem
             updated = True
@@ -704,8 +704,9 @@ class Resource(object):
         if host.free_disk_gb != _fldisk or \
                 host.original_local_disk_cap != _ldisk or \
                 host.disk_available_least != _avail_least:
-            self.logger.debug("Resource.update_host_resources: host disk "
-                              "changed")
+            self.logger.debug("Resource.update_host_resources: host(" + _hn +
+                              ") disk changed")
+
             host.free_disk_gb = _fldisk
             host.original_local_disk_cap = _ldisk
             host.disk_available_least = _avail_least
@@ -49,9 +49,9 @@ priority=1
 host=valet1
 user=m04060
 stand_by_list=valet2
-start="ssh -o ConnectTimeout=1 %s@%s 'python /usr/local/lib/python2.7/dist-packages/valet/engine/optimizer/ostro_server/ostro_daemon.py -c restart'" % (user, host)
-stop="ssh -o ConnectTimeout=1 %s@%s 'python /usr/local/lib/python2.7/dist-packages/valet/engine/optimizer/ostro_server/ostro_daemon.py -c stop'" % (user, host)
-test="ssh -o ConnectTimeout=1 %s@%s 'python /usr/local/lib/python2.7/dist-packages/valet/engine/optimizer/ostro_server/health_checker.py ; exit $?'" % (user, host)
+start="ssh -o ConnectTimeout=1 %(user)s@%(host)s 'python /usr/local/lib/python2.7/dist-packages/valet/engine/optimizer/ostro_server/ostro_daemon.py -c restart'"
+stop="ssh -o ConnectTimeout=1 %(user)s@%(host)s 'python /usr/local/lib/python2.7/dist-packages/valet/engine/optimizer/ostro_server/ostro_daemon.py -c stop'"
+test="ssh -o ConnectTimeout=1 %(user)s@%(host)s 'python /usr/local/lib/python2.7/dist-packages/valet/engine/optimizer/ostro_server/health_checker.py ; exit $?'"
 
 
 [ValetApi]
@@ -60,7 +60,7 @@ priority=1
 host=valet1
 stand_by_list=valet2
 user=m04060
-start="ssh -o ConnectTimeout=1 %s@%s 'sudo service apache2 restart'" % (user, host)
-stop="ssh -o ConnectTimeout=1 %s@%s 'sudo apachectl stop'" % (user, host)
-test="exit $(wget -T 1 -t 1 -qO- http://%s:8090/v1 | grep CURRENT | wc -l)" % (host)
+start="ssh -o ConnectTimeout=1 %(user)s@%(host)s 'sudo service apache2 restart'"
+stop="ssh -o ConnectTimeout=1 %(user)s@%(host)s 'sudo apachectl stop'"
+test="exit $(wget -T 1 -t 1 -qO- http://%(host)s:8090/v1 | grep CURRENT | wc -l)"
 
@@ -215,7 +215,7 @@ class HaValetThread (threading.Thread):
         user = self.data.get(USER, None)
         self.use(user)
         my_priority = int(self.data.get(PRIORITY, 1))
-        start_command = eval(self.data.get(START_COMMAND, None))
+        start_command = self.data.get(START_COMMAND, None)
         stop_command = self.data.get(STOP_COMMAND, None)
         test_command = self.data.get(TEST_COMMAND, None)
         standby_list = self.data.get(STAND_BY_LIST)
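These ha_valet changes are the "valet-engine does not restart after manual kill" fix: the start/stop/test values in ha_valet.cfg used to be Python expressions that the watchdog ran through eval() against whatever user/host happened to be in scope, so a restart could target the wrong node. They are now inert %(name)s templates bound explicitly at each call site. A sketch of the difference, using the sample values from the shipped config (the elided '...' stands for the full daemon command):

    # Old style (config value was a Python expression, eval()'d later):
    #   start="ssh -o ConnectTimeout=1 %s@%s '... -c restart'" % (user, host)
    # New style: a plain template, filled per target host when used.
    start_command = "ssh -o ConnectTimeout=1 %(user)s@%(host)s '... -c restart'"

    cmd = start_command % {'user': 'm04060', 'host': 'valet2'}
    print(cmd)  # ssh -o ConnectTimeout=1 m04060@valet2 '... -c restart'

Besides fixing the wrong-host substitution, this removes an eval() of config-file content, which was effectively arbitrary code execution from a config file.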
@@ -247,31 +247,50 @@ class HaValetThread (threading.Thread):
                 continue
 
             self.log.info('checking status on - ' + host_in_list)
+<<<<<<< HEAD
             host = host_in_list
             host_active, host_priority = \
                 self._is_active(eval(test_command))
             host = self.data.get(HOST, 'localhost')
             self.log.info(host_in_list + ' - host_active = ' +
                           str(host_active) + ', ' + str(host_priority))
+=======
+            # host = host_in_list
+            host_active, host_priority = self._is_active(test_command % {'host': host_in_list, 'user': user})
+            # host = self.data.get(HOST, 'localhost')
+            self.log.info(host_in_list + ' - host_active = ' + str(host_active) + ', ' + str(host_priority))
+>>>>>>> da5d947... Resolve affinity group with flavor id
             # Check for split brain: 2 valets active
             if i_am_active and host_active:
                 self.log.info('found two live instances, '
                               'checking priorities')
                 should_be_active = self._should_be_active(host_priority, my_priority)
                 if should_be_active:
+<<<<<<< HEAD
                     self.log.info('deactivate myself, ' + host_in_list +
                                   ' already running')
                     # Deactivate myself
                     self._deactivate_process(eval(stop_command))
+=======
+                    self.log.info('deactivate myself, ' + host_in_list + ' already running')
+                    self._deactivate_process(stop_command % {'host': host, 'user': user})  # Deactivate myself
+>>>>>>> da5d947... Resolve affinity group with flavor id
                     i_am_active = False
                 else:
                     self.log.info('deactivate ' + self.data[NAME] +
                                   ' on ' + host_in_list +
                                   ', already running here')
+<<<<<<< HEAD
                     host = host_in_list
                     # Deactivate other valet
                     self._deactivate_process(eval(stop_command))
                     host = self.data.get(HOST, 'localhost')
+=======
+                    # host = host_in_list
+                    # Deactivate other valet
+                    self._deactivate_process(stop_command % {'host': host_in_list, 'user': user})
+                    # host = self.data.get(HOST, 'localhost')
+>>>>>>> da5d947... Resolve affinity group with flavor id
 
             # Track that at-least one valet is active
             any_active = any_active or host_active
@@ -301,6 +320,7 @@ class HaValetThread (threading.Thread):
 
                     last_start = now
                     priority_wait = False
+<<<<<<< HEAD
                     if (not i_am_active and my_priority == PRIMARY_SETUP) or \
                        (standby_list is not None):
                         self.log.info('no running instance found, '
@@ -313,6 +333,16 @@ class HaValetThread (threading.Thread):
                                       'on %s; last start %s' % (host, diff))
                         self._activate_process(start_command, my_priority)
                         host = self.data.get(HOST, 'localhost')
+=======
+                    if (not i_am_active and my_priority == PRIMARY_SETUP) or (standby_list is not None):
+                        self.log.info('no running instance found, starting here; last start %s' % diff)
+                        self._activate_process(start_command % {'host': host, 'user': user}, my_priority)
+                    else:
+                        # host = standby_list[0]  # LIMITATION - supporting only 1 stand by host
+                        self.log.info('no running instances found, starting on %s; last start %s' % (host, diff))
+                        self._activate_process(start_command % {'host': standby_list[0], 'user': user}, my_priority)
+                        # host = self.data.get(HOST, 'localhost')
+>>>>>>> da5d947... Resolve affinity group with flavor id
                     else:
                         priority_wait = True
                 else:
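Note that the hunks above add literal, unresolved Git conflict markers (<<<<<<< HEAD / ======= / >>>>>>> da5d947...) to the committed file, so the module will not even import until they are removed. Judging from the >>>>>>> side of each block, the intended resolution keeps the template-substitution branch; a hedged reconstruction of the resolved status-check block (not what the commit actually contains):

    # Presumable resolved form of the status check, keeping the
    # %(...)s-template branch and dropping the eval()-based HEAD side.
    self.log.info('checking status on - ' + host_in_list)
    host_active, host_priority = self._is_active(
        test_command % {'host': host_in_list, 'user': user})
    self.log.info(host_in_list + ' - host_active = ' +
                  str(host_active) + ', ' + str(host_priority))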
@@ -405,6 +435,7 @@ class HAValet(object):
             os.makedirs(LOG_DIR)
         self.log = None
 
+<<<<<<< HEAD
     @DeprecationWarning
     def _parse_valet_conf_v010(self, conf_file_name=DEFAULT_CONF_FILE,
                                process=''):
@@ -455,6 +486,8 @@ class HAValet(object):
             print('unable to open %s file for some reason' % conf_file_name)
         return cdata
 
+=======
+>>>>>>> da5d947... Resolve affinity group with flavor id
     def _valid_process_conf_data(self, process_data):
         """Valid Process conf data.
 
@@ -49,9 +49,9 @@ priority=2
 host=valet2
 user=m04060
 stand_by_list=valet1
-start="ssh -o ConnectTimeout=1 %s@%s 'python /usr/local/lib/python2.7/dist-packages/valet/engine/optimizer/ostro_server/ostro_daemon.py -c restart'" % (user, host)
-stop="ssh -o ConnectTimeout=1 %s@%s 'python /usr/local/lib/python2.7/dist-packages/valet/engine/optimizer/ostro_server/ostro_daemon.py -c stop'" % (user, host)
-test="ssh -o ConnectTimeout=1 %s@%s 'python /usr/local/lib/python2.7/dist-packages/valet/engine/optimizer/ostro_server/health_checker.py ; exit $?'" % (user, host)
+start="ssh -o ConnectTimeout=1 %(user)s@%(host)s 'python /usr/local/lib/python2.7/dist-packages/valet/engine/optimizer/ostro_server/ostro_daemon.py -c restart'"
+stop="ssh -o ConnectTimeout=1 %(user)s@%(host)s 'python /usr/local/lib/python2.7/dist-packages/valet/engine/optimizer/ostro_server/ostro_daemon.py -c stop'"
+test="ssh -o ConnectTimeout=1 %(user)s@%(host)s 'python /usr/local/lib/python2.7/dist-packages/valet/engine/optimizer/ostro_server/health_checker.py ; exit $?'"
 
 
 [ValetApi]
@@ -60,8 +60,7 @@ priority=2
 host=valet2
 stand_by_list=valet1
 user=m04060
-start="ssh -o ConnectTimeout=1 %s@%s 'sudo service apache2 restart'" % (user, host)
-stop="ssh -o ConnectTimeout=1 %s@%s 'sudo apachectl stop'" % (user, host)
-test="exit $(wget -T 1 -t 1 -qO- http://%s:8090/v1 | grep CURRENT | wc -l)" % (host)
+start="ssh -o ConnectTimeout=1 %(user)s@%(host)s 'sudo service apache2 restart'"
+stop="ssh -o ConnectTimeout=1 %(user)s@%(host)s 'sudo apachectl stop'"
+test="exit $(wget -T 1 -t 1 -qO- http://%(host)s:8090/v1 | grep CURRENT | wc -l)"
 
-
@@ -36,8 +36,9 @@ class Analyzer(object):
         self.stack_identifier = stack_id
         self.log = logger
         self.resource_name = {}
-        self.instance_on_server = {}
+        self.host_instance_dict = {}
         self.group_instance_name = {}
+        self.instances_on_host = defaultdict(list)
         self.tries = CONF.valet.TRIES_TO_SHOW_SERVER
 
     def check(self, resources):
@@ -45,7 +46,6 @@ class Analyzer(object):
         self.log.log_info("Starting to check instances location")
         result = True
 
-        self.init_servers_list()
         self.init_resources(resources)
         ins_group = self.init_instances_for_group(resources)
 
@@ -73,6 +73,7 @@ class Analyzer(object):
 
     def init_instances_for_group(self, resources):
         """Init instances for a group with the given resources."""
+        self.log.log_info("initializing instances for group")
         ins_group = defaultdict(list)
 
         for grp in resources.groups.keys():
@@ -95,22 +96,27 @@ class Analyzer(object):
 
     def init_servers_list(self):
         """Init server list from nova client."""
+        self.log.log_info("initializing the servers list")
         servers_list = self.nova_client.list_servers()
 
-        for i in range(len(servers_list["servers"])):
-            self.log.log_debug("show_server %s from list %s " %
-                               (servers_list["servers"][i]["id"], servers_list["servers"]))
-            try:
-                server = \
-                    self.nova_client.show_server(servers_list["servers"][i]["id"])
-                self.instance_on_server[servers_list["servers"][i]["name"]] = \
-                    server["server"]["OS-EXT-SRV-ATTR:host"]
-            except Exception:
-                self.log.log_error("Exception trying to show_server: %s" % traceback.format_exc())
-                if self.tries > 0:
-                    time.sleep(CONF.valet.PAUSE)
-                    self.init_servers_list()
-                    self.tries -= 1
+        try:
+            for i in range(len(servers_list["servers"])):
+                server = self.nova_client.show_server(servers_list["servers"][i]["id"])
+                host_name = server["server"]["OS-EXT-SRV-ATTR:host"]
+                instance_name = servers_list["servers"][i]["name"]
+
+                self.host_instance_dict[instance_name] = host_name
+                self.instances_on_host[host_name].append(instance_name)
+
+        except Exception:
+            self.log.log_error("Exception trying to show_server: %s" % traceback.format_exc())
+            if self.tries > 0:
+                time.sleep(CONF.valet.PAUSE)
+                self.tries -= 1
+                self.init_servers_list()
+
+        for host in self.instances_on_host:
+            self.instances_on_host[host] = set(self.instances_on_host[host])
 
     def get_instance_name(self, res_name):
         """Return instance name (resource name)."""
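The rework builds two views of the same data in one pass: host_instance_dict maps each instance to its host, while instances_on_host inverts that mapping via collections.defaultdict(list), deduplicated into sets afterwards. A standalone sketch of the pattern, with made-up server records standing in for the nova responses:

    from collections import defaultdict

    servers = [
        {"name": "vm-1", "host": "compute-1"},
        {"name": "vm-2", "host": "compute-1"},
        {"name": "vm-3", "host": "compute-2"},
    ]

    host_of = {}
    instances_on = defaultdict(list)
    for s in servers:
        host_of[s["name"]] = s["host"]
        instances_on[s["host"]].append(s["name"])

    # Deduplicate, mirroring the tail of init_servers_list()
    for host in instances_on:
        instances_on[host] = set(instances_on[host])

    print(host_of["vm-2"])            # compute-1
    print(instances_on["compute-1"])  # {'vm-1', 'vm-2'}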
@@ -120,22 +126,20 @@ class Analyzer(object):
         """Return host of instance with matching name."""
         hosts = []
 
-        if len(self.instance_on_server) == 0:
-            self.init_servers_list()
-            self.log.log_info("instance_on_server: %s" %
-                              self.instance_on_server)
+        self.init_servers_list()
+        self.log.log_debug("host - instance dictionary is: %s" % self.host_instance_dict)
 
         for res in res_name:
             name = self.get_instance_name(res)
-            hosts.append(self.instance_on_server[name])
+            hosts.append(self.host_instance_dict[name])
 
         return hosts
 
     def are_the_same(self, res_name, level):
         """Return true if host aren't the same otherwise return False."""
-        self.log.log_info("are_the_same")
+        self.log.log_info("verifying instances are on the same host/racks")
         hosts_list = self.get_instance_host(res_name)
-        self.log.log_info(hosts_list)
+        self.log.log_debug("hosts to compare: %s" % hosts_list)
 
         try:
             for h in hosts_list:
@@ -143,43 +147,50 @@ class Analyzer(object):
                         self.get_host_or_rack(level, h),
                         self.get_host_or_rack(level, hosts_list[0])) is False:
                     return False
-            return True
 
         except Exception as ex:
-            self.log.log_error("Exception at method are_the_same: %s" %
-                               ex, traceback.format_exc())
+            self.log.log_error("Exception while verifying instances are on "
+                               "the same host/racks: %s" % ex, traceback.format_exc())
             return False
+        return True
 
     def are_different(self, res_name, level):
         """Check if all hosts (and racks) are different for all instances."""
-        self.log.log_info("are_different")
+        self.log.log_info("verifying instances are on different hosts/racks")
         diction = {}
         hosts_list = self.get_instance_host(res_name)
-        self.log.log_info(hosts_list)
+        self.log.log_debug("hosts to compare: %s" % hosts_list)
 
         try:
             for h in hosts_list:
                 if self.is_already_exists(diction, self.get_host_or_rack(level,
                                                                          h)):
                     return False
-            return True
 
         except Exception as ex:
-            self.log.log_error("Exception at method are_all_hosts_different: %s"
-                               % ex, traceback.format_exc())
+            self.log.log_error("Exception while verifying instances are on "
+                               "different hosts/racks: %s" % ex, traceback.format_exc())
             return False
+        return True
 
     def are_we_alone(self, ins_for_group, level):
         """Return True if no other instances in group on server."""
-        self.log.log_info("are_we_alone ")
-        self.log.log_info(ins_for_group)
+        self.log.log_info("verifying instances are on the same group hosts/racks")
+        exclusivity_group_hosts = self.get_exclusivity_group_hosts()
+
+        self.log.log_debug("exclusivity group hosts are: %s " % exclusivity_group_hosts)
+
+        # instances - all the instances on the exclusivity group hosts
+        for host in exclusivity_group_hosts:
+            instances = self.instances_on_host[host]
+
+            self.log.log_debug("exclusivity group instances are: %s " % instances)
+
-        instances = self.instance_on_server.keys()
         if level == "rack":
-            instances = self.get_rack_instances(set(
-                self.instance_on_server.values()))
+            instances = self.get_rack_instances(set(self.host_instance_dict.values()))
 
-        # instance_on_server should be all the instances on the rack
+        # host_instance_dict should be all the instances on the rack
         if len(instances) < 1:
             return False
 
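Pieced together from this hunk and the get_exclusivity_group_hosts() helper added below, the reworked tempest exclusivity check runs in three steps: find every host that runs an instance of the exclusivity group, collect all instances on those hosts, and verify that nothing outside the group is present. A condensed sketch of that flow, with plain dicts standing in for the Analyzer state (a simplified reading, not the literal method):

    def are_we_alone(group_instances, instances_on_host, exclusivity_hosts):
        # Any instance on an exclusivity host that is not in the group
        # violates the exclusivity guarantee.
        for host in exclusivity_hosts:
            for name in instances_on_host[host]:
                if name not in group_instances:
                    return False
        return True

    instances_on_host = {"compute-1": {"excl-vm-1", "excl-vm-2"}}
    print(are_we_alone({"excl-vm-1", "excl-vm-2"},
                       instances_on_host, {"compute-1"}))  # True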
@@ -202,6 +213,16 @@ class Analyzer(object):
                 ins_group[x].append(internal_ins)
         return ins_group
 
+    def get_exclusivity_group_hosts(self):
+        ''' Get all the hosts that the exclusivity group instances are located on '''
+        servers_list = self.nova_client.list_servers()
+        exclusivity_hosts = []
+        for serv in servers_list["servers"]:
+            if "exclusivity" in serv["name"]:
+                server = self.nova_client.show_server(serv["id"])
+                exclusivity_hosts.append(server["server"]["OS-EXT-SRV-ATTR:host"])
+        return set(exclusivity_hosts)
+
     def get_group_instances(self, resources, group_ins):
         """Get the instance object according to the group_ins.
 
@@ -226,8 +247,8 @@ class Analyzer(object):
             racks.append(self.get_rack(host))
 
         instances = []
-        for x in self.instance_on_server:
-            if self.get_rack(self.instance_on_server[x]) in racks:
+        for x in self.host_instance_dict:
+            if self.get_rack(self.host_instance_dict[x]) in racks:
                 instances.append(x)
         return instances
 
@@ -241,12 +262,10 @@ class Analyzer(object):
 
     def compare_rack(self, current_host, first_host):
         """Compare racks for hosts, return true if racks equal."""
-        self.log.log_debug(current_host)
         return self.get_rack(current_host) == self.get_rack(first_host)
 
     def compare_host(self, current_host, first_host):
         """Compare current to first host, return True if equal."""
-        self.log.log_debug(current_host)
         return current_host == first_host
 
     def get_rack(self, host):
@@ -100,7 +100,7 @@ class ScenarioTestCase(test.BaseTestCase):
 
         for key in groups:
             if groups[key].group_type == "exclusivity":
-                self.log.log_info(" creating group ")
+                self.log.log_info(" creating valet group ")
                 grp_name = data_utils.rand_name(name=groups[key].group_name)
                 template_resources.template_data = \
                     template_resources.template_data.replace(
@@ -119,7 +119,7 @@ class ScenarioTestCase(test.BaseTestCase):
             return res
 
         except Exception:
-            self.log.log_error("Failed to create stack", traceback.format_exc())
+            self.log.log_error("Failed to prepare stack for creation", traceback.format_exc())
             return False
         return True
 
@@ -142,6 +142,7 @@ class ScenarioTestCase(test.BaseTestCase):
     def get_env_file(self, template):
         try:
             env_url = template.replace(".yml", ".env")
+            self.log.log_debug("loading environment file (%s)" % env_url)
 
             if os.path.exists(env_url):
                 with open(env_url, "r") as f: