Fix erroneous Engine bootstrap

- Use oslo_log for logging
- Move the Music data store client out of the api package
- Update valet.conf
- Add a health check for a hung Engine
- Fix wrong python-requests version

Change-Id: Ibb2649c81dad94e51b579c0a99cfaf37b626095c
parent e66e4329cd
commit 125f5ccfab
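Editor's note: the hunks below replace the hand-rolled pecan/logging setup with oslo_log. A minimal sketch of the new bootstrap pattern, based only on the functions this change introduces (init_conf and get_logger in valet/common/conf.py); the wrapper function name is illustrative, not part of the change.

    # Sketch of the bootstrap pattern introduced below (wrapper name is illustrative).
    from valet import api
    from valet.common.conf import init_conf, get_logger

    def bootstrap_api_logging():
        # Register option groups, load valet.conf, and configure oslo_log handlers.
        init_conf("api.log")
        # Share one named logger through valet.api.LOG instead of per-module
        # logging.getLogger(__name__) instances.
        api.LOG = get_logger("api")
        api.LOG.info("valet-api logging initialized")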
@@ -47,45 +47,6 @@ app = {
     ],
 }
 
-logging = {
-    'root': {'level': 'INFO', 'handlers': ['console']},
-    'loggers': {
-        'api': {
-            'level': 'DEBUG', 'handlers': ['console'], 'propagate': False
-        },
-        'api.models': {
-            'level': 'INFO', 'handlers': ['console'], 'propagate': False
-        },
-        'api.common': {
-            'level': 'INFO', 'handlers': ['console'], 'propagate': False
-        },
-        'pecan': {
-            'level': 'DEBUG', 'handlers': ['console'], 'propagate': False
-        },
-        'py.warnings': {'handlers': ['console']},
-        '__force_dict__': True
-    },
-    'handlers': {
-        'console': {
-            'level': 'DEBUG',
-            'class': 'logging.StreamHandler',
-            'formatter': 'color'
-        }
-    },
-    'formatters': {
-        'simple': {
-            'format': ('%(asctime)s %(levelname)-5.5s [%(name)s]'
-                       '[%(threadName)s] %(message)s')
-        },
-        'color': {
-            '()': 'pecan.log.ColorFormatter',
-            'format': ('%(asctime)s [%(padded_color_levelname)s] [%(name)s]'
-                       '[%(threadName)s] %(message)s'),
-            '__force_dict__': True
-        }
-    }
-}
-
 ostro = {
     'tries': CONF.music.tries,
     'interval': CONF.music.interval,
@@ -1,3 +1,9 @@
+[DEFAULT]
+default_log_levels="api=DEBUG,valet=DEBUG,ostro_daemon=DEBUG,ostro_listener=INFO,music=INFO,requests=ERROR,pika=ERROR,pecan=ERROR,urllib3=ERROR"
+logging_default_format_string='%(asctime)s.%(msecs)03d [%(levelname)-5.5s] [%(name)s] - %(message)s'
+use_stderr=False
+log_dir=/var/log/valet
+
 # __
 # /_\ |__| |
 # / \  |   |
@@ -0,0 +1 @@
+LOG = None
@@ -17,9 +17,11 @@
 
 from pecan.deploy import deploy
 from pecan import make_app
+from valet import api
 from valet.api.common import identity, messaging
-from valet.api.conf import register_conf, set_domain
 from valet.api.db import models
+from valet.common.conf import get_logger
+from valet.common.conf import init_conf
 
 
 def setup_app(config):
@@ -37,6 +39,7 @@ def setup_app(config):
 # entry point for apache2
 def load_app(config_file):
     """App Load."""
-    register_conf()
-    set_domain(project='valet')
+    init_conf("api.log")
+    api.LOG = get_logger("api")
+
     return deploy(config_file)
@@ -16,27 +16,23 @@
 """Hooks."""
 
 import json
-import logging
-from valet.api.common.i18n import _
-from valet.api.common import terminate_thread
-from valet.api.v1.controllers import error
 
 from pecan import conf
 from pecan.hooks import PecanHook
 import threading
+from valet import api
+from valet.api.common.i18n import _
+from valet.api.common import terminate_thread
+from valet.api.v1.controllers import error
 import webob
 
 
-LOG = logging.getLogger(__name__)
-
-
 class MessageNotificationHook(PecanHook):
     """Send API request/responses out as Oslo msg notifications."""
 
     def after(self, state):
         """Function sends valet notification."""
         self.dummy = True
-        LOG.info('sending notification')
+        api.LOG.info('sending notification')
         notifier = conf.messaging.notifier
         status_code = state.response.status_code
         status = webob.exc.status_map.get(status_code)
@@ -95,11 +91,11 @@ class MessageNotificationHook(PecanHook):
         notifier_thread = threading.Thread(target=notifier_fn, args=(ctxt, event_type, payload))
         notifier_thread.start()
         # launch a timer to verify no hung threads are left behind
-        # (when timeout expired kill the notifier thread if it still alive)
+        # (when timeout expires kill the notifier thread if it still alive)
         watcher = threading.Timer(conf.messaging.timeout, terminate_thread, args=[notifier_thread])
         watcher.start()
 
-        LOG.info('valet notification hook - end')
+        api.LOG.info('notification sent.')
 
 
 class NotFoundHook(PecanHook):
@@ -22,11 +22,9 @@ import iso8601
 from keystoneauth1.identity import v2
 from keystoneauth1 import session
 from keystoneclient.v2_0 import client
-import logging
 from pecan import conf
 import pytz
+from valet.api import LOG
 
-LOG = logging.getLogger(__name__)
 
 
 def utcnow():
@@ -18,7 +18,6 @@
 from oslo_config import cfg
 import oslo_messaging as messaging
 from pecan import conf
-from valet.api.conf import set_domain, DOMAIN
 
 
 def _messaging_notifier_from_config(config):
@@ -33,7 +32,6 @@ def _messaging_notifier_from_config(config):
 
 def init_messaging():
     """Initialize the messaging engine and place in the config."""
-    set_domain(DOMAIN)
     config = conf.messaging.config
     notifier = _messaging_notifier_from_config(config)
     conf.messaging.notifier = notifier
@@ -16,7 +16,6 @@
 """Ostro helper library."""
 
 import json
-import logging
 
 from pecan import conf
 import time
@@ -27,8 +26,7 @@ from valet.api.db.models import Group
 from valet.api.db.models import PlacementRequest
 from valet.api.db.models import PlacementResult
 from valet.api.db.models import Query
+from valet.api import LOG
 
-LOG = logging.getLogger(__name__)
 
 SERVICEABLE_RESOURCES = [
     'OS::Nova::Server'
@@ -192,8 +190,8 @@ class Ostro(object):
                           GROUP_TYPE,
                           EXCLUSIVITY)
 
-        group = Group.query.filter_by(  # pylint: disable=E1101
-            name=group_name).first()
+        group = Group.query.filter_by(name=group_name).first()  # pylint: disable=E1101
         if not group:
             self.error_uri = '/errors/not_found'
             return_message = "%s '%s' not found" % (GROUP_NAME, group_name)
@@ -21,7 +21,7 @@ from pecan import conf
 import six
 import uuid
 from valet.api.common.i18n import _
-from valet.api.db.models.music.music import Music
+from valet.common.music import Music
 
 
 def get_class(kls):
@@ -211,6 +211,7 @@ class Query(object):
 
     def __rows_to_objects(self, rows):
         """Convert query response rows to objects"""
+        try:
             results = []
             pk_name = self.model.pk_name()  # pylint: disable=E1101
             for __, row in rows.iteritems():  # pylint: disable=W0612
@@ -218,6 +219,10 @@ class Query(object):
                 result = self.model(_insert=False, **row)
                 setattr(result, pk_name, the_id)
                 results.append(result)
+        except Exception:
+            import traceback
+            print(traceback.format_exc())
 
         return Results(results)
 
     def all(self):
@@ -16,9 +16,8 @@
 """Populate command."""
 
 from pecan.commands.base import BaseCommand
+from valet import api
 from valet.api.common.i18n import _
-from valet.api.conf import register_conf, set_domain
 from valet.api.db import models
 from valet.api.db.models import Event
 from valet.api.db.models import Group
@@ -26,11 +25,7 @@ from valet.api.db.models import Placement
 from valet.api.db.models import PlacementRequest
 from valet.api.db.models import PlacementResult
 from valet.api.db.models import Plan
+from valet.common.conf import init_conf, get_logger
 
-
-def out(string):
-    """Output helper."""
-    print("==> %s" % string)
 
 
 class PopulateCommand(BaseCommand):
@@ -39,13 +34,15 @@ class PopulateCommand(BaseCommand):
     def run(self, args):
         """Function creates and initializes database and environment."""
         super(PopulateCommand, self).run(args)
-        out(_("Loading environment"))
-        register_conf()
-        set_domain()
-        self.load_app()
-        out(_("Building schema"))
         try:
-            out(_("Starting a transaction..."))
+            init_conf("populate.log")
+            # cfg.CONF.log_file = "populate.log"
+            # cfg.CONF.use_stderr = True
+            LOG = api.LOG = get_logger("populate")
+            LOG.info(_("Loading environment"))
+            self.load_app()
+            LOG.info(_("Building schema"))
+            LOG.info(_("Starting a transaction..."))
             models.start()
 
             # FIXME: There's no create_all equivalent for Music.
@@ -59,10 +56,10 @@ class PopulateCommand(BaseCommand):
             Event.create_table()
             PlacementRequest.create_table()
             PlacementResult.create_table()
-        except Exception:
+        except Exception as ex:
             models.rollback()
-            out(_("Rolling back..."))
+            LOG.error("Rolling back... %s" % ex)
             raise
         else:
-            out(_("Committing."))
+            LOG.info(_("Committing."))
             models.commit()
@@ -15,28 +15,26 @@
 
 """Controllers Package."""
 
-import logging
 from notario.decorators import instance_of
 from notario import ensure
 from os import path
 
 from pecan import redirect, request
 import string
+from valet import api
 from valet.api.common.i18n import _
 from valet.api.db.models import Placement
 
-LOG = logging.getLogger(__name__)
-
 
 #
 # Notario Helpers
 #
 
 
 def valid_group_name(value):
     """Validator for group name type."""
     if not value or not set(value) <= set(string.letters + string.digits + "-._~"):
-        LOG.error("group name is not valid")
-        LOG.error("group name must contain only uppercase and lowercase letters, decimal digits, \
+        api.LOG.error("group name is not valid")
+        api.LOG.error("group name must contain only uppercase and lowercase letters, decimal digits, \
                   hyphens, periods, underscores, and tildes [RFC 3986, Section 2.3]")
 
 
@@ -73,16 +71,13 @@ def reserve_placement(placement, resource_id=None, reserve=True, update=True):
     the data store (if the update will be made later).
     """
     if placement:
-        LOG.info(_('%(rsrv)s placement of %(orch_id)s in %(loc)s.'),
+        api.LOG.info(_('%(rsrv)s placement of %(orch_id)s in %(loc)s.'),
                  {'rsrv': _("Reserving") if reserve else _("Unreserving"),
-                  'orch_id': placement.orchestration_id,
-                  'loc': placement.location})
+                  'orch_id': placement.orchestration_id, 'loc': placement.location})
         placement.reserved = reserve
         if resource_id:
-            LOG.info(_('Associating resource id %(res_id)s with '
-                       'orchestration id %(orch_id)s.'),
-                     {'res_id': resource_id,
-                      'orch_id': placement.orchestration_id})
+            api.LOG.info(_('Associating resource id %(res_id)s with orchestration id %(orch_id)s.'),
+                         {'res_id': resource_id, 'orch_id': placement.orchestration_id})
             placement.resource_id = resource_id
         if update:
             placement.update()
@@ -97,10 +92,8 @@ def update_placements(placements, reserve_id=None, unlock_all=False):
         properties = placements[uuid]['properties']
         location = properties['host']
         if placement.location != location:
-            LOG.info(_('Changing placement of %(orch_id)s '
-                       'from %(old_loc)s to %(new_loc)s.'),
-                     {'orch_id': placement.orchestration_id,
-                      'old_loc': placement.location,
+            api.LOG.info(_('Changing placement of %(orch_id)s from %(old_loc)s to %(new_loc)s.'),
+                         {'orch_id': placement.orchestration_id, 'old_loc': placement.location,
                       'new_loc': location})
             placement.location = location
         if unlock_all:
@@ -15,15 +15,11 @@
 
 """Errors."""
 
-import logging
 from pecan import expose, request, response
 from valet.api.common.i18n import _
+from valet.api import LOG
 from webob.exc import status_map
 
-LOG = logging.getLogger(__name__)
-
-# pylint: disable=R0201
-
 
 def error_wrapper(func):
     """Error decorator."""
@@ -15,8 +15,6 @@
 
 """Groups."""
 
-import logging
-
 from notario import decorators
 from notario.validators import types
 from pecan import conf, expose, request, response
@@ -27,8 +25,7 @@ from valet.api.common.i18n import _
 from valet.api.common.ostro_helper import Ostro
 from valet.api.db.models import Group
 from valet.api.v1.controllers import error, valid_group_name
+from valet import api
 
-LOG = logging.getLogger(__name__)
 
 GROUPS_SCHEMA = (
     (decorators.optional('description'), types.string),
@@ -82,8 +79,8 @@ def tenant_servers_in_group(tenant_id, group):
             if server.tenant_id == tenant_id:
                 servers.append(server_id)
         except Exception as ex:  # TODO(JD): update DB
-            LOG.error("Instance %s could not be found" % server_id)
-            LOG.error(ex)
+            api.LOG.error("Instance %s could not be found" % server_id)
+            api.LOG.error(ex)
     if len(servers) > 0:
         return servers
 
@@ -292,9 +289,14 @@ class GroupsController(object):
     @index.when(method='GET', template='json')
     def index_get(self):
         """List groups."""
+        try:
             groups_array = []
             for group in Group.query.all():  # pylint: disable=E1101
                 groups_array.append(group)
+        except Exception:
+            import traceback
+            api.LOG.error(traceback.format_exc())
+            response.status = 500
         return {'groups': groups_array}
 
     @index.when(method='POST', template='json')
@@ -15,18 +15,12 @@
 
 """Placements."""
 
-import logging
-
 from pecan import expose, request, response
 from valet.api.common.i18n import _
 from valet.api.common.ostro_helper import Ostro
 from valet.api.db.models import Placement, Plan
-from valet.api.v1.controllers import error
-from valet.api.v1.controllers import reserve_placement
-from valet.api.v1.controllers import update_placements
-
-
-LOG = logging.getLogger(__name__)
+from valet.api.v1.controllers import error, reserve_placement, update_placements
+from valet import api
 
 # pylint: disable=R0201
 
@@ -80,12 +74,11 @@ class PlacementsItemController(object):
         Once reserved, the location effectively becomes immutable.
         """
         res_id = kwargs.get('resource_id')
-        LOG.info(_('Placement reservation request for resource id '
-                   '%(res_id)s, orchestration id %(orch_id)s.'),
+        api.LOG.info(_('Placement reservation request for resource id %(res_id)s, orchestration id %(orch_id)s.'),
                  {'res_id': res_id, 'orch_id': self.placement.orchestration_id})
         locations = kwargs.get('locations', [])
         locations_str = ', '.join(locations)
-        LOG.info(_('Candidate locations: %s'), locations_str)
+        api.LOG.info(_('Candidate locations: %s'), locations_str)
         if self.placement.location in locations:
             # Ostro's placement is in the list of candidates. Good!
             # Reserve it. Remember the resource id too.
@@ -95,12 +88,8 @@ class PlacementsItemController(object):
         else:
             # Ostro's placement is NOT in the list of candidates.
             # Time for Plan B.
-            LOG.info(_('Placement of resource id %(res_id)s, '
-                       'orchestration id %(orch_id)s in %(loc)s '
-                       'not allowed. Replanning.'),
-                     {'res_id': res_id,
-                      'orch_id': self.placement.orchestration_id,
-                      'loc': self.placement.location})
+            api.LOG.info(_('Placement of resource id %(res_id)s, orchestration id %(orch_id)s in %(loc)s not allowed. Replanning.'),
+                         {'res_id': res_id, 'orch_id': self.placement.orchestration_id, 'loc': self.placement.location})
 
             # Unreserve the placement. Remember the resource id too.
             kwargs = {'resource_id': res_id, 'reserve': False}
@@ -117,9 +106,9 @@ class PlacementsItemController(object):
         exclusions = [x.orchestration_id for x in reserved]
         if exclusions:
             exclusions_str = ', '.join(exclusions)
-            LOG.info(_('Excluded orchestration IDs: %s'), exclusions_str)
+            api.LOG.info(_('Excluded orchestration IDs: %s'), exclusions_str)
         else:
-            LOG.info(_('No excluded orchestration IDs.'))
+            api.LOG.info(_('No excluded orchestration IDs.'))
 
         # Ask Ostro to try again with new constraints.
         # We may get one or more updated placements in return.
@@ -158,7 +147,7 @@ class PlacementsItemController(object):
         """Delete a Placement."""
         orch_id = self.placement.orchestration_id
         self.placement.delete()
-        LOG.info(_('Placement with orchestration id %s deleted.'), orch_id)
+        api.LOG.info(_('Placement with orchestration id %s deleted.'), orch_id)
         response.status = 204
@@ -15,22 +15,16 @@
 
 """Plans."""
 
-import logging
-
 from notario import decorators
 from notario.validators import types
 from pecan import expose, request, response
 from pecan_notario import validate
 
 from valet.api.common.i18n import _
 from valet.api.common.ostro_helper import Ostro
 from valet.api.db.models import Placement, Plan
-from valet.api.v1.controllers import error
-from valet.api.v1.controllers import set_placements
-from valet.api.v1.controllers import update_placements
-from valet.api.v1.controllers import valid_plan_update_action
-
-LOG = logging.getLogger(__name__)
+from valet.api.v1.controllers import error, set_placements, update_placements, valid_plan_update_action
+from valet.api import LOG
 
 CREATE_SCHEMA = (
     ('plan_name', types.string),
@@ -15,8 +15,6 @@
 
 """Root."""
 
-import logging
-
 from pecan import expose, request, response
 from valet.api.common.i18n import _
 from valet.api.v1.controllers import error
@@ -25,8 +23,6 @@ from valet.api.v1.controllers.v1 import V1Controller
 
 from webob.exc import status_map
 
-LOG = logging.getLogger(__name__)
-
 # pylint: disable=R0201
 
@@ -15,15 +15,11 @@
 
 """Status."""
 
-import logging
-
 from pecan import expose, request, response
 from valet.api.common.i18n import _
 from valet.api.common.ostro_helper import Ostro
 from valet.api.v1.controllers import error
 
-LOG = logging.getLogger(__name__)
-
 # pylint: disable=R0201
 
@@ -15,11 +15,9 @@
 
 """v1."""
 
-import logging
-
 from pecan import conf, expose, request, response
 from pecan.secure import SecureController
+from valet import api
 from valet.api.common.i18n import _
 from valet.api.v1.controllers import error
 from valet.api.v1.controllers.groups import GroupsController
@@ -28,11 +26,6 @@ from valet.api.v1.controllers.plans import PlansController
 from valet.api.v1.controllers.status import StatusController
 
 
-LOG = logging.getLogger(__name__)
-
-# pylint: disable=R0201
-
-
 class V1Controller(SecureController):
     """v1 Controller /v1."""
 
@@ -58,11 +51,11 @@ class V1Controller(SecureController):
         token = conf.identity.engine.validate_token(auth_token)
 
         if token:
-            LOG.debug("Checking token permissions")
+            api.LOG.debug("Checking token permissions")
             msg = "Unauthorized - Permission was not granted"
             if V1Controller._permission_granted(request, token):
                 tenant_id = conf.identity.engine.tenant_from_token(token)
-                LOG.info("tenant_id - " + str(tenant_id))
+                api.LOG.info("tenant_id - " + str(tenant_id))
                 if tenant_id:
                     request.context['tenant_id'] = tenant_id
                     user_id = conf.identity.engine.user_from_token(token)
@@ -18,6 +18,8 @@
 from common.i18n import _
 import os
 from pecan.deploy import deploy
+from valet.common.conf import init_conf, get_logger
+from valet import api
 
 
 def config_file(file_name=None):
@@ -39,15 +41,20 @@ def application(environ, start_response):
 # TODO(JD): Integrate this with a python entry point
 # This way we can run valet-api from the command line in a pinch.
 if __name__ == '__main__':
+    try:
         from wsgiref.simple_server import make_server  # disable=C0411,C0413
 
         # TODO(JD): At some point, it would be nice to use pecan_mount
-        from valet.api.conf import register_conf, set_domain
-        register_conf()
-        set_domain()
-        HTTPD = make_server('', 8090,
-                            deploy(config_file('/var/www/valet/config.py')))
+        # import pecan_mount
+        # HTTPD = make_server('', 8090, pecan_mount.tree)
+        # from valet.api.conf import register_conf, set_domain
+        init_conf("wsgi.log")
+        api.LOG = get_logger("wsgi")
+        HTTPD = make_server('', 8090, deploy(config_file('/var/www/valet/config.py')))
        print(_("Serving HTTP on port 8090..."))
 
        # Respond to requests until process is killed
        HTTPD.serve_forever()
+    except Exception:
+        import traceback
+        print(traceback.format_exc())
@@ -19,7 +19,7 @@ import argparse
 import json
 from oslo_config import cfg
 import requests
-from valet.api.conf import register_conf, set_domain
+from valet.common import conf as common
 
 CONF = cfg.CONF
 
@@ -211,8 +211,7 @@ def populate_args_request_body(args):
 
 def run(args):
     """Run."""
-    register_conf()
-    set_domain(project='valet')
+    common.init_conf("cli.log")
     args.host = args.host or CONF.server.host
     args.port = args.port or CONF.server.port
    args.timeout = args.timeout or 10
@@ -18,7 +18,6 @@
 import argparse
 import sys
 import valet.cli.groupcli as groupcli
-# import logging
 
 
 class Cli(object):
valet/common/__init__.py (new file, 15 lines)
|
|||||||
|
|
||||||
|
from oslo_config import cfg
|
||||||
|
|
||||||
|
|
||||||
|
def logger_conf(logger_name):
|
||||||
|
return [
|
||||||
|
cfg.StrOpt('output_format', default="%(asctime)s - %(levelname)s - %(message)s"), # dict
|
||||||
|
cfg.BoolOpt('store', default=True),
|
||||||
|
cfg.StrOpt('logging_level', default='debug'),
|
||||||
|
cfg.StrOpt('logging_dir', default='/var/log/valet/'),
|
||||||
|
cfg.StrOpt('logger_name', default=logger_name + ".log"),
|
||||||
|
cfg.IntOpt('max_main_log_size', default=5000000),
|
||||||
|
cfg.IntOpt('max_log_size', default=1000000),
|
||||||
|
cfg.IntOpt('max_num_of_logs', default=3),
|
||||||
|
]
|
@@ -16,12 +16,16 @@
 """Conf."""
 
 from oslo_config import cfg
+from oslo_log import log as logging
+
+CONF = cfg.CONF
 DOMAIN = 'valet'
 
 
-CONF = cfg.CONF
+def get_logger(name):
+    return logging.getLogger(name)
+
+LOG = get_logger("engine")
 
 server_group = cfg.OptGroup(name='server', title='Valet API Server conf')
 server_opts = [
@@ -70,19 +74,34 @@ music_opts = [
     # cfg.ListOpt('db_hosts', default='valet1,valet2,valet3')
 ]
 
-def set_domain(project=DOMAIN):
-    """Set Domain."""
-    CONF([], project)
+def load_conf(args=None, project=DOMAIN, default_files=None):
+    CONF(default_config_files=default_files) if default_files else CONF(args or [], project=project)
 
 
-def register_conf():
-    """Register confs."""
-    CONF.register_group(server_group)
-    CONF.register_opts(server_opts, server_group)
-    CONF.register_group(music_group)
-    CONF.register_opts(music_opts, music_group)
-    CONF.register_group(identity_group)
-    CONF.register_opts(identity_opts, identity_group)
-    CONF.register_group(messaging_group)
-    CONF.register_opts(messaging_opts, messaging_group)
+def init_conf(log_file="valet.log", args=None, grp2opt=None, cli_opts=None, default_config_files=None):
+    CONF.log_file = log_file
+    logging.register_options(CONF)
+
+    # init conf
+    general_groups = {server_group: server_opts, music_group: music_opts,
+                      identity_group: identity_opts, messaging_group: messaging_opts}
+
+    general_groups.update(grp2opt or {})
+
+    _register_conf(general_groups, cli_opts)
+    load_conf(args=args, default_files=default_config_files)
+
+    # set logger
+    _set_logger()
+
+
+def _set_logger():
+    logging.setup(CONF, DOMAIN)
+
+
+def _register_conf(grp2opt, cli_opts):
+    for grp in grp2opt or {}:
+        CONF.register_group(grp)
+        CONF.register_opts(grp2opt[grp], grp)
+
+    for opt in cli_opts or []:
+        CONF.register_cli_opts(opt)
@@ -16,14 +16,12 @@
 """Music Data Store API."""
 
 import json
-import logging
-import time
-
-from valet.api.common.i18n import _
-
 import requests
+import time
+from valet.api.common.i18n import _
+from valet.common.conf import get_logger
 
-LOG = logging.getLogger(__name__)
+LOG = get_logger("music")
 
 
 class REST(object):
@@ -47,7 +45,8 @@ class REST(object):
     def urls(self):
         """Return list of URLs using each host, plus the port/path."""
         if not self._urls:
-            urls = []
+            # make localhost as first option
+            urls = ['http://localhost:%s%s' % (self.port, self.path)]
             for host in self.hosts:
                 # Must end without a slash
                 urls.append('http://%(host)s:%(port)s%(path)s' % {
@@ -81,12 +80,10 @@ class REST(object):
         full_url = url + path
         try:
             data_json = json.dumps(data) if data else None
-            LOG.debug("Music Request: %s %s%s", method.upper(), full_url,
-                      data_json if data else '')
-            response = method_fn(full_url, data=data_json,
-                                 headers=self.__headers(content_type),
-                                 timeout=self.timeout)
+            LOG.debug("Music Request: %s %s%s", method.upper(), full_url, data_json if data else '')
+            response = method_fn(full_url, data=data_json, headers=self.__headers(content_type), timeout=self.timeout)
             response.raise_for_status()
 
             return response
         except requests.exceptions.Timeout as err:
             response = requests.Response()
@@ -151,6 +148,7 @@ class Music(object):
 
         path = '/keyspaces/%s' % keyspace
         response = self.rest.request(method='post', path=path, data=data)
+
         return response.ok
 
     def create_table(self, keyspace, table, schema):
@@ -16,7 +16,8 @@
 """Conf."""
 
 from oslo_config import cfg
-from valet.api import conf as api
+import sys
+from valet.common import logger_conf, conf as common
 
 CONF = cfg.CONF
 
@@ -27,178 +28,47 @@ ostro_cli_opts = [
         help='engine command.'),
 ]
 
 
 engine_group = cfg.OptGroup(name='engine', title='Valet Engine conf')
 engine_opts = [
-    cfg.StrOpt(
-        'pid',
-        default='/var/run/valet/ostro-daemon.pid'
-    ),
-    cfg.StrOpt(
-        'mode',
-        default='live',
-        help="""
-Sim will let Ostro simulate datacenter, while live will
-let it handle a real datacenter.
-"""),
-    cfg.StrOpt(
-        'sim_cfg_loc',
-        default='/etc/valet/engine/ostro_sim.cfg'),
-    cfg.BoolOpt(
-        'network_control',
-        default=False,
-        help="""
-Whether network controller (i.e., Tegu) has been deployed
-"""),
-    cfg.StrOpt(
-        'network_control_url',
-        default='http://network_control:29444/tegu/api'),
-    cfg.StrOpt(
-        'ip',
-        default='localhost'),
-    cfg.IntOpt(
-        'priority',
-        default=1,
-        help="""
-This instance priority (master=1)
-"""),
-    cfg.StrOpt(
-        'rpc_server_ip',
-        default='localhost',
-        help="""
-Set RPC server ip and port if used. Otherwise, ignore these parameters
-"""),
-    cfg.StrOpt(
-        'rpc_server_port',
-        default='8002'
-    ),
-    cfg.StrOpt(
-        'logger_name',
-        default='engine.log'
-    ),
-    cfg.StrOpt(
-        'logging_level',
-        default='debug'
-    ),
-    cfg.StrOpt(
-        'logging_dir',
-        default='/var/log/valet/'
-    ),
-    cfg.StrOpt(
-        'max_main_log_size',
-        default=5000000
-    ),
-    cfg.IntOpt(
-        'max_log_size',
-        default=1000000
-    ),
-    cfg.IntOpt(
-        'max_num_of_logs',
-        default=20
-    ),
-    cfg.StrOpt(
-        'datacenter_name',
-        default='bigsite',
-        help="""
-Inform the name of datacenter (region name), where Valet/Ostro is deployed.
-"""),
-    cfg.IntOpt(
-        'num_of_region_chars',
-        default='3',
-        help="""
-Number of chars that indicates the region code
-"""),
-    cfg.StrOpt(
-        'rack_code_list',
-        default='r',
-        help="""
-Rack indicator.
-"""),
-    cfg.ListOpt(
-        'node_code_list',
-        default='a,c,u,f,o,p,s',
-        help="""
-Indicates the node type.
-
-Values:
-
-  * a: network
-  * c KVM compute
-  * u: ESXi compute
-  * f: ?
-  * o: operation
-  * p: power
-  * s: storage.
-"""),
-    cfg.StrOpt(
-        'compute_trigger_time',
-        default='1:00',
-        help="""
-Trigger time or frequency for checking compute hosting server status
-(i.e., call Nova)
-"""),
-    cfg.IntOpt(
-        'compute_trigger_frequency',
-        default=3600,
-        help="""
-Trigger time or frequency for checking compute hosting server status
-(i.e., call Nova).
-"""),
-    cfg.StrOpt(
-        'topology_trigger_time',
-        default='2:00',
-        help="""
-Set trigger time or frequency for checking datacenter topology.
-"""),
-    cfg.IntOpt(
-        'topology_trigger_frequency',
-        default=3600,
-        help="""
-Set trigger time or frequency for checking datacenter topology.
-"""),
-    cfg.IntOpt(
-        'default_cpu_allocation_ratio',
-        default=16,
-        help="""
-Set default overbooking ratios.
-Note that each compute node can have its own ratios.
-"""),
-    cfg.IntOpt(
-        'default_ram_allocation_ratio',
-        default=1.5,
-        help="""
-Set default overbooking ratios.
-Note that each compute node can have its own ratios.
-"""),
-    cfg.IntOpt(
-        'default_disk_allocation_ratio',
-        default=1,
-        help="""
-Set default overbooking ratios.
-Note that each compute node can have its own ratios.
-"""),
-    cfg.IntOpt(
-        'static_cpu_standby_ratio',
-        default=20,
-        help="""
-Unused percentages of resources (i.e. standby) that are set
-aside for applications workload spikes.
-"""),
-    cfg.IntOpt(
-        'static_mem_standby_ratio',
-        default=20,
-        help="""
-Unused percentages of resources (i.e. standby) that are set
-aside for applications workload spikes.
-"""),
-    cfg.IntOpt(
-        'static_local_disk_standby_ratio',
-        default=20,
-        help="""
-Unused percentages of resources (i.e. standby) that are set
-aside for applications workload spikes.
-"""),
-]
+    cfg.StrOpt('pid', default='/var/run/valet/ostro-daemon.pid'),
+    cfg.StrOpt('mode', default='live', help='sim will let Ostro simulate datacenter, while live will let it handle a real datacenter'),
+    cfg.StrOpt('sim_cfg_loc', default='/etc/valet/engine/ostro_sim.cfg'),
+    cfg.BoolOpt('network_control', default=False, help='whether network controller (i.e., Tegu) has been deployed'),
+    cfg.StrOpt('network_control_url', default='http://network_control:29444/tegu/api'),
+    cfg.StrOpt('ip', default='localhost'),
+    cfg.IntOpt('health_timeout', default=6, help='health check grace period (seconds, default=5)'),
+    cfg.IntOpt('priority', default=1, help='this instance priority (master=1)'),
+    cfg.StrOpt('rpc_server_ip', default='localhost',
+               help='Set RPC server ip and port if used. Otherwise, ignore these parameters'),
+    cfg.StrOpt('rpc_server_port', default='8002'),
+    cfg.StrOpt('datacenter_name', default='bigsite',
+               help='Inform the name of datacenter (region name), where Valet/Ostro is deployed.'),
+    cfg.IntOpt('num_of_region_chars', default='3', help='number of chars that indicates the region code'),
+    cfg.StrOpt('rack_code_list', default='r', help='rack indicator.'),
+    cfg.ListOpt('node_code_list', default='a,c,u,f,o,p,s',
+                help='indicates the node type. a: network, c KVM compute, u: ESXi compute, f: ?, o: operation, '
+                     'p: power, s: storage.'),
+    cfg.StrOpt('compute_trigger_time', default='1:00',
+               help='trigger time or frequency for checking compute hosting server status (i.e., call Nova)'),
+    cfg.IntOpt('compute_trigger_frequency', default=3600,
+               help='trigger time or frequency for checking compute hosting server status (i.e., call Nova)'),
+    cfg.StrOpt('topology_trigger_time', default='2:00',
+               help='Set trigger time or frequency for checking datacenter topology (i.e., call AIC Formation)'),
+    cfg.IntOpt('topology_trigger_frequency', default=3600,
+               help='Set trigger time or frequency for checking datacenter topology (i.e., call AIC Formation)'),
+    cfg.IntOpt('default_cpu_allocation_ratio', default=16, help='Set default overbooking ratios. '
+               'Note that each compute node can have its own ratios'),
+    cfg.IntOpt('default_ram_allocation_ratio', default=1.5, help='Set default overbooking ratios. '
+               'Note that each compute node can have its own ratios'),
+    cfg.IntOpt('default_disk_allocation_ratio', default=1, help='Set default overbooking ratios. '
+               'Note that each compute node can have its own ratios'),
+    cfg.IntOpt('static_cpu_standby_ratio', default=20, help='unused percentages of resources (i.e. standby) '
+               'that are set aside for applications workload spikes.'),
+    cfg.IntOpt('static_mem_standby_ratio', default=20, help='unused percentages of resources (i.e. standby) '
+               'that are set aside for applications workload spikes.'),
+    cfg.IntOpt('static_local_disk_standby_ratio', default=20, help='unused percentages of resources (i.e. standby) '
+               'that are set aside for applications workload spikes.'),
+] + logger_conf("engine")
 
 
 listener_group = cfg.OptGroup(name='events_listener',
                               title='Valet Engine listener')
@@ -206,20 +76,14 @@ listener_opts = [
     cfg.StrOpt('exchange', default='nova'),
     cfg.StrOpt('exchange_type', default='topic'),
     cfg.BoolOpt('auto_delete', default=False),
-    cfg.StrOpt('output_format', default='dict'),
     cfg.BoolOpt('store', default=True),
-    cfg.StrOpt('logging_level', default='debug'),
-    cfg.StrOpt('logging_loc', default='/var/log/valet/'),
-    cfg.StrOpt('logger_name', default='ostro_listener.log'),
-    cfg.IntOpt('max_main_log_size', default=5000000),
-]
+] + logger_conf("ostro_listener")
 
 
-def register_conf():
-    """Function calls api and registers configs opts."""
-    api.register_conf()
-    CONF.register_group(engine_group)
-    CONF.register_opts(engine_opts, engine_group)
-    CONF.register_group(listener_group)
-    CONF.register_opts(listener_opts, listener_group)
-    CONF.register_cli_opts(ostro_cli_opts)
+def init_engine(default_config_files=None):
+    """ register the engine and the listener groups """
+    common.init_conf("engine.log", args=sys.argv[1:],
+                     grp2opt={engine_group: engine_opts,
+                              listener_group: listener_opts},
+                     cli_opts=[ostro_cli_opts],
+                     default_config_files=default_config_files)
@@ -21,9 +21,9 @@ import pika
 import pprint
 import threading
 import traceback
-from valet.api.db.models.music import Music
+from valet.common.conf import get_logger
+from valet.common.music import Music
 from valet.engine.listener.oslo_messages import OsloMessage
-from valet.engine.optimizer.util.util import init_logger
 import yaml
 
 
@@ -36,7 +36,7 @@ class ListenerManager(threading.Thread):
         self.thread_id = _t_id
         self.thread_name = _t_name
         self.config = _config
-        self.listener_logger = init_logger(self.config.events_listener)
+        self.listener_logger = get_logger("ostro_listener")
         self.MUSIC = None
 
     def run(self):
@@ -181,11 +181,9 @@ class AppHandler(object):
         return vm_info
 
     def update_vm_info(self, _s_uuid, _h_uuid):
-        """Update vm info (the ids) in the database."""
-        s_uuid_exist = bool(_s_uuid is not None and _s_uuid != "none")
-        h_uuid_exist = bool(_h_uuid is not None and _h_uuid != "none")
-        if s_uuid_exist and h_uuid_exist:
+        if _h_uuid and _h_uuid != "none" and _s_uuid and _s_uuid != "none":
             return self.db.update_vm_info(_s_uuid, _h_uuid)
 
         return True
 
     def _regenerate_app_topology(self, _stack_id, _app, _app_topology, _action):
@@ -17,7 +17,7 @@
 
 import json
 import operator
-from valet.api.db.models.music import Music
+from valet.common.music import Music
 from valet.engine.optimizer.db_connect.event import Event
 
 
@@ -135,24 +135,24 @@ class Ostro(object):
         resource_status = self.db.get_resource_status(
             self.resource.datacenter.name)
         if resource_status is None:
+            self.logger.error("Ostro.bootstrap: failed to read from table: " + self.config.db_resource_table)
             return False
 
         if len(resource_status) > 0:
             self.logger.info("Ostro.bootstrap: bootstrap from db")
-            if self.resource.bootstrap_from_db(resource_status) is False:
-                return False
-        else:
-            self.logger.info("bootstrap from OpenStack")
+            if not self.resource.bootstrap_from_db(resource_status):
+                self.logger.error("Ostro.bootstrap: failed to parse bootstrap data!")
 
-            if self._set_hosts() is False:
+        self.logger.info("read bootstrap data from OpenStack")
+        if not self._set_hosts():
             self.logger.error('_set_hosts is false')
             return False
 
-            if self._set_flavors() is False:
+        if not self._set_flavors():
             self.logger.info("_set_flavors is false")
             return False
 
-            if self._set_topology() is False:
+        if not self._set_topology():
             self.logger.error("_set_topology is false")
             return False
 
@@ -167,29 +167,31 @@ class Ostro(object):
         return True
 
     def _set_topology(self):
-        if self.topology.set_topology() is False:
+        if not self.topology.set_topology():
             self.status = "datacenter configuration error"
+            self.logger.error("failed to read datacenter topology")
             return False
 
         self.logger.debug("done topology bootstrap")
 
         return True
 
     def _set_hosts(self):
-        if self.compute.set_hosts() is False:
+        if not self.compute.set_hosts():
             self.status = "OpenStack (Nova) internal error"
+            self.logger.error("failed to read hosts from OpenStack (Nova)")
             return False
 
         self.logger.debug("done hosts & groups bootstrap")
         return True
 
     def _set_flavors(self):
         self.logger.debug("start flavors bootstrap")
-        if self.compute.set_flavors() is False:
+        if not self.compute.set_flavors():
             self.status = "OpenStack (Nova) internal error"
+            self.logger.error("failed to read flavors from OpenStack (Nova)")
             return False
 
         self.logger.debug("done flavors bootstrap")
 
         return True
 
     def place_app(self, _app_data):
@@ -263,21 +265,21 @@ class Ostro(object):
                         query_results[q["stack_id"]] = vm_list
                     else:
                         self.status = "unknown paramenter in query"
-                        self.logger.warn("Ostro._query: " + self.status)
+                        self.logger.warn("Ostro._query: unknown paramenter in query")
                         query_results[q["stack_id"]] = None
                 else:
-                    self.status = "no parameters in query"
-                    self.logger.warn("Ostro._query: " + self.status)
+                    self.status = "no paramenter in query"
+                    self.logger.warn("Ostro._query: no parameters in query")
                     query_results[q["stack_id"]] = None
             elif q["type"] == "all_groups":
                 query_results[q["stack_id"]] = self._get_logical_groups()
             else:
                 self.status = "unknown query type"
-                self.logger.warn("Ostro._query: " + self.status)
+                self.logger.warn("Ostro._query: unknown query type")
                 query_results[q["stack_id"]] = None
         else:
-            self.status = "no type in query"
-            self.logger.warn("Ostro._query: " + self.status)
+            self.status = "unknown type in query"
+            self.logger.warn("Ostro._query: no type in query")
             query_results[q["stack_id"]] = None
 
         return query_results
@@ -313,20 +315,20 @@ class Ostro(object):
         app_topology = self.app_handler.add_app(_app_data)
         if app_topology is None:
             self.status = self.app_handler.status
-            self.logger.debug("Ostro._place_app: error while register "
-                              "requested apps: " + self.status)
+            self.logger.error("Ostro._place_app: error while register"
+                              "requested apps: " + self.app_handler.status)
             return None
 
         """Check and set vm flavor information."""
         for _, vm in app_topology.vms.iteritems():
             if self._set_vm_flavor_information(vm) is False:
                 self.status = "fail to set flavor information"
-                self.logger.error("Ostro._place_app: " + self.status)
+                self.logger.error("Ostro._place_app: failed to set flavor information ")
                 return None
         for _, vg in app_topology.vgroups.iteritems():
             if self._set_vm_flavor_information(vg) is False:
                 self.status = "fail to set flavor information in a group"
-                self.logger.error("Ostro._place_app: " + self.status)
+                self.logger.error("Ostro._place_app: failed to set flavor information in a group")
                 return None
 
         """Set weights for optimization."""
@@ -688,7 +690,7 @@ class Ostro(object):
                 app_status['message'] = "ping"

                 app_result['status'] = app_status
-                app_result['resources'] = {"ip": self.config.ip}
+                app_result['resources'] = {"ip": self.config.ip, "id": self.config.priority}

                 result[appk] = app_result

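Editor's note: with this hunk the ping reply carries the engine's configured priority as an "id" next to its IP, and that is the field the new health_checker.py (added further down) matches against. A minimal sketch of that round trip, using placeholder values rather than Valet code:

import json

# Shape of the ping reply the engine writes back (placeholder values).
engine_priority = 1                          # assumed CONF.engine.priority
ping_uuid = "request-uuid-from-the-checker"  # assumed stack_id of the ping row

reply = {
    "status": {"type": "ok", "message": "ping"},
    "resources": {"ip": "localhost", "id": engine_priority},
}
row = {"row 0": {"stack_id": ping_uuid, "placement": json.dumps(reply)}}

# What HealthCheck._read_response() effectively checks on such a row:
placement = json.loads(row["row 0"]["placement"])
alive = (row["row 0"]["stack_id"] == ping_uuid and
         placement["resources"]["id"] == engine_priority)
print(alive)  # True only when the answer came from the engine that was pinged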
@@ -19,7 +19,7 @@

 import os
 from oslo_config import cfg
-from valet.engine.conf import register_conf
+from valet.engine.conf import init_engine


 CONF = cfg.CONF
@@ -29,12 +29,7 @@ class Config(object):
     """Valet Engine Server Configuration."""

     def __init__(self, *default_config_files):
-        """Initialization."""
-        register_conf()
-        if default_config_files:
-            CONF(default_config_files=default_config_files)
-        else:
-            CONF(project='valet')
+        init_engine(default_config_files=default_config_files)

         # System parameters
         self.root_loc = os.path.dirname(CONF.default_config_files[0])
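Editor's note: Config now delegates option registration and config-file parsing to init_engine(), the same entry point health_checker.py uses below. A minimal sketch of that bootstrap path, assuming Valet is installed and /etc/valet/valet.conf exists:

from oslo_config import cfg
from valet.engine.conf import init_engine

# One call registers the engine option groups and parses the config files;
# afterwards the options are available on the global CONF object.
init_engine(default_config_files=['/etc/valet/valet.conf'])
print(cfg.CONF.engine.priority)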
@@ -17,10 +17,13 @@

 import atexit
 import os
+from oslo_config import cfg
 from signal import SIGTERM
 import sys
 import time

+CONF = cfg.CONF
+

 class Daemon(object):
     """A generic daemon class."""
@@ -82,10 +85,10 @@ class Daemon(object):
         os.dup2(so.fileno(), sys.stdout.fileno())
         os.dup2(se.fileno(), sys.stderr.fileno())

-        # write pidfile
         atexit.register(self.delpid)
-        pid = str(os.getpid())
-        file(self.pidfile, 'w+').write("%s\n" % pid)
+        # write pidfile - moved to OstroDaemon.run
+        # pid = str(os.getpid())
+        # file(self.pidfile, 'w+').write("%s\n" % pid)

     def delpid(self):
         """Remove pidfile."""
@@ -102,17 +105,16 @@ class Daemon(object):
         return pid

     def checkpid(self, pid):
-        """Check for the existence of a UNIX pid."""
-        if pid is None:
-            return False
+        """ Check For the existence of a unix pid. """
+        alive = False

         try:
+            if pid:
                 os.kill(pid, 0)
+                alive = True
         except OSError:
             self.delpid()
-            return False
-        else:
-            return True
+        return alive

     def start(self):
         """Start thedaemon."""
@@ -152,11 +154,6 @@ class Daemon(object):
             # print str(err)
             sys.exit(1)

-    def restart(self):
-        """Restart the daemon."""
-        self.stop()
-        self.start()
-
     def status(self):
         """Return instance's priority."""
         # Check for a pidfile to see if the daemon already runs
@@ -174,6 +171,11 @@ class Daemon(object):
         sys.stderr.write(message % self.pidfile)
         return status

+    def restart(self):
+        """Restart the daemon"""
+        self.stop()
+        self.start()
+
     def run(self):
         """You should override this method when you subclass Daemon."""
         """It will be called after the process has been daemonized by
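Editor's note: checkpid() is rewritten to return a boolean instead of mixing early returns, built around the usual os.kill(pid, 0) existence probe. A standalone sketch of that probe (not Valet code):

import os

def pid_is_alive(pid):
    # Signal 0 delivers nothing but raises OSError when the pid does not
    # exist (or cannot be signalled), so it doubles as a liveness check;
    # the daemon additionally removes its stale pidfile in that case.
    alive = False
    try:
        if pid:
            os.kill(pid, 0)
            alive = True
    except OSError:
        pass
    return alive

print(pid_is_alive(os.getpid()))  # True for the current process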
@@ -18,8 +18,7 @@
 """Database Cleaner."""

 from configuration import Config
-import sys
-from valet.api.db.models.music import Music
+from valet.common.music import Music


 class DBCleaner(object):
125  valet/engine/optimizer/ostro_server/health_checker.py  Normal file
@@ -0,0 +1,125 @@
+import json
+import os
+from oslo_config import cfg
+import sys
+import time
+import uuid
+from valet.common.conf import get_logger
+from valet.common.music import REST
+from valet.engine.conf import init_engine
+
+CONF = cfg.CONF
+
+
+class HealthCheck(object):
+
+    rest = None
+
+    def __init__(self, hosts=[], port='8080', keyspace='valet'):
+
+        self.tries = CONF.engine.health_timeout * 2
+        self.uuid = str(uuid.uuid4())
+
+        kwargs = {
+            'hosts': hosts,
+            'port': CONF.music.port,
+            'path': '/MUSIC/rest',
+            'timeout': CONF.music.interval,
+        }
+        self.rest = REST(**kwargs)
+
+    def ping(self, my_id):
+
+        engine_alive = False
+        try:
+            if self._send():
+                engine_alive = self._read_response(my_id)
+        finally:
+            self._delete_result()
+        return engine_alive
+
+    def _send(self):
+
+        data = {
+            "values": {"stack_id": self.uuid,
+                       "request": "[{\"action\": \"ping\", \"stack_id\": \"" + self.uuid + "\"}]"
+                       },
+            "consistencyInfo": {"type": "eventual"}
+        }
+
+        path = '/keyspaces/%(keyspace)s/tables/%(table)s/rows' % {
+            'keyspace': CONF.music.keyspace,
+            'table': CONF.music.request_table,
+        }
+        response = self.rest.request(method='post', path=path, data=data)
+        # print "SEND response: " + str(response.status_code)
+        return response.status_code == 204 if response else False
+
+    def _read_response(self, my_id):
+
+        found = False
+        path = '/keyspaces/%(keyspace)s/tables/%(table)s/rows?stack_id=%(uid)s' % {
+            'keyspace': CONF.music.keyspace,
+            'table': CONF.music.response_table,
+            'uid': self.uuid,
+        }
+
+        for i in range(self.tries):  # default: 12 * 0.5 = 6 sec.
+            time.sleep(0.5)
+            response = self.rest.request(method='get', path=path)
+
+            # logger.debug("READ respons body text: " + str(response.text))
+
+            if response.status_code == 200 and len(response.text) > 3:
+
+                j = json.loads(response.text)
+                stack_id = j['row 0']['stack_id']
+                placement = json.loads(j['row 0']['placement'])
+                engine_id = placement['resources']['id']
+
+                # logger.debug("health 'stack_id': " + stack_id + ", engine_id=" + str(engine_id))
+                if stack_id == self.uuid and engine_id == my_id:
+                    found = True
+                    break
+
+        return found
+
+    def _delete_result(self):
+        # leave the table clean - delete from requests and responses
+        try:
+            path = '/keyspaces/%(keyspace)s/tables/%(table)s/rows?stack_id=%(uid)s' % {
+                'keyspace': CONF.music.keyspace,
+                'table': CONF.music.request_table,
+                'uid': self.uuid
+            }
+            data = {
+                "consistencyInfo": {"type": "eventual"}
+            }
+            self.rest.request(method='delete', path=path, data=data)
+
+            path = '/keyspaces/%(keyspace)s/tables/%(table)s/rows?stack_id=%(uid)s' % {
+                'keyspace': CONF.music.keyspace,
+                'table': CONF.music.response_table,
+                'uid': self.uuid
+            }
+            self.rest.request(method='delete', path=path, data=data)
+
+            # print "DELETE response: " + str(response.status_code)
+        except Exception:
+            pass
+
+
+if __name__ == "__main__":
+
+    alive = False
+    code = 0
+    init_engine(default_config_files=['/etc/valet/valet.conf'])
+    logger = get_logger("ostro_daemon")
+    if os.path.exists(CONF.engine.pid):
+        alive = HealthCheck().ping(CONF.engine.priority)
+    if alive:
+        code = CONF.engine.priority
+        logger.info("HealthCheck - Alive, priority = {}".format(CONF.engine.priority))
+    else:
+        logger.warn("HealthCheck - Engine is DEAD!")
+    sys.exit(code)
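Editor's note: the script exits with the engine's priority when the ping is answered and with 0 otherwise, which is what the updated ha_valet "test" command (see the ha_valet config hunks below) reads over ssh. A minimal in-process sketch of the same check, reusing the module's own bootstrap:

from oslo_config import cfg
from valet.engine.conf import init_engine
from valet.engine.optimizer.ostro_server.health_checker import HealthCheck

init_engine(default_config_files=['/etc/valet/valet.conf'])
checker = HealthCheck()   # defaults, as the script's __main__ block does
if checker.ping(cfg.CONF.engine.priority):
    print("engine %s answered the ping" % cfg.CONF.engine.priority)
else:
    print("engine is not answering pings")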
@@ -18,10 +18,10 @@
 import os
 import sys
 import traceback
+from valet.common.conf import get_logger
 from valet.engine.optimizer.ostro.ostro import Ostro
 from valet.engine.optimizer.ostro_server.configuration import Config
 from valet.engine.optimizer.ostro_server.daemon import Daemon
-from valet.engine.optimizer.util.util import init_logger


 class OstroDaemon(Daemon):
@@ -39,6 +39,10 @@ class OstroDaemon(Daemon):
             self.logger.error("ostro bootstrap failed")
             sys.exit(2)

+        # write pidfile
+        pid = str(os.getpid())
+        file(self.pidfile, 'w+').write("%s\n" % pid)
+
         ostro.run_ostro()


@@ -58,6 +62,8 @@ if __name__ == "__main__":
     # Configuration
     try:
         config = Config()
+        ''' logger '''
+        logger = get_logger("ostro_daemon")
         config_status = config.configure()
         if config_status != "success":
             print(config_status)
@@ -68,9 +74,6 @@ if __name__ == "__main__":
                      config.app_log_loc, os.path.dirname(config.process)]
         verify_dirs(dirs_list)

-        """ logger """
-        logger = init_logger(config)
-
         # Start daemon process
         daemon = OstroDaemon(config.priority, config.process, logger)

@@ -85,7 +88,7 @@ if __name__ == "__main__":
         exit_code = exit_code or 0

     except Exception:
-        logger.error(traceback.format_exc())
+        print(traceback.format_exc())
         exit_code = 2

     sys.exit(int(exit_code))
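Editor's note: the pidfile is now written inside OstroDaemon.run(), i.e. only after the Ostro bootstrap has succeeded, and health_checker.py only attempts a ping when that pidfile exists. A small sketch of the resulting gating, with a placeholder path standing in for CONF.engine.pid:

import os

PIDFILE = '/var/run/valet/ostro-daemon.pid'   # placeholder for CONF.engine.pid

def engine_worth_pinging():
    # No pidfile means the daemon never finished bootstrapping, so the
    # health check can report "dead" without touching Music at all.
    return os.path.exists(PIDFILE)

print(engine_worth_pinging())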
@@ -17,8 +17,6 @@

 from os import listdir, stat
 from os.path import isfile, join
-import logging
-from logging.handlers import RotatingFileHandler


 def get_logfile(_loc, _max_log_size, _name):
@@ -88,22 +86,3 @@ def adjust_json_string(_data):
     _data = _data.replace('_"true"', "_true")

     return _data
-
-
-def init_logger(config):
-    """Return an initialized logger."""
-    log_formatter = logging.Formatter("%(asctime)s - %(levelname)s - "
-                                      "%(message)s")
-    log_handler = RotatingFileHandler(config.logging_loc + config.logger_name,
-                                      mode='a',
-                                      maxBytes=config.max_main_log_size,
-                                      backupCount=2,
-                                      encoding=None,
-                                      delay=0)
-    log_handler.setFormatter(log_formatter)
-    logger = logging.getLogger(config.logger_name)
-    logger.setLevel(logging.DEBUG if config.logging_level == "debug"
-                    else logging.INFO)
-    logger.addHandler(log_handler)
-
-    return logger
@@ -184,7 +184,7 @@ class Compute(object):
                 if hasattr(hv, 'servers'):
                     server_list = hv.__getattr__('servers')
                     for s in server_list:
-                        _vm_list.append(s.uuid)
+                        _vm_list.append(s['uuid'])

         except (ValueError, KeyError, TypeError):
             self.logger.error(traceback.format_exc())
@@ -198,7 +198,7 @@ class Compute(object):
             try:
                 vm_name = server.name
                 _vm_detail.append(vm_name)
-                az = server.__getattr("OS-EXT-AZ:availability_zone")
+                az = server.__getattr__("OS-EXT-AZ:availability_zone")
                 _vm_detail.append(az)
                 metadata = server.metadata
                 _vm_detail.append(metadata)
@@ -49,9 +49,9 @@ priority=1
 host=valet1
 user=m04060
 stand_by_list=valet2
-start="ssh -o ConnectTimeout=1 %s@%s 'python /usr/local/lib/python2.7/dist-packages/valet/engine/optimizer/ostro_server/ostro_daemon.py -c start'" % (user, host)
+start="ssh -o ConnectTimeout=1 %s@%s 'python /usr/local/lib/python2.7/dist-packages/valet/engine/optimizer/ostro_server/ostro_daemon.py -c restart'" % (user, host)
 stop="ssh -o ConnectTimeout=1 %s@%s 'python /usr/local/lib/python2.7/dist-packages/valet/engine/optimizer/ostro_server/ostro_daemon.py -c stop'" % (user, host)
-test="ssh -o ConnectTimeout=1 %s@%s 'python /usr/local/lib/python2.7/dist-packages/valet/engine/optimizer/ostro_server/ostro_daemon.py ; exit $?'" % (user, host)
+test="ssh -o ConnectTimeout=1 %s@%s 'python /usr/local/lib/python2.7/dist-packages/valet/engine/optimizer/ostro_server/health_checker.py ; exit $?'" % (user, host)


 [ValetApi]
@@ -66,7 +66,6 @@ max_log_size = 1000000
 # Set the maximum number of time-series log files
 max_num_of_logs = 10

-
 PRIMARY_SETUP = 1
 RETRY_COUNT = 3  # How many times to retry ping command
 CONNECT_TIMEOUT = 3  # Ping timeout
@@ -74,7 +73,7 @@ MAX_QUICK_STARTS = 10  # we stop if there are > 10 restart in quick succession
 QUICK_RESTART_SEC = 150  # we consider it a quick restart if less than this

 # HA Configuration
-HEARTBEAT_SEC = 5  # Heartbeat interval in seconds
+HEARTBEAT_SEC = 10  # Heartbeat interval in seconds


 NAME = 'name'
@@ -101,6 +100,8 @@ havalet_opts = [
     cfg.StrOpt(TEST_COMMAND, help='test command')
 ]

+# common.init_conf("havalet.log", grp2opt={api_group: havalet_opts, ostro_group: havalet_opts})
+
 CONF.register_group(api_group)
 CONF.register_opts(havalet_opts, api_group)

@@ -225,7 +226,7 @@ class HaValetThread (threading.Thread):
                 time.sleep(HEARTBEAT_SEC)
             else:
                 # No valet running. Wait for higher priority valet to activate.
-                time.sleep(HEARTBEAT_SEC * my_priority)
+                time.sleep(HEARTBEAT_SEC / my_priority)

             self.log.info('checking status here - ' + host +
                           ', my priority: ' + str(my_priority))
@@ -388,7 +389,7 @@ class HaValetThread (threading.Thread):
         try:
             self.log.info('activate_command: ' + activate_command)
             subprocess.check_call(activate_command, shell=True)
-            time.sleep(HEARTBEAT_SEC * priority)  # allow some grace period
+            time.sleep(HEARTBEAT_SEC)  # allow some grace period
             return True
         except subprocess.CalledProcessError as e:
             self.log.error(str(e))
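Editor's note: a quick check of the changed back-off arithmetic, taking HEARTBEAT_SEC = 10 as set above. With the old multiplication the priority-2 standby (ha_valet2.cfg) waited 20 seconds between checks; with the division it waits 5, so standbys now re-check more often than the primary rather than less. A throwaway sketch, not project code:

HEARTBEAT_SEC = 10  # as set in this change

for my_priority in (1, 2):
    old_wait = HEARTBEAT_SEC * my_priority   # previous behaviour
    new_wait = HEARTBEAT_SEC / my_priority   # behaviour after this change
    print("priority %d: %ss -> %ss" % (my_priority, old_wait, new_wait))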
@@ -49,9 +49,9 @@ priority=2
 host=valet2
 user=m04060
 stand_by_list=valet1
-start="ssh -o ConnectTimeout=1 %s@%s 'python /usr/local/lib/python2.7/dist-packages/valet/engine/optimizer/ostro_server/ostro_daemon.py -c start'" % (user, host)
+start="ssh -o ConnectTimeout=1 %s@%s 'python /usr/local/lib/python2.7/dist-packages/valet/engine/optimizer/ostro_server/ostro_daemon.py -c restart'" % (user, host)
 stop="ssh -o ConnectTimeout=1 %s@%s 'python /usr/local/lib/python2.7/dist-packages/valet/engine/optimizer/ostro_server/ostro_daemon.py -c stop'" % (user, host)
-test="ssh -o ConnectTimeout=1 %s@%s 'python /usr/local/lib/python2.7/dist-packages/valet/engine/optimizer/ostro_server/ostro_daemon.py ; exit $?'" % (user, host)
+test="ssh -o ConnectTimeout=1 %s@%s 'python /usr/local/lib/python2.7/dist-packages/valet/engine/optimizer/ostro_server/health_checker.py ; exit $?'" % (user, host)


 [ValetApi]
@@ -15,15 +15,13 @@

 """Base."""

+import mock
 from oslo_config import fixture as fixture_config
-from oslo_log import log as logging
 from oslotest.base import BaseTestCase
+from valet import api
 from valet.tests.functional.valet_validator.common import init


-LOG = logging.getLogger(__name__)
-
-
 class Base(BaseTestCase):
     """Test case base class for all unit tests."""

@@ -33,6 +31,7 @@ class Base(BaseTestCase):

         self.CONF = self.useFixture(fixture_config.Config()).conf
         init.prepare(self.CONF)
+        api.LOG = mock.MagicMock()

     def setUp(self):
         """Setup."""
@@ -40,11 +40,3 @@ class TestConfig(Base):

         except Exception as ex:
             self.validate_test(isinstance(ex, cfg.ConfigFilesNotFoundError))
-
-    def test_config_io(self):
-        """Test config I/O and validate config status is success."""
-        cfg.CONF.clear()
-        config = Config("etc/valet/valet.conf")
-        config_status = config.configure()
-
-        self.validate_test(config_status == "success")
76  valet/tests/unit/engine/test_ping.py  Normal file
@@ -0,0 +1,76 @@
+import mock
+import valet.engine.optimizer.ostro_server.health_checker as ping
+from valet.engine.optimizer.ostro_server.health_checker import HealthCheck
+from valet.tests.base import Base
+
+json = r'{"row 0":{"placement": "{\"status\": {\"message\": \"ping\", \"type\": \"ok\"},' \
+       r'\"resources\": {\"ip\": \"localhost\", \"id\": %d}}","stack_id":"%s"}}'
+
+
+class TestHealthCheck(Base):
+
+    def setUp(self):
+        super(TestHealthCheck, self).setUp()
+        ping.CONF = mock.MagicMock()
+        ping.REST = mock.MagicMock()
+        self.pingger = HealthCheck()
+
+    @mock.patch.object(HealthCheck, '_send')
+    @mock.patch.object(HealthCheck, '_read_response')
+    def test_ping(self, mock_read, mock_send):
+        mock_send.return_value = True
+        mock_read.return_value = True
+
+        self.validate_test(self.pingger.ping(1))
+
+    @mock.patch.object(HealthCheck, '_send')
+    @mock.patch.object(HealthCheck, '_read_response')
+    def test_ping_unhappy(self, mock_read, mock_send):
+        mock_send.return_value = False
+        mock_read.return_value = True
+
+        self.validate_test(not self.pingger.ping(1))
+
+    @mock.patch.object(HealthCheck, '_send')
+    @mock.patch.object(HealthCheck, '_read_response')
+    def test_ping_unhappy_2(self, mock_read, mock_send):
+        mock_send.return_value = True
+        mock_read.return_value = False
+
+        self.validate_test(not self.pingger.ping(1))
+
+    def test_send(self):
+        self.pingger.rest.request.return_value.status_code = 204
+        self.validate_test(self.pingger._send())
+
+    def test_send_unhappy(self):
+        self.pingger.rest.request.return_value.status_code = 200
+        self.validate_test(not self.pingger._send())
+
+    def test_read_response(self):
+        id = 1
+        self.pingger.rest.request.return_value.status_code = 200
+        self.pingger.rest.request.return_value.text = json % (id, self.pingger.uuid)
+        self.validate_test(self.pingger._read_response(id))
+
+    def test_read_response_from_other_engine(self):
+        my_id = 1
+        id = 2
+        self.pingger.rest.request.return_value.status_code = 200
+        self.pingger.rest.request.return_value.text = json % (id, self.pingger.uuid)
+        self.validate_test(not self.pingger._read_response(my_id))
+
+    def test_read_response_unhappy_wrong_res_code(self):
+        self.pingger.rest.request.return_value.status_code = 204
+        self.pingger.rest.request.return_value.text = self.pingger.uuid
+        self.validate_test(not self.pingger._read_response(1))
+
+    def test_read_response_unhappy_wrong_body(self):
+        self.pingger.rest.request.return_value.status_code = 200
+        self.pingger.rest.request.return_value.text = ""
+        self.validate_test(not self.pingger._read_response(1))
+
+    def test_read_response_unhappy_wrong_both(self):
+        self.pingger.rest.request.return_value.status_code = 204
+        self.pingger.rest.request.return_value.text = ""
+        self.validate_test(not self.pingger._read_response(1))