Merge "Migrate Eventlet components to the new backend system"
This commit is contained in:
commit
8371e18677
@ -20,12 +20,22 @@ help_for_backdoor_port = (
|
||||
"in listening on a random tcp port number; <port> results in listening "
|
||||
"on the specified port number (and not enabling backdoor if that port "
|
||||
"is in use); and <start>:<end> results in listening on the smallest "
|
||||
"unused port number within the specified range of port numbers. The "
|
||||
"unused port number within the specified range of port numbers. The "
|
||||
"chosen port is displayed in the service's log file.")
|
||||
eventlet_backdoor_opts = [
|
||||
cfg.StrOpt('backdoor_port',
|
||||
deprecated_for_removal=True,
|
||||
deprecated_reason=(
|
||||
"The 'backdoor_port' option is deprecated and will be"
|
||||
" removed in a future release."
|
||||
),
|
||||
help="Enable eventlet backdoor. %s" % help_for_backdoor_port),
|
||||
cfg.StrOpt('backdoor_socket',
|
||||
deprecated_for_removal=True,
|
||||
deprecated_reason=(
|
||||
"The 'backdoor_socket' option is deprecated and will be"
|
||||
" removed in a future release."
|
||||
),
|
||||
help="Enable eventlet backdoor, using the provided path"
|
||||
" as a unix socket that can receive connections. This"
|
||||
" option is mutually exclusive with 'backdoor_port' in"
|
||||
@ -55,9 +65,19 @@ service_opts = [
|
||||
|
||||
wsgi_opts = [
|
||||
cfg.StrOpt('api_paste_config',
|
||||
deprecated_for_removal=True,
|
||||
deprecated_reason=(
|
||||
"The 'api_paste_config' option is deprecated and will be"
|
||||
" removed in a future release."
|
||||
),
|
||||
default="api-paste.ini",
|
||||
help='File name for the paste.deploy config for api service'),
|
||||
cfg.StrOpt('wsgi_log_format',
|
||||
deprecated_for_removal=True,
|
||||
deprecated_reason=(
|
||||
"The 'wsgi_log_format' option is deprecated and will be"
|
||||
" removed in a future release."
|
||||
),
|
||||
default='%(client_ip)s "%(request_line)s" status: '
|
||||
'%(status_code)s len: %(body_length)s time:'
|
||||
' %(wall_seconds).7f',
|
||||
@ -66,13 +86,28 @@ wsgi_opts = [
|
||||
'formatted into it: client_ip, date_time, request_line, '
|
||||
'status_code, body_length, wall_seconds.'),
|
||||
cfg.IntOpt('tcp_keepidle',
|
||||
deprecated_for_removal=True,
|
||||
deprecated_reason=(
|
||||
"The 'tcp_keepidle' option is deprecated and will be"
|
||||
" removed in a future release."
|
||||
),
|
||||
default=600,
|
||||
help="Sets the value of TCP_KEEPIDLE in seconds for each "
|
||||
"server socket. Not supported on OS X."),
|
||||
cfg.IntOpt('wsgi_default_pool_size',
|
||||
deprecated_for_removal=True,
|
||||
deprecated_reason=(
|
||||
"The 'wsgi_default_pool_size' option is deprecated and will"
|
||||
" be removed in a future release."
|
||||
),
|
||||
default=100,
|
||||
help="Size of the pool of greenthreads used by wsgi"),
|
||||
cfg.IntOpt('max_header_line',
|
||||
deprecated_for_removal=True,
|
||||
deprecated_reason=(
|
||||
"The 'max_header_line' option is deprecated and will be"
|
||||
" removed in a future release."
|
||||
),
|
||||
default=16384,
|
||||
help="Maximum line size of message headers to be accepted. "
|
||||
"max_header_line may need to be increased when using "
|
||||
@ -80,15 +115,30 @@ wsgi_opts = [
|
||||
"is configured to use PKI tokens with big service "
|
||||
"catalogs)."),
|
||||
cfg.BoolOpt('wsgi_keep_alive',
|
||||
deprecated_for_removal=True,
|
||||
deprecated_reason=(
|
||||
"The 'wsgi_keep_alive' option is deprecated and will be"
|
||||
" removed in a future release."
|
||||
),
|
||||
default=True,
|
||||
help="If False, closes the client socket connection "
|
||||
"explicitly."),
|
||||
cfg.IntOpt('client_socket_timeout', default=900,
|
||||
deprecated_for_removal=True,
|
||||
deprecated_reason=(
|
||||
"The 'client_socket_timeout' option is deprecated and will"
|
||||
" be removed in a future release."
|
||||
),
|
||||
help="Timeout for client connections' socket operations. "
|
||||
"If an incoming connection is idle for this number of "
|
||||
"seconds it will be closed. A value of '0' means "
|
||||
"wait forever."),
|
||||
cfg.BoolOpt('wsgi_server_debug',
|
||||
deprecated_for_removal=True,
|
||||
deprecated_reason=(
|
||||
"The 'wsgi_server_debug' option is deprecated and will be"
|
||||
" removed in a future release."
|
||||
),
|
||||
default=False,
|
||||
help="True if the server should send exception tracebacks to "
|
||||
"the clients on 500 errors. If False, the server will "
|
||||
@ -97,21 +147,46 @@ wsgi_opts = [
|
||||
|
||||
ssl_opts = [
|
||||
cfg.StrOpt('ca_file',
|
||||
deprecated_for_removal=True,
|
||||
deprecated_reason=(
|
||||
"The 'ca_file' option is deprecated and will be"
|
||||
" removed in a future release."
|
||||
),
|
||||
help="CA certificate file to use to verify "
|
||||
"connecting clients."),
|
||||
cfg.StrOpt('cert_file',
|
||||
deprecated_for_removal=True,
|
||||
deprecated_reason=(
|
||||
"The 'cert_file' option is deprecated and will be"
|
||||
" removed in a future release."
|
||||
),
|
||||
help="Certificate file to use when starting "
|
||||
"the server securely."),
|
||||
cfg.StrOpt('key_file',
|
||||
deprecated_for_removal=True,
|
||||
deprecated_reason=(
|
||||
"The 'key_file' option is deprecated and will be"
|
||||
" removed in a future release."
|
||||
),
|
||||
help="Private key file to use when starting "
|
||||
"the server securely."),
|
||||
cfg.StrOpt('version',
|
||||
deprecated_for_removal=True,
|
||||
deprecated_reason=(
|
||||
"The 'version' option is deprecated and will be"
|
||||
" removed in a future release."
|
||||
),
|
||||
help='SSL version to use (valid only if SSL enabled). '
|
||||
'Valid values are TLSv1 and SSLv23. SSLv2, SSLv3, '
|
||||
'TLSv1_1, and TLSv1_2 may be available on some '
|
||||
'distributions.'
|
||||
),
|
||||
cfg.StrOpt('ciphers',
|
||||
deprecated_for_removal=True,
|
||||
deprecated_reason=(
|
||||
"The 'ciphers' option is deprecated and will be"
|
||||
" removed in a future release."
|
||||
),
|
||||
help='Sets the list of available ciphers. value should be a '
|
||||
'string in the OpenSSL cipher list format.'
|
||||
),
|
||||
|
@ -15,19 +15,13 @@
|
||||
|
||||
|
||||
from oslo_service.backend.base import BaseBackend
|
||||
from oslo_service import loopingcall
|
||||
from oslo_service import service
|
||||
from oslo_service import threadgroup
|
||||
from oslo_service.backend.eventlet import loopingcall
|
||||
from oslo_service.backend.eventlet import service
|
||||
from oslo_service.backend.eventlet import threadgroup
|
||||
|
||||
|
||||
class EventletBackend(BaseBackend):
|
||||
"""Backend implementation for Eventlet.
|
||||
|
||||
In this revision, this is a "stub" for a real backend; right now it imports
|
||||
the regular implementation of oslo.service, which will be moved
|
||||
entirely into a backend in a subsequent patch.
|
||||
|
||||
"""
|
||||
"""Backend implementation for Eventlet."""
|
||||
|
||||
@staticmethod
|
||||
def get_service_components():
|
||||
|
439
oslo_service/backend/eventlet/loopingcall.py
Normal file
439
oslo_service/backend/eventlet/loopingcall.py
Normal file
@ -0,0 +1,439 @@
|
||||
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# Copyright 2011 Justin Santa Barbara
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
import functools
|
||||
import random
|
||||
import sys
|
||||
import time
|
||||
|
||||
from eventlet import event
|
||||
from eventlet import greenthread
|
||||
from oslo_log import log as logging
|
||||
from oslo_utils import eventletutils
|
||||
from oslo_utils import excutils
|
||||
from oslo_utils import reflection
|
||||
from oslo_utils import timeutils
|
||||
|
||||
from oslo_service._i18n import _
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class LoopingCallDone(Exception):
    """Raised by a looped function to terminate its looping call cleanly.

    Somewhat analogous to StopIteration: the function being looped by a
    LoopingCallBase raises this exception to break out of the loop
    normally.  The optional value handed to the constructor becomes the
    return value of LoopingCallBase.wait().
    """

    def __init__(self, retvalue=True):
        """:param retvalue: Value that LoopingCallBase.wait() should return."""
        self.retvalue = retvalue
||||
|
||||
|
||||
class LoopingCallTimeOut(Exception):
    """Raised when a LoopingCall exceeds the timeout it was given.

    The LoopingCall will raise this exception when a timeout is provided
    and it is exceeded.
    """
|
||||
|
||||
|
||||
def _safe_wrapper(f, kind, func_name):
    """Return a wrapper around *f* that logs and absorbs unexpected errors.

    LoopingCallDone is re-raised untouched so the loop runner can stop the
    loop normally; any other exception is logged (with traceback) and
    converted into a return value of 0, i.e. "idle for zero seconds and
    call again immediately".
    """

    def _call(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except LoopingCallDone:
            # Normal termination signal -- the outer handler processes it.
            raise
        except Exception:
            LOG.error('%(kind)s %(func_name)r failed',
                      {'kind': kind, 'func_name': func_name},
                      exc_info=True)
            return 0

    return _call
|
||||
|
||||
|
||||
class LoopingCallBase:
    """Common machinery for running a function repeatedly in a greenthread.

    Subclasses supply a ``start()`` that passes an ``idle_for`` policy
    callable to :meth:`_start`; the policy decides how long to sleep
    between invocations of ``self.f``.
    """

    # Human-readable label used in log messages; overridden by subclasses.
    _KIND = _("Unknown looping call")

    _RUN_ONLY_ONE_MESSAGE = _("A looping call can only run one function"
                              " at a time")

    def __init__(self, f=None, *args, **kw):
        # f plus args/kw are what gets invoked on every loop iteration.
        self.args = args
        self.kw = kw
        self.f = f
        # Greenthread currently running the loop; None when not running.
        self._thread = None
        # eventlet Event delivering the loop's final result; created in
        # _start(), consumed by wait().
        self.done = None
        # Set to request the loop to stop; also used as the sleep primitive
        # so a stop() call wakes a sleeping loop immediately.
        self._abort = eventletutils.EventletEvent()

    @property
    def _running(self):
        # The loop runs until someone sets the abort event.
        return not self._abort.is_set()

    def stop(self):
        """Request the loop to stop; a sleeping loop wakes up immediately."""
        if self._running:
            self._abort.set()

    def wait(self):
        """Block until the loop finishes; return its final value.

        NOTE(review): ``self.done`` is only created by ``_start()``, so
        calling wait() before start() raises AttributeError.
        """
        return self.done.wait()

    def _on_done(self, gt, *args, **kwargs):
        # Greenthread link callback: clear the handle so start() can be
        # called again.
        self._thread = None

    def _sleep(self, timeout):
        # Sleeping on the abort event (rather than eventlet.sleep) lets
        # stop() interrupt the sleep.
        self._abort.wait(timeout)

    def _start(self, idle_for, initial_delay=None, stop_on_exception=True):
        """Start the looping

        :param idle_for: Callable that takes two positional arguments, returns
                         how long to idle for. The first positional argument is
                         the last result from the function being looped and the
                         second positional argument is the time it took to
                         calculate that result.
        :param initial_delay: How long to delay before starting the looping.
                              Value is in seconds.
        :param stop_on_exception: Whether to stop if an exception occurs.
        :returns: eventlet event instance
        """
        if self._thread is not None:
            raise RuntimeError(self._RUN_ONLY_ONE_MESSAGE)
        self.done = event.Event()
        self._abort.clear()
        self._thread = greenthread.spawn(
            self._run_loop, idle_for,
            initial_delay=initial_delay, stop_on_exception=stop_on_exception)
        self._thread.link(self._on_done)
        return self.done

    # NOTE(bnemec): This is just a wrapper function we can mock so we aren't
    # affected by other users of the StopWatch class.
    def _elapsed(self, watch):
        return watch.elapsed()

    def _run_loop(self, idle_for_func,
                  initial_delay=None, stop_on_exception=True):
        # Body of the spawned greenthread: call self.f repeatedly, asking
        # idle_for_func how long to sleep after each call.
        kind = self._KIND
        func_name = reflection.get_callable_name(self.f)
        # When not stopping on exceptions, wrap f so failures are logged
        # and turned into a zero-sleep retry instead of ending the loop.
        func = self.f if stop_on_exception else _safe_wrapper(self.f, kind,
                                                              func_name)
        if initial_delay:
            self._sleep(initial_delay)
        try:
            watch = timeutils.StopWatch()
            while self._running:
                watch.restart()
                result = func(*self.args, **self.kw)
                watch.stop()
                # Re-check after the call: stop() may have been invoked
                # while f was running.
                if not self._running:
                    break
                idle = idle_for_func(result, self._elapsed(watch))
                LOG.trace('%(kind)s %(func_name)r sleeping '
                          'for %(idle).02f seconds',
                          {'func_name': func_name, 'idle': idle,
                           'kind': kind})
                self._sleep(idle)
        except LoopingCallDone as e:
            # Normal termination requested by the looped function.
            self.done.send(e.retvalue)
        except Exception:
            exc_info = sys.exc_info()
            try:
                LOG.error('%(kind)s %(func_name)r failed',
                          {'kind': kind, 'func_name': func_name},
                          exc_info=exc_info)
                # Deliver the exception to whoever is blocked in wait().
                self.done.send_exception(*exc_info)
            finally:
                # Break the traceback reference cycle.
                del exc_info
            return
        else:
            # Loop exited because stop() was called.
            self.done.send(True)
|
||||
|
||||
|
||||
class FixedIntervalLoopingCall(LoopingCallBase):
    """A looping call that invokes its function at a fixed interval."""

    _RUN_ONLY_ONE_MESSAGE = _("A fixed interval looping call can only run"
                              " one function at a time")

    _KIND = _('Fixed interval looping call')

    def start(self, interval, initial_delay=None, stop_on_exception=True):
        def _idle_for(result, elapsed):
            # Positive overrun means the call took longer than the interval.
            overrun = round(elapsed - interval, 2)
            if overrun > 0:
                name = reflection.get_callable_name(self.f)
                LOG.warning('Function %(func_name)r run outlasted '
                            'interval by %(delay).2f sec',
                            {'func_name': name, 'delay': overrun})
            # Sleep for the remainder of the interval, or not at all when
            # the call overran it.
            return -overrun if overrun < 0 else 0

        return self._start(_idle_for, initial_delay=initial_delay,
                           stop_on_exception=stop_on_exception)
|
||||
|
||||
|
||||
class FixedIntervalWithTimeoutLoopingCall(LoopingCallBase):
    """A fixed interval looping call with timeout checking mechanism.

    Behaves like FixedIntervalLoopingCall, but additionally raises
    LoopingCallTimeOut once the total wall-clock time since start()
    exceeds ``timeout`` seconds (a ``timeout`` of 0 disables the check).
    """

    # BUG FIX: the message previously read "... can only run one function
    # at at a time" (duplicated "at", dangling "and").
    _RUN_ONLY_ONE_MESSAGE = _("A fixed interval looping call with timeout"
                              " checking can only run one function at"
                              " a time")

    _KIND = _('Fixed interval looping call with timeout checking.')

    def start(self, interval, initial_delay=None,
              stop_on_exception=True, timeout=0):
        """Start the loop.

        :param interval: seconds between invocations of the looped function
        :param initial_delay: seconds to wait before the first invocation
        :param stop_on_exception: if False, exceptions from the looped
            function are logged and the loop continues
        :param timeout: overall wall-clock budget in seconds; 0 means no
            timeout
        :raises LoopingCallTimeOut: (from within the loop) once the budget
            is exceeded
        """
        start_time = time.time()

        def _idle_for(result, elapsed):
            delay = round(elapsed - interval, 2)
            if delay > 0:
                func_name = reflection.get_callable_name(self.f)
                LOG.warning('Function %(func_name)r run outlasted '
                            'interval by %(delay).2f sec',
                            {'func_name': func_name, 'delay': delay})
            # Enforce the overall timeout before scheduling the next run.
            elapsed_time = time.time() - start_time
            if timeout > 0 and elapsed_time > timeout:
                raise LoopingCallTimeOut(
                    _('Looping call timed out after %.02f seconds')
                    % elapsed_time)
            return -delay if delay < 0 else 0

        return self._start(_idle_for, initial_delay=initial_delay,
                           stop_on_exception=stop_on_exception)
|
||||
|
||||
|
||||
class DynamicLoopingCall(LoopingCallBase):
    """A looping call which sleeps until the next known event.

    The looped function must return the number of seconds to sleep before
    it should be invoked again.
    """

    _RUN_ONLY_ONE_MESSAGE = _("A dynamic interval looping call can only run"
                              " one function at a time")
    _TASK_MISSING_SLEEP_VALUE_MESSAGE = _(
        "A dynamic interval looping call should supply either an"
        " interval or periodic_interval_max"
    )

    _KIND = _('Dynamic interval looping call')

    def start(self, initial_delay=None, periodic_interval_max=None,
              stop_on_exception=True):
        def _idle_for(suggested_delay, elapsed):
            if suggested_delay is None:
                if periodic_interval_max is None:
                    # Note(suro-patz): An application used to receive a
                    # TypeError thrown from eventlet layer, before
                    # this RuntimeError was introduced.
                    raise RuntimeError(
                        self._TASK_MISSING_SLEEP_VALUE_MESSAGE)
                # Task gave no suggestion: fall back to the configured max.
                return periodic_interval_max
            # Honour the task's suggestion, capped at the maximum if one
            # was configured.
            if periodic_interval_max is None:
                return suggested_delay
            return min(suggested_delay, periodic_interval_max)

        return self._start(_idle_for, initial_delay=initial_delay,
                           stop_on_exception=stop_on_exception)
|
||||
|
||||
|
||||
class BackOffLoopingCall(LoopingCallBase):
    """Run a method in a loop with backoff on error.

    The passed in function should return True (no error, return to
    initial_interval),
    False (error, start backing off), or raise LoopingCallDone(retvalue=None)
    (quit looping, return retvalue if set).

    When there is an error, the call will backoff on each failure. The
    backoff will be equal to double the previous base interval times some
    jitter. If a backoff would put it over the timeout, it halts immediately,
    so the call will never take more than timeout, but may and likely will
    take less time.

    When the function return value is True or False, the interval will be
    multiplied by a random jitter. If min_jitter or max_jitter is None,
    there will be no jitter (jitter=1). If min_jitter is below 0.5, the code
    may not backoff and may increase its retry rate.

    If func constantly returns True, this function will not return.

    To run a func and wait for a call to finish (by raising a LoopingCallDone):

        timer = BackOffLoopingCall(func)
        response = timer.start().wait()

    :param initial_delay: delay before first running of function
    :param starting_interval: initial interval in seconds between calls to
                              function. When an error occurs and then a
                              success, the interval is returned to
                              starting_interval
    :param timeout: time in seconds before a LoopingCallTimeout is raised.
                    The call will never take longer than timeout, but may quit
                    before timeout.
    :param max_interval: The maximum interval between calls during errors
    :param jitter: Used to vary when calls are actually run to avoid group of
                   calls all coming at the exact same time. Uses
                   random.gauss(jitter, 0.1), with jitter as the mean for the
                   distribution. If set below .5, it can cause the calls to
                   come more rapidly after each failure.
    :param min_interval: The minimum interval in seconds between calls to
                         function.
    :raises: LoopingCallTimeout if time spent doing error retries would exceed
             timeout.
    """

    _RNG = random.SystemRandom()
    _KIND = _('Dynamic backoff interval looping call')
    _RUN_ONLY_ONE_MESSAGE = _("A dynamic backoff interval looping call can"
                              " only run one function at a time")

    def __init__(self, f=None, *args, **kw):
        # BUG FIX: the previous ``super().__init__(f=f, *args, **kw)``
        # raised ``TypeError: got multiple values for argument 'f'`` as
        # soon as any positional argument was supplied, because *args
        # unpacks into the positional slot already filled by the ``f=f``
        # keyword.  Passing ``f`` positionally is equivalent and safe.
        super().__init__(f, *args, **kw)
        # Accumulated idle time spent in error backoff (checked against
        # ``timeout`` in _idle_for).
        self._error_time = 0
        # Current base interval; doubled (with jitter) on each error.
        self._interval = 1

    def start(self, initial_delay=None, starting_interval=1, timeout=300,
              max_interval=300, jitter=0.75, min_interval=0.001):
        if self._thread is not None:
            raise RuntimeError(self._RUN_ONLY_ONE_MESSAGE)

        # Reset any prior state.
        self._error_time = 0
        self._interval = starting_interval

        def _idle_for(success, _elapsed):
            random_jitter = abs(self._RNG.gauss(jitter, 0.1))
            if success:
                # Reset error state now that it didn't error...
                self._interval = starting_interval
                self._error_time = 0
                return self._interval * random_jitter
            else:
                # Perform backoff, random jitter around the next interval
                # bounded by min_interval and max_interval.
                idle = max(self._interval * 2 * random_jitter, min_interval)
                idle = min(idle, max_interval)
                # Calculate the next interval based on the mean, so that the
                # backoff grows at the desired rate.
                self._interval = max(self._interval * 2 * jitter, min_interval)
                # Don't go over timeout, end early if necessary. If
                # timeout is 0, keep going.
                if timeout > 0 and self._error_time + idle > timeout:
                    raise LoopingCallTimeOut(
                        _('Looping call timed out after %.02f seconds')
                        % (self._error_time + idle))
                self._error_time += idle
                return idle

        return self._start(_idle_for, initial_delay=initial_delay)
|
||||
|
||||
|
||||
class RetryDecorator:
    """Decorator for retrying a function upon suggested exceptions.

    The decorated function is retried for the given number of times, and the
    sleep time between the retries is incremented until max sleep time is
    reached. If the max retry count is set to -1, then the decorated function
    is invoked indefinitely until an exception is thrown, and the caught
    exception is not in the list of suggested exceptions.
    """

    def __init__(self, max_retry_count=-1, inc_sleep_time=10,
                 max_sleep_time=60, exceptions=()):
        """Configure the retry object using the input params.

        :param max_retry_count: maximum number of times the given function must
                                be retried when one of the input 'exceptions'
                                is caught. When set to -1, it will be retried
                                indefinitely until an exception is thrown
                                and the caught exception is not in param
                                exceptions.
        :param inc_sleep_time: incremental time in seconds for sleep time
                               between retries
        :param max_sleep_time: max sleep time in seconds beyond which the sleep
                               time will not be incremented using param
                               inc_sleep_time. On reaching this threshold,
                               max_sleep_time will be used as the sleep time.
        :param exceptions: suggested exceptions for which the function must be
                           retried, if no exceptions are provided (the default)
                           then all exceptions will be reraised, and no
                           retrying will be triggered.
        """
        self._max_retry_count = max_retry_count
        self._inc_sleep_time = inc_sleep_time
        self._max_sleep_time = max_sleep_time
        self._exceptions = exceptions
        # Mutable per-decorator state: number of retries performed so far
        # and the current sleep time fed back to the looping call.
        self._retry_count = 0
        self._sleep_time = 0

    def __call__(self, f):
        func_name = reflection.get_callable_name(f)

        # _func is run by a DynamicLoopingCall: its return value is the
        # number of seconds to sleep before the next attempt, and raising
        # LoopingCallDone terminates the loop with the wrapped function's
        # result.
        def _func(*args, **kwargs):
            result = None
            try:
                if self._retry_count:
                    LOG.debug("Invoking %(func_name)s; retry count is "
                              "%(retry_count)d.",
                              {'func_name': func_name,
                               'retry_count': self._retry_count})
                result = f(*args, **kwargs)
            except self._exceptions:
                # Re-raise by default; only suppress (and retry) when the
                # retry budget is not yet exhausted.
                with excutils.save_and_reraise_exception() as ctxt:
                    LOG.debug("Exception which is in the suggested list of "
                              "exceptions occurred while invoking function:"
                              " %s.",
                              func_name)
                    if (self._max_retry_count != -1 and
                            self._retry_count >= self._max_retry_count):
                        LOG.debug("Cannot retry %(func_name)s upon "
                                  "suggested exception "
                                  "since retry count (%(retry_count)d) "
                                  "reached max retry count "
                                  "(%(max_retry_count)d).",
                                  {'retry_count': self._retry_count,
                                   'max_retry_count': self._max_retry_count,
                                   'func_name': func_name})
                    else:
                        ctxt.reraise = False
                        self._retry_count += 1
                        self._sleep_time += self._inc_sleep_time
                        # Returned sleep time becomes the loop's next delay.
                        return self._sleep_time
            # Success: stop the loop and hand the result back to wait().
            raise LoopingCallDone(result)

        @functools.wraps(f)
        def func(*args, **kwargs):
            # Drive _func with a dynamic loop; the sleep time it returns is
            # capped at max_sleep_time via periodic_interval_max.
            loop = DynamicLoopingCall(_func, *args, **kwargs)
            evt = loop.start(periodic_interval_max=self._max_sleep_time)
            LOG.debug("Waiting for function %s to return.", func_name)
            return evt.wait()

        return func
|
838
oslo_service/backend/eventlet/service.py
Normal file
838
oslo_service/backend/eventlet/service.py
Normal file
@ -0,0 +1,838 @@
|
||||
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# Copyright 2011 Justin Santa Barbara
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
"""Generic Node base class for all workers that run on hosts."""
|
||||
|
||||
import abc
|
||||
import collections
|
||||
import errno
|
||||
import functools
|
||||
import gc
|
||||
import inspect
|
||||
import io
|
||||
import logging
|
||||
import os
|
||||
import random
|
||||
import signal
|
||||
import sys
|
||||
import time
|
||||
|
||||
import eventlet
|
||||
from eventlet import event
|
||||
from eventlet import tpool
|
||||
|
||||
from oslo_concurrency import lockutils
|
||||
from oslo_service._i18n import _
|
||||
from oslo_service import _options
|
||||
from oslo_service.backend.eventlet import threadgroup
|
||||
from oslo_service import eventlet_backdoor
|
||||
from oslo_service import systemd
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
_LAUNCHER_RESTART_METHODS = ['reload', 'mutate']
|
||||
|
||||
|
||||
def _is_daemon():
    """Return True when this process appears to be running as a daemon.

    The process group for a foreground process will match the process
    group of the controlling terminal.  If those values do not match, or
    ioctl() fails on the stdout file handle, we assume the process is
    running in the background as a daemon.
    http://www.gnu.org/software/bash/manual/bashref.html#Job-Control-Basics
    """
    try:
        return os.getpgrp() != os.tcgetpgrp(sys.stdout.fileno())
    except io.UnsupportedOperation:
        # Could not get the fileno for stdout, so we must be a daemon.
        return True
    except OSError as err:
        if err.errno != errno.ENOTTY:
            raise
        # Assume we are a daemon because there is no terminal.
        return True
|
||||
|
||||
|
||||
def _is_sighup_and_daemon(signo):
    """Return True iff *signo* is a supported SIGHUP and we run as a daemon."""
    handler = SignalHandler()
    if handler.is_signal_supported('SIGHUP') and signo == signal.SIGHUP:
        return _is_daemon()
    # Not a (supported) SIGHUP, so there is no need to check daemon status.
    return False
|
||||
|
||||
|
||||
def _check_service_base(service):
    """Ensure *service* implements the ServiceBase interface.

    :param service: object to validate
    :raises TypeError: when *service* is not a ServiceBase instance
    """
    if not isinstance(service, ServiceBase):
        # BUG FIX: message previously read "must an instance" (missing "be").
        raise TypeError(_("Service %(service)s must be an instance of "
                          "%(base)s!")
                        % {'service': service, 'base': ServiceBase})
|
||||
|
||||
|
||||
class ServiceBase(metaclass=abc.ABCMeta):
    """Abstract contract that every service implementation must satisfy."""

    @abc.abstractmethod
    def start(self):
        """Start service."""

    @abc.abstractmethod
    def stop(self):
        """Stop service."""

    @abc.abstractmethod
    def wait(self):
        """Wait for service to complete."""

    @abc.abstractmethod
    def reset(self):
        """Reset service.

        Called in case service running in daemon mode receives SIGHUP.
        """
|
||||
|
||||
|
||||
class Singleton(type):
    """Metaclass giving each class that uses it exactly one shared instance.

    The first instantiation creates and caches the instance; every later
    call returns the cached one (any different constructor arguments are
    ignored).  Creation is serialized with an oslo.concurrency lock so two
    concurrent first calls cannot produce two instances.
    """

    # Cache of created instances, keyed by class object.
    _instances = {}
    # Shared semaphore store backing the creation lock.
    _semaphores = lockutils.Semaphores()

    def __call__(cls, *args, **kwargs):
        with lockutils.lock('singleton_lock', semaphores=cls._semaphores):
            if cls not in cls._instances:
                cls._instances[cls] = super().__call__(
                    *args, **kwargs)
        return cls._instances[cls]
|
||||
|
||||
|
||||
class SignalHandler(metaclass=Singleton):
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
|
||||
self.__setup_signal_interruption()
|
||||
|
||||
# Map all signal names to signal integer values and create a
|
||||
# reverse mapping (for easier + quick lookup).
|
||||
self._ignore_signals = ('SIG_DFL', 'SIG_IGN')
|
||||
self._signals_by_name = {name: getattr(signal, name)
|
||||
for name in dir(signal)
|
||||
if name.startswith("SIG") and
|
||||
name not in self._ignore_signals}
|
||||
self.signals_to_name = {
|
||||
sigval: name
|
||||
for (name, sigval) in self._signals_by_name.items()}
|
||||
self._signal_handlers = collections.defaultdict(list)
|
||||
self.clear()
|
||||
|
||||
def clear(self):
|
||||
for sig in self._signal_handlers:
|
||||
signal.signal(sig, signal.SIG_DFL)
|
||||
self._signal_handlers.clear()
|
||||
|
||||
def add_handlers(self, signals, handler):
|
||||
for sig in signals:
|
||||
self.add_handler(sig, handler)
|
||||
|
||||
def add_handler(self, sig, handler):
|
||||
if not self.is_signal_supported(sig):
|
||||
return
|
||||
signo = self._signals_by_name[sig]
|
||||
self._signal_handlers[signo].append(handler)
|
||||
signal.signal(signo, self._handle_signal)
|
||||
|
||||
def _handle_signal(self, signo, frame):
|
||||
# This method can be called anytime, even between two Python
|
||||
# instructions. It's scheduled by the C signal handler of Python using
|
||||
# Py_AddPendingCall().
|
||||
#
|
||||
# We only do one thing: schedule a call to _handle_signal_cb() later.
|
||||
# eventlet.spawn() is not signal-safe: _handle_signal() can be called
|
||||
# during a call to eventlet.spawn(). This case is supported, it is
|
||||
# ok to schedule multiple calls to _handle_signal() with the same
|
||||
# signal number.
|
||||
#
|
||||
# To call to _handle_signal_cb() is delayed to avoid reentrant calls to
|
||||
# _handle_signal_cb(). It avoids race conditions like reentrant call to
|
||||
# clear(): clear() is not reentrant (bug #1538204).
|
||||
eventlet.spawn(self._handle_signal_cb, signo, frame)
|
||||
|
||||
# On Python >= 3.5, ensure that eventlet's poll() or sleep() call is
|
||||
# interrupted by raising an exception. If the signal handler does not
|
||||
# raise an exception then due to PEP 475 the call will not return until
|
||||
# an event is detected on a file descriptor or the timeout is reached,
|
||||
# and thus eventlet will not wake up and notice that there has been a
|
||||
# new thread spawned.
|
||||
if self.__force_interrupt_on_signal:
|
||||
try:
|
||||
interrupted_frame = inspect.stack(context=0)[1]
|
||||
except IndexError:
|
||||
pass
|
||||
else:
|
||||
if ((interrupted_frame.function == 'do_poll' and
|
||||
interrupted_frame.filename == self.__hub_module_file) or
|
||||
(interrupted_frame.function == 'do_sleep' and
|
||||
interrupted_frame.filename == __file__)):
|
||||
raise OSError(errno.EINTR, 'Interrupted')
|
||||
|
||||
    def __setup_signal_interruption(self):
        """Set up to do the Right Thing with signals during poll() and sleep().

        Deal with the changes introduced in PEP 475 that prevent a signal from
        interrupting eventlet's call to poll() or sleep().
        """
        # The unpatched select module tells us whether this platform uses a
        # poll()-based hub; only then do we need to force EINTR behaviour.
        select_module = eventlet.patcher.original('select')
        self.__force_interrupt_on_signal = hasattr(select_module, 'poll')

        if self.__force_interrupt_on_signal:
            try:
                from eventlet.hubs import poll as poll_hub
            except ImportError:
                pass
            else:
                # This is a function we can test for in the stack when handling
                # a signal - it's safe to raise an IOError with EINTR anywhere
                # in this function.
                def do_sleep(time_sleep_func, seconds):
                    return time_sleep_func(seconds)

                # Use the original (unpatched) time.sleep so the wrapper
                # below really blocks in the OS call.
                time_sleep = eventlet.patcher.original('time').sleep

                # Wrap time.sleep to ignore the interruption error we're
                # injecting from the signal handler. This makes the behaviour
                # the same as sleep() in Python 2, where EINTR causes the
                # sleep to be interrupted (and not resumed), but no exception
                # is raised.
                @functools.wraps(time_sleep)
                def sleep_wrapper(seconds):
                    try:
                        return do_sleep(time_sleep, seconds)
                    except (OSError, InterruptedError) as err:
                        # Only swallow the EINTR we inject; propagate
                        # anything else.
                        if err.errno != errno.EINTR:
                            raise

                poll_hub.sleep = sleep_wrapper

        # Remember which file implements the active hub so the signal
        # handler can recognize frames belonging to the hub's do_poll().
        hub = eventlet.hubs.get_hub()
        self.__hub_module_file = sys.modules[hub.__module__].__file__
|
||||
|
||||
def _handle_signal_cb(self, signo, frame):
|
||||
for handler in reversed(self._signal_handlers[signo]):
|
||||
handler(signo, frame)
|
||||
|
||||
    def is_signal_supported(self, sig_name):
        """Return True if *sig_name* (e.g. 'SIGALRM') exists on this platform.

        :param sig_name: signal name as a string
        :returns: bool
        """
        return sig_name in self._signals_by_name
|
||||
|
||||
|
||||
class Launcher:
    """Launch one or more services and wait for them to complete."""

    def __init__(self, conf, restart_method='reload'):
        """Initialize the service launcher.

        :param restart_method: If 'reload', calls reload_config_files on
            SIGHUP. If 'mutate', calls mutate_config_files on SIGHUP. Other
            values produce a ValueError.
        :returns: None

        """
        self.conf = conf
        conf.register_opts(_options.service_opts)
        self.restart_method = restart_method
        self.services = Services(restart_method=restart_method)
        # Start the eventlet backdoor if configured; the chosen port is
        # handed to each service in launch_service().
        self.backdoor_port = eventlet_backdoor.initialize_if_enabled(self.conf)

    def launch_service(self, service, workers=1):
        """Load and start the given service.

        :param service: The service you would like to start, must be an
                        instance of :class:`oslo_service.service.ServiceBase`
        :param workers: This param makes this method compatible with
                        ProcessLauncher.launch_service. It must be None, 1 or
                        omitted.
        :returns: None

        """
        if workers not in (None, 1):
            raise ValueError(_("Launcher asked to start multiple workers"))
        _check_service_base(service)
        service.backdoor_port = self.backdoor_port
        self.services.add(service)

    def stop(self):
        """Stop all services which are currently running.

        :returns: None

        """
        self.services.stop()

    def wait(self):
        """Wait until all services have been stopped, and then return.

        :returns: None

        """
        self.services.wait()

    def restart(self):
        """Reload config files and restart service.

        :returns: The return value from reload_config_files or
          mutate_config_files, according to the restart_method.
        """
        use_reload = self.restart_method == 'reload'
        if use_reload:
            self.conf.reload_config_files()
        else:  # self.restart_method == 'mutate'
            self.conf.mutate_config_files()
        self.services.restart()
|
||||
|
||||
|
||||
class SignalExit(SystemExit):
    """SystemExit variant that records which signal triggered the exit."""

    def __init__(self, signo, exccode=1):
        # exccode becomes the SystemExit .code; signo is kept so the
        # catcher can tell a SIGHUP restart from a plain exit.
        super().__init__(exccode)
        self.signo = signo
|
||||
|
||||
|
||||
class ServiceLauncher(Launcher):
    """Runs one or more service in a parent process."""
    def __init__(self, conf, restart_method='reload'):
        """Constructor.

        :param conf: an instance of ConfigOpts
        :param restart_method: passed to super
        """
        super().__init__(
            conf, restart_method=restart_method)
        self.signal_handler = SignalHandler()

    def _graceful_shutdown(self, *args):
        # SIGTERM path: stop registered handlers, then (optionally) arm an
        # alarm so a hung shutdown is force-exited by _on_timeout_exit.
        self.signal_handler.clear()
        if (self.conf.graceful_shutdown_timeout and
                self.signal_handler.is_signal_supported('SIGALRM')):
            signal.alarm(self.conf.graceful_shutdown_timeout)
        self.stop()

    def _reload_service(self, *args):
        # SIGHUP path: raising SignalExit unwinds wait() so the launcher
        # can reload config and restart its services.
        self.signal_handler.clear()
        raise SignalExit(signal.SIGHUP)

    def _fast_exit(self, *args):
        # SIGINT path: bypass cleanup entirely (os._exit skips atexit and
        # finally blocks) for an immediate shutdown.
        LOG.info('Caught SIGINT signal, instantaneous exiting')
        os._exit(1)

    def _on_timeout_exit(self, *args):
        # SIGALRM path: the graceful shutdown deadline armed in
        # _graceful_shutdown expired; give up and exit hard.
        LOG.info('Graceful shutdown timeout exceeded, '
                 'instantaneous exiting')
        os._exit(1)

    def handle_signal(self):
        """Set self._handle_signal as a signal handler."""
        self.signal_handler.clear()
        self.signal_handler.add_handler('SIGTERM', self._graceful_shutdown)
        self.signal_handler.add_handler('SIGINT', self._fast_exit)
        self.signal_handler.add_handler('SIGHUP', self._reload_service)
        self.signal_handler.add_handler('SIGALRM', self._on_timeout_exit)

    def _wait_for_exit_or_signal(self):
        """Block in the parent wait() and translate how it ended.

        :returns: (status, signo) — signo is non-zero only when a
                  SignalExit unwound the wait (e.g. SIGHUP reload).
        """
        status = None
        signo = 0

        if self.conf.log_options:
            LOG.debug('Full set of CONF:')
            self.conf.log_opt_values(LOG, logging.DEBUG)

        try:
            super().wait()
        except SignalExit as exc:
            signame = self.signal_handler.signals_to_name[exc.signo]
            LOG.info('Caught %s, handling', signame)
            status = exc.code
            signo = exc.signo
        except SystemExit as exc:
            self.stop()
            status = exc.code
        except Exception:
            # Unexpected failure: stop services; status stays None.
            self.stop()
        return status, signo

    def wait(self):
        """Wait for a service to terminate and restart it on SIGHUP.

        :returns: termination status
        """
        systemd.notify_once()
        self.signal_handler.clear()
        while True:
            # Re-register handlers each iteration because the signal
            # callbacks clear them as they fire.
            self.handle_signal()
            status, signo = self._wait_for_exit_or_signal()
            if not _is_sighup_and_daemon(signo):
                break
            self.restart()

        super().wait()
        return status
|
||||
|
||||
|
||||
class ServiceWrapper:
    """Bookkeeping record pairing one service with its worker children."""

    def __init__(self, service, workers):
        self.service = service      # the service instance to run
        self.workers = workers      # desired number of child processes
        self.children = set()       # pids of currently-live children
        self.forktimes = []         # timestamps of recent forks (rate limit)
|
||||
|
||||
|
||||
class ProcessLauncher:
    """Launch a service with a given number of workers."""

    def __init__(self, conf, wait_interval=0.01, restart_method='reload'):
        """Constructor.

        :param conf: an instance of ConfigOpts
        :param wait_interval: The interval to sleep for between checks
                              of child process exit.
        :param restart_method: If 'reload', calls reload_config_files on
            SIGHUP. If 'mutate', calls mutate_config_files on SIGHUP. Other
            values produce a ValueError.
        """
        self.conf = conf
        conf.register_opts(_options.service_opts)
        self.children = {}
        self.sigcaught = None
        self.running = True
        self.wait_interval = wait_interval
        self.launcher = None
        # Children hold the read end; when the parent dies its write end
        # closes and _pipe_watcher unblocks in each child.
        rfd, self.writepipe = os.pipe()
        self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r')
        self.signal_handler = SignalHandler()
        self.handle_signal()
        self.restart_method = restart_method
        if restart_method not in _LAUNCHER_RESTART_METHODS:
            raise ValueError(_("Invalid restart_method: %s") % restart_method)

    def handle_signal(self):
        """Add instance's signal handlers to class handlers."""
        self.signal_handler.add_handler('SIGTERM', self._handle_term)
        self.signal_handler.add_handler('SIGHUP', self._handle_hup)
        self.signal_handler.add_handler('SIGINT', self._fast_exit)
        self.signal_handler.add_handler('SIGALRM', self._on_alarm_exit)

    def _handle_term(self, signo, frame):
        """Handle a TERM event.

        :param signo: signal number
        :param frame: current stack frame
        """
        self.sigcaught = signo
        self.running = False

        # Allow the process to be killed again and die from natural causes
        self.signal_handler.clear()

    def _handle_hup(self, signo, frame):
        """Handle a HUP event.

        :param signo: signal number
        :param frame: current stack frame
        """
        self.sigcaught = signo
        self.running = False

        # Do NOT clear the signal_handler, allowing multiple SIGHUPs to be
        # received swiftly. If a non-HUP is received before #wait loops, the
        # second event will "overwrite" the HUP. This is fine.

    def _fast_exit(self, signo, frame):
        # SIGINT: exit immediately without cleanup (os._exit skips
        # atexit handlers and finally blocks).
        LOG.info('Caught SIGINT signal, instantaneous exiting')
        os._exit(1)

    def _on_alarm_exit(self, signo, frame):
        # SIGALRM: graceful-shutdown deadline expired; force exit.
        LOG.info('Graceful shutdown timeout exceeded, '
                 'instantaneous exiting')
        os._exit(1)

    def _pipe_watcher(self):
        # This will block until the write end is closed when the parent
        # dies unexpectedly
        self.readpipe.read(1)

        LOG.info('Parent process has died unexpectedly, exiting')

        if self.launcher:
            self.launcher.stop()

        sys.exit(1)

    def _child_process_handle_signal(self):
        # Setup child signal handlers differently

        def _sigterm(*args):
            self.signal_handler.clear()
            self.launcher.stop()

        def _sighup(*args):
            self.signal_handler.clear()
            raise SignalExit(signal.SIGHUP)

        self.signal_handler.clear()

        # Parent signals with SIGTERM when it wants us to go away.
        self.signal_handler.add_handler('SIGTERM', _sigterm)
        self.signal_handler.add_handler('SIGHUP', _sighup)
        self.signal_handler.add_handler('SIGINT', self._fast_exit)

    def _child_wait_for_exit_or_signal(self, launcher):
        """Run the child's launcher and report how it ended.

        :param launcher: the child's Launcher instance
        :returns: (status, signo) — signo is non-zero for a SignalExit
        """
        status = 0
        signo = 0

        # NOTE(johannes): All exceptions are caught to ensure this
        # doesn't fallback into the loop spawning children. It would
        # be bad for a child to spawn more children.
        try:
            launcher.wait()
        except SignalExit as exc:
            signame = self.signal_handler.signals_to_name[exc.signo]
            LOG.info('Child caught %s, handling', signame)
            status = exc.code
            signo = exc.signo
        except SystemExit as exc:
            launcher.stop()
            status = exc.code
        except BaseException:
            launcher.stop()
            LOG.exception('Unhandled exception')
            status = 2

        return status, signo

    def _child_process(self, service):
        """Post-fork setup for a worker: signals, hub, pipe watcher.

        :returns: the child's Launcher with *service* already launched
        """
        self._child_process_handle_signal()

        # Reopen the eventlet hub to make sure we don't share an epoll
        # fd with parent and/or siblings, which would be bad
        eventlet.hubs.use_hub()

        # Close write to ensure only parent has it open
        os.close(self.writepipe)
        # Create greenthread to watch for parent to close pipe
        eventlet.spawn_n(self._pipe_watcher)

        # Reseed random number generator
        random.seed()

        launcher = Launcher(self.conf, restart_method=self.restart_method)
        launcher.launch_service(service)
        return launcher

    def _start_child(self, wrap):
        """Fork one worker for *wrap*; returns the child's pid (parent side).

        The child never returns from this method — it runs its service
        loop and terminates via os._exit().
        """
        if len(wrap.forktimes) > wrap.workers:
            # Limit ourselves to one process a second (over the period of
            # number of workers * 1 second). This will allow workers to
            # start up quickly but ensure we don't fork off children that
            # die instantly too quickly.
            if time.time() - wrap.forktimes[0] < wrap.workers:
                LOG.info('Forking too fast, sleeping')
                time.sleep(1)

            wrap.forktimes.pop(0)

        wrap.forktimes.append(time.time())

        pid = os.fork()
        if pid == 0:
            # When parent used native threads the library on child needs to be
            # "reset", otherwise native threads won't work on the child.
            tpool.killall()
            self.launcher = self._child_process(wrap.service)
            while True:
                self._child_process_handle_signal()
                status, signo = self._child_wait_for_exit_or_signal(
                    self.launcher)
                if not _is_sighup_and_daemon(signo):
                    self.launcher.wait()
                    break
                self.launcher.restart()

            os._exit(status)

        LOG.debug('Started child %d', pid)

        wrap.children.add(pid)
        self.children[pid] = wrap

        return pid

    def launch_service(self, service, workers=1):
        """Launch a service with a given number of workers.

        :param service: a service to launch, must be an instance of
                        :class:`oslo_service.service.ServiceBase`
        :param workers: a number of processes in which a service
                        will be running
        """
        _check_service_base(service)
        wrap = ServiceWrapper(service, workers)

        # Hide existing objects from the garbage collector, so that most
        # existing pages will remain in shared memory rather than being
        # duplicated between subprocesses in the GC mark-and-sweep. (Requires
        # Python 3.7 or later.)
        if hasattr(gc, 'freeze'):
            gc.freeze()

        LOG.info('Starting %d workers', wrap.workers)
        while self.running and len(wrap.children) < wrap.workers:
            self._start_child(wrap)

    def _wait_child(self):
        """Reap one exited child, if any.

        :returns: the child's ServiceWrapper, or None if no known child
                  exited.
        """
        try:
            # Don't block if no child processes have exited
            pid, status = os.waitpid(0, os.WNOHANG)
            if not pid:
                return None
        except OSError as exc:
            if exc.errno not in (errno.EINTR, errno.ECHILD):
                raise
            return None

        if os.WIFSIGNALED(status):
            sig = os.WTERMSIG(status)
            LOG.info('Child %(pid)d killed by signal %(sig)d',
                     dict(pid=pid, sig=sig))
        else:
            code = os.WEXITSTATUS(status)
            LOG.info('Child %(pid)s exited with status %(code)d',
                     dict(pid=pid, code=code))

        if pid not in self.children:
            LOG.warning('pid %d not in child list', pid)
            return None

        wrap = self.children.pop(pid)
        wrap.children.remove(pid)
        return wrap

    def _respawn_children(self):
        # Keep each wrapper topped up to its worker count until a signal
        # (or stop()) clears self.running.
        while self.running:
            wrap = self._wait_child()
            if not wrap:
                # Yield to other threads if no children have exited
                # Sleep for a short time to avoid excessive CPU usage
                # (see bug #1095346)
                eventlet.greenthread.sleep(self.wait_interval)
                continue
            while self.running and len(wrap.children) < wrap.workers:
                self._start_child(wrap)

    def wait(self):
        """Loop waiting on children to die and respawning as necessary."""

        systemd.notify_once()
        if self.conf.log_options:
            LOG.debug('Full set of CONF:')
            self.conf.log_opt_values(LOG, logging.DEBUG)

        try:
            while True:
                self.handle_signal()
                self._respawn_children()
                # No signal means that stop was called. Don't clean up here.
                if not self.sigcaught:
                    return

                signame = self.signal_handler.signals_to_name[self.sigcaught]
                LOG.info('Caught %s, stopping children', signame)
                if not _is_sighup_and_daemon(self.sigcaught):
                    break

                # SIGHUP: re-read config and propagate the reload to the
                # children (SIGHUP for 'mutate', SIGTERM otherwise).
                child_signal = signal.SIGTERM
                if self.restart_method == 'reload':
                    self.conf.reload_config_files()
                elif self.restart_method == 'mutate':
                    self.conf.mutate_config_files()
                    child_signal = signal.SIGHUP
                for service in {
                        wrap.service for wrap in self.children.values()}:
                    service.reset()

                for pid in self.children:
                    os.kill(pid, child_signal)

                self.running = True
                self.sigcaught = None
        except eventlet.greenlet.GreenletExit:
            LOG.info("Wait called after thread killed. Cleaning up.")

        # if we are here it means that we are trying to do graceful shutdown.
        # add alarm watching that graceful_shutdown_timeout is not exceeded
        if (self.conf.graceful_shutdown_timeout and
                self.signal_handler.is_signal_supported('SIGALRM')):
            signal.alarm(self.conf.graceful_shutdown_timeout)

        self.stop()

    def stop(self):
        """Terminate child processes and wait on each."""
        self.running = False

        LOG.debug("Stop services.")
        for service in {
                wrap.service for wrap in self.children.values()}:
            service.stop()

        LOG.debug("Killing children.")
        for pid in self.children:
            try:
                os.kill(pid, signal.SIGTERM)
            except OSError as exc:
                # ESRCH: child already gone; anything else is unexpected.
                if exc.errno != errno.ESRCH:
                    raise

        # Wait for children to die
        if self.children:
            LOG.info('Waiting on %d children to exit', len(self.children))
            while self.children:
                self._wait_child()
|
||||
|
||||
|
||||
class Service(ServiceBase):
    """Service object for binaries running on hosts."""

    def __init__(self, threads=1000):
        # Thread group capped at *threads* concurrent greenthreads.
        self.tg = threadgroup.ThreadGroup(threads)

    def reset(self):
        """Reset a service in case it received a SIGHUP."""

    def start(self):
        """Start a service."""

    def stop(self, graceful=False):
        """Stop a service.

        :param graceful: indicates whether to wait for all threads to finish
               or terminate them instantly
        """
        self.tg.stop(graceful)

    def wait(self):
        """Wait for a service to shut down."""
        self.tg.wait()
|
||||
|
||||
|
||||
class Services:
    """Container that runs a collection of services in a thread group."""

    def __init__(self, restart_method='reload'):
        if restart_method not in _LAUNCHER_RESTART_METHODS:
            raise ValueError(_("Invalid restart_method: %s") % restart_method)
        self.restart_method = restart_method
        self.services = []
        self.tg = threadgroup.ThreadGroup()
        # Shared event: sent on stop() so run_service wrappers may exit.
        self.done = event.Event()

    def add(self, service):
        """Add a service to a list and create a thread to run it.

        :param service: service to run
        """
        self.services.append(service)
        self.tg.add_thread(self.run_service, service, self.done)

    def stop(self):
        """Wait for graceful shutdown of services and kill the threads."""
        for svc in self.services:
            svc.stop()

        # Every service has cleaned up; release the run_service wrapper
        # threads that are blocked on the shared event.
        if not self.done.ready():
            self.done.send()

        # Reap the wrapper threads.
        self.tg.stop()

    def wait(self):
        """Wait for services to shut down."""
        for svc in self.services:
            svc.wait()
        self.tg.wait()

    def restart(self):
        """Reset services.

        The behavior of this function varies depending on the value of the
        restart_method member. If the restart_method is `reload`, then it
        will stop the services, reset them, and start them in new threads.
        If the restart_method is `mutate`, then it will just reset the
        services without restarting them.
        """
        reload_mode = self.restart_method == 'reload'
        if reload_mode:
            self.stop()
            self.done = event.Event()
        for svc in self.services:
            svc.reset()
            if reload_mode:
                self.tg.add_thread(self.run_service, svc, self.done)

    @staticmethod
    def run_service(service, done):
        """Service start wrapper.

        :param service: service to run
        :param done: event to wait on until a shutdown is triggered
        :returns: None

        """
        try:
            service.start()
        except Exception:
            LOG.exception('Error starting thread.')
            raise SystemExit(1)
        else:
            done.wait()
|
||||
|
||||
|
||||
def launch(conf, service, workers=1, restart_method='reload'):
    """Launch a service with a given number of workers.

    :param conf: an instance of ConfigOpts
    :param service: a service to launch, must be an instance of
                    :class:`oslo_service.service.ServiceBase`
    :param workers: a number of processes in which a service will be running,
        type should be int.
    :param restart_method: Passed to the constructed launcher. If 'reload', the
        launcher will call reload_config_files on SIGHUP. If 'mutate', it will
        call mutate_config_files on SIGHUP. Other values produce a ValueError.
    :returns: instance of a launcher that was used to launch the service
    """
    # Validate workers up front; None means "single in-process launcher".
    if workers is not None:
        if not isinstance(workers, int):
            raise TypeError(_("Type of workers should be int!"))
        if workers <= 0:
            raise ValueError(_("Number of workers should be positive!"))

    single = workers is None or workers == 1
    launcher_cls = ServiceLauncher if single else ProcessLauncher
    launcher = launcher_cls(conf, restart_method=restart_method)
    launcher.launch_service(service, workers=workers)

    return launcher
|
430
oslo_service/backend/eventlet/threadgroup.py
Normal file
430
oslo_service/backend/eventlet/threadgroup.py
Normal file
@ -0,0 +1,430 @@
|
||||
# Copyright 2012 Red Hat, Inc.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
import logging
|
||||
import threading
|
||||
import warnings
|
||||
|
||||
from debtcollector import removals
|
||||
import eventlet
|
||||
from eventlet import greenpool
|
||||
|
||||
from oslo_service.backend.eventlet import loopingcall
|
||||
from oslo_utils import timeutils
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _on_thread_done(_greenthread, group, thread):
|
||||
"""Callback function to be passed to GreenThread.link() when we spawn().
|
||||
|
||||
Calls the :class:`ThreadGroup` to notify it to remove this thread from
|
||||
the associated group.
|
||||
"""
|
||||
group.thread_done(thread)
|
||||
|
||||
|
||||
class Thread:
    """Wrapper around a greenthread.

    Holds a reference to the :class:`ThreadGroup`. The Thread will notify
    the :class:`ThreadGroup` when it has done so it can be removed from
    the threads list.
    """
    def __init__(self, thread, group, link=True):
        self.thread = thread
        # id() of the wrapped greenthread stands in for a thread ident.
        self._ident = id(thread)
        if link:
            self.thread.link(_on_thread_done, group, self)

    @property
    def ident(self):
        """Opaque identifier for the wrapped greenthread."""
        return self._ident

    def stop(self):
        """Kill the thread by raising GreenletExit within it."""
        self.thread.kill()

    def wait(self):
        """Block until the thread completes and return the result."""
        return self.thread.wait()

    def link(self, func, *args, **kwargs):
        """Schedule a function to be run upon completion of the thread."""
        self.thread.link(func, *args, **kwargs)

    def cancel(self, *throw_args):
        """Prevent the thread from starting if it has not already done so.

        :param throw_args: the `exc_info` data to raise from :func:`wait`.
        """
        self.thread.cancel(*throw_args)
|
||||
|
||||
|
||||
class ThreadGroup:
|
||||
"""A group of greenthreads and timers.
|
||||
|
||||
The point of the ThreadGroup class is to:
|
||||
|
||||
* keep track of timers and greenthreads (making it easier to stop them
|
||||
when need be).
|
||||
* provide an easy API to add timers.
|
||||
|
||||
.. note::
|
||||
The API is inconsistent, confusing, and not orthogonal. The same verbs
|
||||
often mean different things when applied to timers and threads,
|
||||
respectively. Read the documentation carefully.
|
||||
"""
|
||||
|
||||
def __init__(self, thread_pool_size=10):
|
||||
"""Create a ThreadGroup with a pool of greenthreads.
|
||||
|
||||
:param thread_pool_size: the maximum number of threads allowed to run
|
||||
concurrently.
|
||||
"""
|
||||
self.pool = greenpool.GreenPool(thread_pool_size)
|
||||
self.threads = []
|
||||
self.timers = []
|
||||
|
||||
def add_dynamic_timer(self, callback, initial_delay=None,
|
||||
periodic_interval_max=None, *args, **kwargs):
|
||||
"""Add a timer that controls its own period dynamically.
|
||||
|
||||
The period of each iteration of the timer is controlled by the return
|
||||
value of the callback function on the previous iteration.
|
||||
|
||||
.. warning::
|
||||
Passing arguments to the callback function is deprecated. Use the
|
||||
:func:`add_dynamic_timer_args` method to pass arguments for the
|
||||
callback function.
|
||||
|
||||
:param callback: The callback function to run when the timer is
|
||||
triggered.
|
||||
:param initial_delay: The delay in seconds before first triggering the
|
||||
timer. If not set, the timer is liable to be
|
||||
scheduled immediately.
|
||||
:param periodic_interval_max: The maximum interval in seconds to allow
|
||||
the callback function to request. If
|
||||
provided, this is also used as the
|
||||
default delay if None is returned by the
|
||||
callback function.
|
||||
:returns: an :class:`oslo_service.loopingcall.DynamicLoopingCall`
|
||||
instance
|
||||
"""
|
||||
if args or kwargs:
|
||||
warnings.warn("Calling add_dynamic_timer() with arguments to the "
|
||||
"callback function is deprecated. Use "
|
||||
"add_dynamic_timer_args() instead.",
|
||||
DeprecationWarning)
|
||||
return self.add_dynamic_timer_args(
|
||||
callback, args, kwargs,
|
||||
initial_delay=initial_delay,
|
||||
periodic_interval_max=periodic_interval_max)
|
||||
|
||||
def add_dynamic_timer_args(self, callback, args=None, kwargs=None,
|
||||
initial_delay=None, periodic_interval_max=None,
|
||||
stop_on_exception=True):
|
||||
"""Add a timer that controls its own period dynamically.
|
||||
|
||||
The period of each iteration of the timer is controlled by the return
|
||||
value of the callback function on the previous iteration.
|
||||
|
||||
:param callback: The callback function to run when the timer is
|
||||
triggered.
|
||||
:param args: A list of positional args to the callback function.
|
||||
:param kwargs: A dict of keyword args to the callback function.
|
||||
:param initial_delay: The delay in seconds before first triggering the
|
||||
timer. If not set, the timer is liable to be
|
||||
scheduled immediately.
|
||||
:param periodic_interval_max: The maximum interval in seconds to allow
|
||||
the callback function to request. If
|
||||
provided, this is also used as the
|
||||
default delay if None is returned by the
|
||||
callback function.
|
||||
:param stop_on_exception: Pass ``False`` to have the timer continue
|
||||
running even if the callback function raises
|
||||
an exception.
|
||||
:returns: an :class:`oslo_service.loopingcall.DynamicLoopingCall`
|
||||
instance
|
||||
"""
|
||||
args = args or []
|
||||
kwargs = kwargs or {}
|
||||
timer = loopingcall.DynamicLoopingCall(callback, *args, **kwargs)
|
||||
timer.start(initial_delay=initial_delay,
|
||||
periodic_interval_max=periodic_interval_max,
|
||||
stop_on_exception=stop_on_exception)
|
||||
self.timers.append(timer)
|
||||
return timer
|
||||
|
||||
def add_timer(self, interval, callback, initial_delay=None,
|
||||
*args, **kwargs):
|
||||
"""Add a timer with a fixed period.
|
||||
|
||||
.. warning::
|
||||
Passing arguments to the callback function is deprecated. Use the
|
||||
:func:`add_timer_args` method to pass arguments for the callback
|
||||
function.
|
||||
|
||||
:param interval: The minimum period in seconds between calls to the
|
||||
callback function.
|
||||
:param callback: The callback function to run when the timer is
|
||||
triggered.
|
||||
:param initial_delay: The delay in seconds before first triggering the
|
||||
timer. If not set, the timer is liable to be
|
||||
scheduled immediately.
|
||||
:returns: an :class:`oslo_service.loopingcall.FixedIntervalLoopingCall`
|
||||
instance
|
||||
"""
|
||||
if args or kwargs:
|
||||
warnings.warn("Calling add_timer() with arguments to the callback "
|
||||
"function is deprecated. Use add_timer_args() "
|
||||
"instead.",
|
||||
DeprecationWarning)
|
||||
return self.add_timer_args(interval, callback, args, kwargs,
|
||||
initial_delay=initial_delay)
|
||||
|
||||
def add_timer_args(self, interval, callback, args=None, kwargs=None,
|
||||
initial_delay=None, stop_on_exception=True):
|
||||
"""Add a timer with a fixed period.
|
||||
|
||||
:param interval: The minimum period in seconds between calls to the
|
||||
callback function.
|
||||
:param callback: The callback function to run when the timer is
|
||||
triggered.
|
||||
:param args: A list of positional args to the callback function.
|
||||
:param kwargs: A dict of keyword args to the callback function.
|
||||
:param initial_delay: The delay in seconds before first triggering the
|
||||
timer. If not set, the timer is liable to be
|
||||
scheduled immediately.
|
||||
:param stop_on_exception: Pass ``False`` to have the timer continue
|
||||
running even if the callback function raises
|
||||
an exception.
|
||||
:returns: an :class:`oslo_service.loopingcall.FixedIntervalLoopingCall`
|
||||
instance
|
||||
"""
|
||||
args = args or []
|
||||
kwargs = kwargs or {}
|
||||
pulse = loopingcall.FixedIntervalLoopingCall(callback, *args, **kwargs)
|
||||
pulse.start(interval=interval,
|
||||
initial_delay=initial_delay,
|
||||
stop_on_exception=stop_on_exception)
|
||||
self.timers.append(pulse)
|
||||
return pulse
|
||||
|
||||
def add_thread(self, callback, *args, **kwargs):
|
||||
"""Spawn a new thread.
|
||||
|
||||
This call will block until capacity is available in the thread pool.
|
||||
After that, it returns immediately (i.e. *before* the new thread is
|
||||
scheduled).
|
||||
|
||||
:param callback: the function to run in the new thread.
|
||||
:param args: positional arguments to the callback function.
|
||||
:param kwargs: keyword arguments to the callback function.
|
||||
:returns: a :class:`Thread` object
|
||||
"""
|
||||
gt = self.pool.spawn(callback, *args, **kwargs)
|
||||
th = Thread(gt, self, link=False)
|
||||
self.threads.append(th)
|
||||
gt.link(_on_thread_done, self, th)
|
||||
return th
|
||||
|
||||
def thread_done(self, thread):
|
||||
"""Remove a completed thread from the group.
|
||||
|
||||
This method is automatically called on completion of a thread in the
|
||||
group, and should not be called explicitly.
|
||||
"""
|
||||
self.threads.remove(thread)
|
||||
|
||||
def timer_done(self, timer):
|
||||
"""Remove a timer from the group.
|
||||
|
||||
:param timer: The timer object returned from :func:`add_timer` or its
|
||||
analogues.
|
||||
"""
|
||||
self.timers.remove(timer)
|
||||
|
||||
def _perform_action_on_threads(self, action_func, on_error_func):
|
||||
current = threading.current_thread()
|
||||
# Iterate over a copy of self.threads so thread_done doesn't
|
||||
# modify the list while we're iterating
|
||||
for x in self.threads[:]:
|
||||
if x.ident == current.ident:
|
||||
# Don't perform actions on the current thread.
|
||||
continue
|
||||
try:
|
||||
action_func(x)
|
||||
except eventlet.greenlet.GreenletExit: # nosec
|
||||
# greenlet exited successfully
|
||||
pass
|
||||
except Exception:
|
||||
on_error_func(x)
|
||||
|
||||
def _stop_threads(self):
|
||||
self._perform_action_on_threads(
|
||||
lambda x: x.stop(),
|
||||
lambda x: LOG.exception('Error stopping thread.'))
|
||||
|
||||
def stop_timers(self, wait=False):
    """Stop all timers in the group and remove them from the group.

    No new invocations of timers will be triggered after they are
    stopped, but calls that are already in progress are not interrupted.

    To wait for in-progress calls to complete, pass ``wait=True`` -
    calling :func:`wait` afterwards will not have the desired effect
    because the timers will already have been removed from the group.

    :param wait: If true, block until all timers have been stopped
                 before returning.
    """
    # Ask every timer to stop triggering new invocations.
    for timer in self.timers:
        timer.stop()
    # Optionally let in-flight timer callbacks finish before the
    # timers are forgotten.
    if wait:
        self._wait_timers()
    self.timers = []
def stop(self, graceful=False):
    """Stop all timers and threads in the group.

    Timers are always stopped first: no new timer invocations are
    triggered after this call, though invocations already in progress
    are not interrupted.

    If ``graceful`` is false (the default), the remaining threads are
    killed immediately by raising GreenletExit in them. In that case
    this method does **not** block until all threads and running timer
    callbacks have actually exited; call :func:`wait` for that
    guarantee.

    If ``graceful`` is true, no thread is killed. The call blocks until
    all threads and in-progress timer callbacks have completed - the
    equivalent of :func:`stop_timers` with ``wait=True`` followed by
    :func:`wait`.

    :param graceful: If true, block until all timers have stopped and
                     all threads completed; never kill threads.
                     Otherwise, kill threads immediately and return
                     immediately even if timer callbacks are still
                     running.
    """
    self.stop_timers(wait=graceful)
    if graceful:
        # Never kill: wait for every thread to finish on its own.
        self._wait_threads()
        return
    # Default path: raise GreenletExit in the remaining threads now.
    self._stop_threads()
def _wait_timers(self):
    """Wait for every timer in the group, logging (never raising) errors."""
    for timer in self.timers:
        try:
            timer.wait()
        except eventlet.greenlet.GreenletExit:  # nosec
            pass  # the greenlet exited cleanly
        except Exception:
            LOG.exception('Error waiting on timer.')
def _wait_threads(self):
    """Wait for every thread in the group except the current one."""
    def _log_wait_error(thread):
        LOG.exception('Error waiting on thread.')

    self._perform_action_on_threads(lambda thread: thread.wait(),
                                    _log_wait_error)
def wait(self):
    """Block until all timers and threads in the group are complete.

    .. note::
        Before calling this method, any timers should be stopped first by
        calling :func:`stop_timers`, :func:`stop`, or :func:`cancel` with a
        ``timeout`` argument. Otherwise this will block forever.

    .. note::
        Calling :func:`stop_timers` removes the timers from the group, so a
        subsequent call to this method will not wait for any in-progress
        timer calls to complete.

    Any exceptions raised by the threads will be logged but suppressed.

    .. note::
        This call guarantees only that the threads themselves have
        completed, **not** that any cleanup functions added via
        :func:`Thread.link` have completed.
    """
    # Drain the timers first, then the threads.
    self._wait_timers()
    self._wait_threads()
def _any_threads_alive(self):
|
||||
current = threading.current_thread()
|
||||
for x in self.threads[:]:
|
||||
if x.ident == current.ident:
|
||||
# Don't check current thread.
|
||||
continue
|
||||
if not x.thread.dead:
|
||||
return True
|
||||
return False
|
||||
|
||||
@removals.remove(removal_version='?')
def cancel(self, *throw_args, **kwargs):
    """Cancel unstarted threads in the group, and optionally stop the rest.

    .. warning::
        This method is deprecated and should not be used. It will be
        removed in a future release.

    If called without the ``timeout`` argument, this does **not** stop any
    running threads, but prevents any threads in the group that have not
    yet started from running, then returns immediately. Timers are not
    affected.

    If the 'timeout' argument is supplied, then it serves as a grace period
    to allow running threads to finish. After the timeout, any threads in
    the group that are still running will be killed by raising GreenletExit
    in them, and all timers will be stopped (so that they are not
    retriggered - timer calls that are in progress will not be
    interrupted). This method will **not** block until all threads have
    actually exited, nor that all in-progress timer calls have completed.
    To guarantee that all threads have exited, call :func:`wait`. If all
    threads complete before the timeout expires, timers will be left
    running; there is no way to then stop those timers, so for consistent
    behaviour :func:`stop_timers` should be called before calling this
    method.

    :param throw_args: the `exc_info` data to raise from
                       :func:`Thread.wait` for any of the unstarted
                       threads. (Though note that :func:`ThreadGroup.wait`
                       suppresses exceptions.)
    :param timeout: time to wait for running threads to complete before
                    calling stop(). If not supplied, threads that are
                    already running continue to completion.
    :param wait_time: length of time in seconds to sleep between checks of
                      whether any threads are still alive. (Default 1s.)
    """
    # Cancel every thread but this one; errors are logged, not raised.
    self._perform_action_on_threads(
        lambda x: x.cancel(*throw_args),
        lambda x: LOG.exception('Error canceling thread.'))

    timeout = kwargs.get('timeout', None)
    if timeout is None:
        return
    wait_time = kwargs.get('wait_time', 1)
    # Grace period: poll until either all threads exit or the timeout
    # elapses, then force a (non-graceful) stop.
    watch = timeutils.StopWatch(duration=timeout)
    watch.start()
    while self._any_threads_alive():
        if not watch.expired():
            eventlet.sleep(wait_time)
            continue
        LOG.debug("Cancel timeout reached, stopping threads.")
        self.stop()
@ -31,6 +31,19 @@ from eventlet.green import socket
|
||||
from oslo_service._i18n import _
|
||||
from oslo_service import _options
|
||||
|
||||
from debtcollector import removals
|
||||
|
||||
# Mark the whole eventlet_backdoor module as deprecated via debtcollector;
# there is no replacement module.
removals.removed_module(
    __name__,
    replacement=None,
    removal_version="2026.2",
    message=(
        "The 'eventlet_backdoor' module is deprecated and will be removed in "
        "version 2026.2. This module is not being replaced. Please migrate "
        "away from using it and remove any dependencies on this module."
    )
)
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
@ -12,6 +12,18 @@
|
||||
|
||||
import fixtures
|
||||
|
||||
from debtcollector import removals
|
||||
|
||||
# Mark the oslo_service.fixture module as deprecated via debtcollector.
removals.removed_module(
    __name__,
    replacement=None,
    removal_version="2026.2",
    message=(
        "The 'oslo_service.fixture' module is deprecated and will be removed"
        " in version 2026.2."
    )
)
|
||||
|
||||
|
||||
class SleepFixture(fixtures.Fixture):
|
||||
"""A fixture for mocking the ``wait()`` within :doc:`loopingcall` events.
|
||||
|
@ -15,424 +15,17 @@
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import functools
|
||||
import random
|
||||
import sys
|
||||
import time
|
||||
|
||||
from eventlet import event
|
||||
from eventlet import greenthread
|
||||
from oslo_log import log as logging
|
||||
from oslo_utils import eventletutils
|
||||
from oslo_utils import excutils
|
||||
from oslo_utils import reflection
|
||||
from oslo_utils import timeutils
|
||||
from oslo_service.backend import get_component
|
||||
|
||||
from oslo_service._i18n import _
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class LoopingCallDone(Exception):
    """Raised by a looped function to stop its LoopingCallBase normally.

    Somewhat analogous to StopIteration: the poll-function passed to a
    LoopingCallBase raises this exception to break out of the loop.

    An optional return value may be passed as the exception argument;
    it becomes the result of ``LoopingCallBase.wait()``.
    """

    def __init__(self, retvalue=True):
        """:param retvalue: Value that LoopingCallBase.wait() should return."""
        # Stored for the looping machinery to forward to wait() callers.
        self.retvalue = retvalue
||||
class LoopingCallTimeOut(Exception):
    """Raised by a LoopingCall when its configured timeout is exceeded."""
||||
def _safe_wrapper(f, kind, func_name):
|
||||
"""Wrapper that calls into wrapped function and logs errors as needed."""
|
||||
|
||||
def func(*args, **kwargs):
|
||||
try:
|
||||
return f(*args, **kwargs)
|
||||
except LoopingCallDone:
|
||||
raise # let the outer handler process this
|
||||
except Exception:
|
||||
LOG.error('%(kind)s %(func_name)r failed',
|
||||
{'kind': kind, 'func_name': func_name},
|
||||
exc_info=True)
|
||||
return 0
|
||||
|
||||
return func
|
||||
|
||||
|
||||
class LoopingCallBase:
    """Shared machinery for the looping-call variants.

    Subclasses implement ``start()`` by supplying an ``idle_for``
    callable to :func:`_start` that decides how long to sleep between
    invocations of ``self.f``.
    """

    _KIND = _("Unknown looping call")

    _RUN_ONLY_ONE_MESSAGE = _("A looping call can only run one function"
                              " at a time")

    def __init__(self, f=None, *args, **kw):
        self.args = args
        self.kw = kw
        self.f = f
        # Greenthread currently running the loop; None when idle.
        self._thread = None
        # Event the loop result is delivered on; created by _start().
        self.done = None
        # Set by stop(); while unset, _running is True.
        self._abort = eventletutils.EventletEvent()

    @property
    def _running(self):
        return not self._abort.is_set()

    def stop(self):
        if self._running:
            self._abort.set()

    def wait(self):
        # Blocks until _run_loop sends a result or an exception.
        return self.done.wait()

    def _on_done(self, gt, *args, **kwargs):
        # Linked to the greenthread in _start(); clears the slot so a
        # new start() is allowed.
        self._thread = None

    def _sleep(self, timeout):
        # Waiting on the abort event doubles as an interruptible sleep:
        # stop() wakes it up immediately.
        self._abort.wait(timeout)

    def _start(self, idle_for, initial_delay=None, stop_on_exception=True):
        """Start the looping

        :param idle_for: Callable that takes two positional arguments, returns
                         how long to idle for. The first positional argument is
                         the last result from the function being looped and the
                         second positional argument is the time it took to
                         calculate that result.
        :param initial_delay: How long to delay before starting the looping.
                              Value is in seconds.
        :param stop_on_exception: Whether to stop if an exception occurs.
        :returns: eventlet event instance
        """
        if self._thread is not None:
            raise RuntimeError(self._RUN_ONLY_ONE_MESSAGE)
        self.done = event.Event()
        self._abort.clear()
        self._thread = greenthread.spawn(
            self._run_loop, idle_for,
            initial_delay=initial_delay, stop_on_exception=stop_on_exception)
        self._thread.link(self._on_done)
        return self.done

    # NOTE(bnemec): This is just a wrapper function we can mock so we aren't
    # affected by other users of the StopWatch class.
    def _elapsed(self, watch):
        return watch.elapsed()

    def _run_loop(self, idle_for_func,
                  initial_delay=None, stop_on_exception=True):
        # Runs inside the spawned greenthread: repeatedly call self.f,
        # then sleep for however long idle_for_func says.
        kind = self._KIND
        func_name = reflection.get_callable_name(self.f)
        # When exceptions should not stop the loop, wrap f so they are
        # logged and converted into a 0-second idle instead.
        func = self.f if stop_on_exception else _safe_wrapper(self.f, kind,
                                                              func_name)
        if initial_delay:
            self._sleep(initial_delay)
        try:
            watch = timeutils.StopWatch()
            while self._running:
                watch.restart()
                result = func(*self.args, **self.kw)
                watch.stop()
                # stop() may have been called while f was running.
                if not self._running:
                    break
                idle = idle_for_func(result, self._elapsed(watch))
                LOG.trace('%(kind)s %(func_name)r sleeping '
                          'for %(idle).02f seconds',
                          {'func_name': func_name, 'idle': idle,
                           'kind': kind})
                self._sleep(idle)
        except LoopingCallDone as e:
            # Normal termination requested by f itself.
            self.done.send(e.retvalue)
        except Exception:
            exc_info = sys.exc_info()
            try:
                LOG.error('%(kind)s %(func_name)r failed',
                          {'kind': kind, 'func_name': func_name},
                          exc_info=exc_info)
                self.done.send_exception(*exc_info)
            finally:
                # Break the traceback reference cycle.
                del exc_info
            return
        else:
            # Loop ended because stop() was called.
            self.done.send(True)
||||
class FixedIntervalLoopingCall(LoopingCallBase):
    """A fixed interval looping call."""

    _RUN_ONLY_ONE_MESSAGE = _("A fixed interval looping call can only run"
                              " one function at a time")

    _KIND = _('Fixed interval looping call')

    def start(self, interval, initial_delay=None, stop_on_exception=True):
        """Begin looping ``self.f`` every *interval* seconds.

        :param interval: target period in seconds between call starts.
        :param initial_delay: seconds to wait before the first call.
        :param stop_on_exception: whether an exception stops the loop.
        :returns: the event returned by :func:`LoopingCallBase._start`.
        """
        def _idle_for(result, elapsed):
            # Sleep for whatever remains of the interval; if the call
            # overran the interval, warn and do not sleep at all.
            overrun = round(elapsed - interval, 2)
            if overrun > 0:
                func_name = reflection.get_callable_name(self.f)
                LOG.warning('Function %(func_name)r run outlasted '
                            'interval by %(delay).2f sec',
                            {'func_name': func_name, 'delay': overrun})
            return 0 if overrun >= 0 else -overrun

        return self._start(_idle_for, initial_delay=initial_delay,
                           stop_on_exception=stop_on_exception)
||||
class FixedIntervalWithTimeoutLoopingCall(LoopingCallBase):
    """A fixed interval looping call with timeout checking mechanism."""

    # NOTE: fixed a duplicated "at at" in this user-visible message.
    _RUN_ONLY_ONE_MESSAGE = _("A fixed interval looping call with timeout"
                              " checking and can only run one function"
                              " at a time")

    _KIND = _('Fixed interval looping call with timeout checking.')

    def start(self, interval, initial_delay=None,
              stop_on_exception=True, timeout=0):
        """Begin looping ``self.f`` every *interval* seconds with a timeout.

        :param interval: target period in seconds between call starts.
        :param initial_delay: seconds to wait before the first call.
        :param stop_on_exception: whether an exception stops the loop.
        :param timeout: total wall-clock budget in seconds; 0 disables
                        the check.
        :raises LoopingCallTimeOut: when the total elapsed time exceeds
                                    *timeout* (checked after each call).
        :returns: the event returned by :func:`LoopingCallBase._start`.
        """
        start_time = time.time()

        def _idle_for(result, elapsed):
            delay = round(elapsed - interval, 2)
            if delay > 0:
                func_name = reflection.get_callable_name(self.f)
                LOG.warning('Function %(func_name)r run outlasted '
                            'interval by %(delay).2f sec',
                            {'func_name': func_name, 'delay': delay})
            # Enforce the overall timeout before scheduling another run.
            elapsed_time = time.time() - start_time
            if timeout > 0 and elapsed_time > timeout:
                raise LoopingCallTimeOut(
                    _('Looping call timed out after %.02f seconds')
                    % elapsed_time)
            return -delay if delay < 0 else 0

        return self._start(_idle_for, initial_delay=initial_delay,
                           stop_on_exception=stop_on_exception)
||||
class DynamicLoopingCall(LoopingCallBase):
    """A looping call which sleeps until the next known event.

    The function called should return how long to sleep for before being
    called again.
    """

    _RUN_ONLY_ONE_MESSAGE = _("A dynamic interval looping call can only run"
                              " one function at a time")
    _TASK_MISSING_SLEEP_VALUE_MESSAGE = _(
        "A dynamic interval looping call should supply either an"
        " interval or periodic_interval_max"
    )

    _KIND = _('Dynamic interval looping call')

    def start(self, initial_delay=None, periodic_interval_max=None,
              stop_on_exception=True):
        """Begin looping; ``self.f`` returns the next sleep duration.

        :param initial_delay: seconds to wait before the first call.
        :param periodic_interval_max: upper bound on any sleep, and the
            fallback when the function returns None.
        :param stop_on_exception: whether an exception stops the loop.
        :returns: the event returned by :func:`LoopingCallBase._start`.
        """
        def _idle_for(suggested_delay, elapsed):
            if suggested_delay is None:
                if periodic_interval_max is None:
                    # Note(suro-patz): An application used to receive a
                    # TypeError thrown from eventlet layer, before
                    # this RuntimeError was introduced.
                    raise RuntimeError(
                        self._TASK_MISSING_SLEEP_VALUE_MESSAGE)
                return periodic_interval_max
            if periodic_interval_max is not None:
                return min(suggested_delay, periodic_interval_max)
            return suggested_delay

        return self._start(_idle_for, initial_delay=initial_delay,
                           stop_on_exception=stop_on_exception)
||||
class BackOffLoopingCall(LoopingCallBase):
    """Run a method in a loop with backoff on error.

    The passed in function should return True (no error, return to
    initial_interval),
    False (error, start backing off), or raise LoopingCallDone(retvalue=None)
    (quit looping, return retvalue if set).

    When there is an error, the call will backoff on each failure. The
    backoff will be equal to double the previous base interval times some
    jitter. If a backoff would put it over the timeout, it halts immediately,
    so the call will never take more than timeout, but may and likely will
    take less time.

    When the function return value is True or False, the interval will be
    multiplied by a random jitter. If min_jitter or max_jitter is None,
    there will be no jitter (jitter=1). If min_jitter is below 0.5, the code
    may not backoff and may increase its retry rate.

    If func constantly returns True, this function will not return.

    To run a func and wait for a call to finish (by raising a LoopingCallDone):

        timer = BackOffLoopingCall(func)
        response = timer.start().wait()

    :param initial_delay: delay before first running of function
    :param starting_interval: initial interval in seconds between calls to
                              function. When an error occurs and then a
                              success, the interval is returned to
                              starting_interval
    :param timeout: time in seconds before a LoopingCallTimeout is raised.
                    The call will never take longer than timeout, but may quit
                    before timeout.
    :param max_interval: The maximum interval between calls during errors
    :param jitter: Used to vary when calls are actually run to avoid group of
                   calls all coming at the exact same time. Uses
                   random.gauss(jitter, 0.1), with jitter as the mean for the
                   distribution. If set below .5, it can cause the calls to
                   come more rapidly after each failure.
    :param min_interval: The minimum interval in seconds between calls to
                         function.
    :raises: LoopingCallTimeout if time spent doing error retries would exceed
             timeout.
    """

    # Process-wide RNG used to compute the per-iteration jitter.
    _RNG = random.SystemRandom()
    _KIND = _('Dynamic backoff interval looping call')
    _RUN_ONLY_ONE_MESSAGE = _("A dynamic backoff interval looping call can"
                              " only run one function at a time")

    def __init__(self, f=None, *args, **kw):
        super().__init__(f=f, *args, **kw)
        # Accumulated time (seconds) spent idling while in the error
        # state; compared against the timeout budget.
        self._error_time = 0
        # Current base interval; doubled (with jitter) on each failure.
        self._interval = 1

    def start(self, initial_delay=None, starting_interval=1, timeout=300,
              max_interval=300, jitter=0.75, min_interval=0.001):
        if self._thread is not None:
            raise RuntimeError(self._RUN_ONLY_ONE_MESSAGE)

        # Reset any prior state.
        self._error_time = 0
        self._interval = starting_interval

        def _idle_for(success, _elapsed):
            random_jitter = abs(self._RNG.gauss(jitter, 0.1))
            if success:
                # Reset error state now that it didn't error...
                self._interval = starting_interval
                self._error_time = 0
                return self._interval * random_jitter
            else:
                # Perform backoff, random jitter around the next interval
                # bounded by min_interval and max_interval.
                idle = max(self._interval * 2 * random_jitter, min_interval)
                idle = min(idle, max_interval)
                # Calculate the next interval based on the mean, so that the
                # backoff grows at the desired rate.
                self._interval = max(self._interval * 2 * jitter, min_interval)
                # Don't go over timeout, end early if necessary. If
                # timeout is 0, keep going.
                if timeout > 0 and self._error_time + idle > timeout:
                    raise LoopingCallTimeOut(
                        _('Looping call timed out after %.02f seconds')
                        % (self._error_time + idle))
                self._error_time += idle
                return idle

        return self._start(_idle_for, initial_delay=initial_delay)
||||
class RetryDecorator:
    """Decorator for retrying a function upon suggested exceptions.

    The decorated function is retried for the given number of times, and the
    sleep time between the retries is incremented until max sleep time is
    reached. If the max retry count is set to -1, then the decorated function
    is invoked indefinitely until an exception is thrown, and the caught
    exception is not in the list of suggested exceptions.
    """

    def __init__(self, max_retry_count=-1, inc_sleep_time=10,
                 max_sleep_time=60, exceptions=()):
        """Configure the retry object using the input params.

        :param max_retry_count: maximum number of times the given function must
                                be retried when one of the input 'exceptions'
                                is caught. When set to -1, it will be retried
                                indefinitely until an exception is thrown
                                and the caught exception is not in param
                                exceptions.
        :param inc_sleep_time: incremental time in seconds for sleep time
                               between retries
        :param max_sleep_time: max sleep time in seconds beyond which the sleep
                               time will not be incremented using param
                               inc_sleep_time. On reaching this threshold,
                               max_sleep_time will be used as the sleep time.
        :param exceptions: suggested exceptions for which the function must be
                           retried, if no exceptions are provided (the default)
                           then all exceptions will be reraised, and no
                           retrying will be triggered.
        """
        self._max_retry_count = max_retry_count
        self._inc_sleep_time = inc_sleep_time
        self._max_sleep_time = max_sleep_time
        self._exceptions = exceptions
        # Mutable retry state shared by all invocations of the wrapped
        # function made through this decorator instance.
        self._retry_count = 0
        self._sleep_time = 0

    def __call__(self, f):
        func_name = reflection.get_callable_name(f)

        # Runs inside a DynamicLoopingCall: returning a number means
        # "call again after that many seconds"; raising LoopingCallDone
        # ends the loop with *result* as the final value.
        def _func(*args, **kwargs):
            result = None
            try:
                if self._retry_count:
                    LOG.debug("Invoking %(func_name)s; retry count is "
                              "%(retry_count)d.",
                              {'func_name': func_name,
                               'retry_count': self._retry_count})
                result = f(*args, **kwargs)
            except self._exceptions:
                # save_and_reraise_exception re-raises on exit unless
                # ctxt.reraise is cleared below (i.e. we retry instead).
                with excutils.save_and_reraise_exception() as ctxt:
                    LOG.debug("Exception which is in the suggested list of "
                              "exceptions occurred while invoking function:"
                              " %s.",
                              func_name)
                    if (self._max_retry_count != -1 and
                            self._retry_count >= self._max_retry_count):
                        LOG.debug("Cannot retry %(func_name)s upon "
                                  "suggested exception "
                                  "since retry count (%(retry_count)d) "
                                  "reached max retry count "
                                  "(%(max_retry_count)d).",
                                  {'retry_count': self._retry_count,
                                   'max_retry_count': self._max_retry_count,
                                   'func_name': func_name})
                    else:
                        ctxt.reraise = False
                        self._retry_count += 1
                        self._sleep_time += self._inc_sleep_time
                        return self._sleep_time
            raise LoopingCallDone(result)

        @functools.wraps(f)
        def func(*args, **kwargs):
            # Drive _func with a DynamicLoopingCall so the returned
            # sleep times space out the retries.
            loop = DynamicLoopingCall(_func, *args, **kwargs)
            evt = loop.start(periodic_interval_max=self._max_sleep_time)
            LOG.debug("Waiting for function %s to return.", func_name)
            return evt.wait()

        return func
# Dynamically load looping call components from the backend
# (each public name is resolved through oslo_service.backend.get_component
# at import time).
LoopingCallBase = get_component("LoopingCallBase")
LoopingCallDone = get_component("LoopingCallDone")
LoopingCallTimeOut = get_component("LoopingCallTimeOut")
FixedIntervalLoopingCall = get_component("FixedIntervalLoopingCall")
FixedIntervalWithTimeoutLoopingCall = get_component(
    "FixedIntervalWithTimeoutLoopingCall")
DynamicLoopingCall = get_component("DynamicLoopingCall")
BackOffLoopingCall = get_component("BackOffLoopingCall")
RetryDecorator = get_component("RetryDecorator")
|
@ -17,828 +17,34 @@
|
||||
|
||||
"""Generic Node base class for all workers that run on hosts."""
|
||||
|
||||
import abc
|
||||
import collections
|
||||
|
||||
import copy
|
||||
import errno
|
||||
import functools
|
||||
import gc
|
||||
import inspect
|
||||
import io
|
||||
import logging
|
||||
import os
|
||||
import random
|
||||
import signal
|
||||
import sys
|
||||
import time
|
||||
|
||||
import eventlet
|
||||
from eventlet import event
|
||||
from eventlet import tpool
|
||||
from oslo_service.backend import get_component
|
||||
|
||||
from oslo_concurrency import lockutils
|
||||
from oslo_service._i18n import _
|
||||
from oslo_service import _options
|
||||
from oslo_service import eventlet_backdoor
|
||||
from oslo_service import systemd
|
||||
from oslo_service import threadgroup
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
# Dynamically expose components from the backend
# (each name is resolved through oslo_service.backend.get_component at
# import time).
ServiceBase = get_component("ServiceBase")
ServiceLauncher = get_component("ServiceLauncher")
Launcher = get_component("Launcher")
ProcessLauncher = get_component("ProcessLauncher")
Service = get_component("Service")
Services = get_component("Services")
ServiceWrapper = get_component("ServiceWrapper")
SignalHandler = get_component("SignalHandler")
SignalExit = get_component("SignalExit")

# Accepted values for the launchers' restart_method argument.
_LAUNCHER_RESTART_METHODS = ['reload', 'mutate']
# Function exports
launch = get_component("launch")

# Utility functions
_is_daemon = get_component("_is_daemon")
_is_sighup_and_daemon = get_component("_is_sighup_and_daemon")
|
||||
def list_opts():
    """Entry point for oslo-config-generator."""
    all_opts = (_options.eventlet_backdoor_opts +
                _options.service_opts)
    # Deep-copy so callers cannot mutate the module-level option objects.
    return [(None, copy.deepcopy(all_opts))]
||||
def _is_daemon():
    """Return True when the process appears to be running as a daemon.

    The process group of a foreground process matches the process group
    of its controlling terminal. If they differ, or stdout has no usable
    file descriptor or terminal, the process is assumed to be in the
    background.
    http://www.gnu.org/software/bash/manual/bashref.html#Job-Control-Basics
    """
    try:
        return os.getpgrp() != os.tcgetpgrp(sys.stdout.fileno())
    except io.UnsupportedOperation:
        # stdout has no fileno at all, so we must be detached.
        return True
    except OSError as err:
        if err.errno != errno.ENOTTY:
            raise
        # No controlling terminal: assume daemon.
        return True
||||
def _is_sighup_and_daemon(signo):
    """Return True when *signo* is a supported SIGHUP and we are a daemon."""
    if (SignalHandler().is_signal_supported('SIGHUP') and
            signo == signal.SIGHUP):
        return _is_daemon()
    # Not a (supported) SIGHUP - no need to check daemon status at all.
    return False
||||
def _check_service_base(service):
    """Validate that *service* is a ServiceBase instance.

    :param service: object to validate.
    :raises TypeError: if the object is not an instance of ServiceBase.
    """
    if not isinstance(service, ServiceBase):
        # NOTE: original message read "must an instance"; grammar fixed.
        raise TypeError(_("Service %(service)s must be an instance of"
                          " %(base)s!")
                        % {'service': service, 'base': ServiceBase})
||||
class ServiceBase(metaclass=abc.ABCMeta):
    """Base class for all services.

    Concrete services implement the start/stop/wait/reset lifecycle.
    """

    @abc.abstractmethod
    def start(self):
        """Start the service."""

    @abc.abstractmethod
    def stop(self):
        """Stop the service."""

    @abc.abstractmethod
    def wait(self):
        """Block until the service has completed."""

    @abc.abstractmethod
    def reset(self):
        """Reset the service.

        Called in case service running in daemon mode receives SIGHUP.
        """
||||
class Singleton(type):
    # Metaclass caching exactly one instance per class, guarded by a
    # process-wide lock so concurrent first instantiations are safe.
    _instances = {}
    _semaphores = lockutils.Semaphores()

    def __call__(cls, *args, **kwargs):
        # Only the first call's arguments are used to build the
        # instance; later calls return the cached one regardless of
        # their arguments.
        with lockutils.lock('singleton_lock', semaphores=cls._semaphores):
            if cls not in cls._instances:
                cls._instances[cls] = super().__call__(
                    *args, **kwargs)
        return cls._instances[cls]
||||
class SignalHandler(metaclass=Singleton):
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
super().__init__(*args, **kwargs)
|
||||
|
||||
self.__setup_signal_interruption()
|
||||
|
||||
# Map all signal names to signal integer values and create a
|
||||
# reverse mapping (for easier + quick lookup).
|
||||
self._ignore_signals = ('SIG_DFL', 'SIG_IGN')
|
||||
self._signals_by_name = {name: getattr(signal, name)
|
||||
for name in dir(signal)
|
||||
if name.startswith("SIG") and
|
||||
name not in self._ignore_signals}
|
||||
self.signals_to_name = {
|
||||
sigval: name
|
||||
for (name, sigval) in self._signals_by_name.items()}
|
||||
self._signal_handlers = collections.defaultdict(list)
|
||||
self.clear()
|
||||
|
||||
def clear(self):
|
||||
for sig in self._signal_handlers:
|
||||
signal.signal(sig, signal.SIG_DFL)
|
||||
self._signal_handlers.clear()
|
||||
|
||||
def add_handlers(self, signals, handler):
|
||||
for sig in signals:
|
||||
self.add_handler(sig, handler)
|
||||
|
||||
def add_handler(self, sig, handler):
|
||||
if not self.is_signal_supported(sig):
|
||||
return
|
||||
signo = self._signals_by_name[sig]
|
||||
self._signal_handlers[signo].append(handler)
|
||||
signal.signal(signo, self._handle_signal)
|
||||
|
||||
def _handle_signal(self, signo, frame):
|
||||
# This method can be called anytime, even between two Python
|
||||
# instructions. It's scheduled by the C signal handler of Python using
|
||||
# Py_AddPendingCall().
|
||||
#
|
||||
# We only do one thing: schedule a call to _handle_signal_cb() later.
|
||||
# eventlet.spawn() is not signal-safe: _handle_signal() can be called
|
||||
# during a call to eventlet.spawn(). This case is supported, it is
|
||||
# ok to schedule multiple calls to _handle_signal() with the same
|
||||
# signal number.
|
||||
#
|
||||
# To call to _handle_signal_cb() is delayed to avoid reentrant calls to
|
||||
# _handle_signal_cb(). It avoids race conditions like reentrant call to
|
||||
# clear(): clear() is not reentrant (bug #1538204).
|
||||
eventlet.spawn(self._handle_signal_cb, signo, frame)
|
||||
|
||||
# On Python >= 3.5, ensure that eventlet's poll() or sleep() call is
|
||||
# interrupted by raising an exception. If the signal handler does not
|
||||
# raise an exception then due to PEP 475 the call will not return until
|
||||
# an event is detected on a file descriptor or the timeout is reached,
|
||||
# and thus eventlet will not wake up and notice that there has been a
|
||||
# new thread spawned.
|
||||
if self.__force_interrupt_on_signal:
|
||||
try:
|
||||
interrupted_frame = inspect.stack(context=0)[1]
|
||||
except IndexError:
|
||||
pass
|
||||
else:
|
||||
if ((interrupted_frame.function == 'do_poll' and
|
||||
interrupted_frame.filename == self.__hub_module_file) or
|
||||
(interrupted_frame.function == 'do_sleep' and
|
||||
interrupted_frame.filename == __file__)):
|
||||
raise OSError(errno.EINTR, 'Interrupted')
|
||||
|
||||
def __setup_signal_interruption(self):
    """Arrange for signals to actually interrupt poll() and sleep().

    PEP 475 makes blocking syscalls auto-restart after a signal unless
    the handler raises, so without this eventlet would only wake up on
    fd activity or a timeout. Only needed for poll-based hubs.
    """
    select_module = eventlet.patcher.original('select')
    self.__force_interrupt_on_signal = hasattr(select_module, 'poll')

    if self.__force_interrupt_on_signal:
        try:
            from eventlet.hubs import poll as poll_hub
        except ImportError:
            pass
        else:
            # Marker function the signal handler looks for in the stack;
            # raising IOError/EINTR anywhere inside it is safe.
            # NOTE: the name 'do_sleep' is matched by the handler - keep it.
            def do_sleep(time_sleep_func, seconds):
                return time_sleep_func(seconds)

            time_sleep = eventlet.patcher.original('time').sleep

            # Swallow the EINTR we inject from the signal handler so the
            # behaviour matches Python 2 sleep(): the sleep is cut short
            # (not resumed) and no exception escapes.
            @functools.wraps(time_sleep)
            def sleep_wrapper(seconds):
                try:
                    return do_sleep(time_sleep, seconds)
                except (OSError, InterruptedError) as err:
                    if err.errno != errno.EINTR:
                        raise

            poll_hub.sleep = sleep_wrapper

    hub = eventlet.hubs.get_hub()
    # Remember where the hub's do_poll lives so the signal handler can
    # recognise frames belonging to it.
    self.__hub_module_file = sys.modules[hub.__module__].__file__
||||
|
||||
def _handle_signal_cb(self, signo, frame):
|
||||
for handler in reversed(self._signal_handlers[signo]):
|
||||
handler(signo, frame)
|
||||
|
||||
def is_signal_supported(self, sig_name):
    """Return True if *sig_name* is a signal known on this platform."""
    return sig_name in self._signals_by_name
||||
|
||||
|
||||
class Launcher:
    """Launch one or more services and wait for them to complete."""

    def __init__(self, conf, restart_method='reload'):
        """Initialize the service launcher.

        :param conf: an instance of ConfigOpts
        :param restart_method: If 'reload', calls reload_config_files on
            SIGHUP. If 'mutate', calls mutate_config_files on SIGHUP.
            Other values produce a ValueError.
        :returns: None
        """
        self.conf = conf
        conf.register_opts(_options.service_opts)
        # Services() validates restart_method and raises ValueError.
        self.services = Services(restart_method=restart_method)
        self.backdoor_port = eventlet_backdoor.initialize_if_enabled(
            self.conf)
        self.restart_method = restart_method

    def launch_service(self, service, workers=1):
        """Load and start the given service.

        :param service: The service you would like to start, must be an
            instance of :class:`oslo_service.service.ServiceBase`
        :param workers: Accepted only for signature compatibility with
            ProcessLauncher.launch_service; must be None, 1 or omitted.
        :returns: None
        """
        if workers not in (None, 1):
            raise ValueError(_("Launcher asked to start multiple workers"))
        _check_service_base(service)
        service.backdoor_port = self.backdoor_port
        self.services.add(service)

    def stop(self):
        """Stop all services which are currently running.

        :returns: None
        """
        self.services.stop()

    def wait(self):
        """Wait until all services have been stopped, and then return.

        :returns: None
        """
        self.services.wait()

    def restart(self):
        """Reload config files and restart service.

        :returns: None
        """
        if self.restart_method == 'reload':
            self.conf.reload_config_files()
        else:  # self.restart_method == 'mutate'
            self.conf.mutate_config_files()
        self.services.restart()
|
||||
|
||||
class SignalExit(SystemExit):
    """SystemExit variant that records which signal triggered the exit."""

    def __init__(self, signo, exccode=1):
        # The exit code travels through SystemExit.code as usual.
        super().__init__(exccode)
        self.signo = signo
|
||||
|
||||
class ServiceLauncher(Launcher):
    """Runs one or more service in a parent process."""

    def __init__(self, conf, restart_method='reload'):
        """Constructor.

        :param conf: an instance of ConfigOpts
        :param restart_method: passed to super
        """
        super().__init__(conf, restart_method=restart_method)
        self.signal_handler = SignalHandler()

    def _graceful_shutdown(self, *args):
        # Stop listening for further signals; optionally arm a SIGALRM
        # watchdog so a hung shutdown cannot block forever.
        self.signal_handler.clear()
        if (self.conf.graceful_shutdown_timeout and
                self.signal_handler.is_signal_supported('SIGALRM')):
            signal.alarm(self.conf.graceful_shutdown_timeout)
        self.stop()

    def _reload_service(self, *args):
        # SIGHUP: unwind back into wait(), which restarts the services.
        self.signal_handler.clear()
        raise SignalExit(signal.SIGHUP)

    def _fast_exit(self, *args):
        LOG.info('Caught SIGINT signal, instantaneous exiting')
        os._exit(1)

    def _on_timeout_exit(self, *args):
        LOG.info('Graceful shutdown timeout exceeded, '
                 'instantaneous exiting')
        os._exit(1)

    def handle_signal(self):
        """Set self._handle_signal as a signal handler."""
        self.signal_handler.clear()
        self.signal_handler.add_handler('SIGTERM', self._graceful_shutdown)
        self.signal_handler.add_handler('SIGINT', self._fast_exit)
        self.signal_handler.add_handler('SIGHUP', self._reload_service)
        self.signal_handler.add_handler('SIGALRM', self._on_timeout_exit)

    def _wait_for_exit_or_signal(self):
        # Returns (status, signo); signo != 0 means a signal ended the wait.
        status, signo = None, 0

        if self.conf.log_options:
            LOG.debug('Full set of CONF:')
            self.conf.log_opt_values(LOG, logging.DEBUG)

        try:
            super().wait()
        except SignalExit as exc:
            signame = self.signal_handler.signals_to_name[exc.signo]
            LOG.info('Caught %s, handling', signame)
            status, signo = exc.code, exc.signo
        except SystemExit as exc:
            self.stop()
            status = exc.code
        except Exception:
            self.stop()
        return status, signo

    def wait(self):
        """Wait for a service to terminate and restart it on SIGHUP.

        :returns: termination status
        """
        systemd.notify_once()
        self.signal_handler.clear()
        while True:
            self.handle_signal()
            status, signo = self._wait_for_exit_or_signal()
            if not _is_sighup_and_daemon(signo):
                break
            self.restart()

        super().wait()
        return status
||||
|
||||
|
||||
class ServiceWrapper:
    """Bookkeeping for one service managed by ProcessLauncher."""

    def __init__(self, service, workers):
        self.service = service          # the ServiceBase instance to run
        self.workers = workers          # desired number of child processes
        self.children = set()           # live child pids for this service
        self.forktimes = []             # recent fork timestamps (rate limit)
|
||||
|
||||
class ProcessLauncher:
    """Launch a service with a given number of workers."""

    def __init__(self, conf, wait_interval=0.01, restart_method='reload'):
        """Constructor.

        :param conf: an instance of ConfigOpts
        :param wait_interval: The interval to sleep for between checks
            of child process exit.
        :param restart_method: If 'reload', calls reload_config_files on
            SIGHUP. If 'mutate', calls mutate_config_files on SIGHUP.
            Other values produce a ValueError.
        """
        self.conf = conf
        conf.register_opts(_options.service_opts)
        self.children = {}          # pid -> ServiceWrapper
        self.sigcaught = None
        self.running = True
        self.wait_interval = wait_interval
        self.launcher = None
        # Children detect an unexpected parent death by the write end of
        # this pipe being closed.
        rfd, self.writepipe = os.pipe()
        self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r')
        self.signal_handler = SignalHandler()
        self.handle_signal()
        self.restart_method = restart_method
        if restart_method not in _LAUNCHER_RESTART_METHODS:
            raise ValueError(_("Invalid restart_method: %s") % restart_method)

    def handle_signal(self):
        """Add instance's signal handlers to class handlers."""
        self.signal_handler.add_handler('SIGTERM', self._handle_term)
        self.signal_handler.add_handler('SIGHUP', self._handle_hup)
        self.signal_handler.add_handler('SIGINT', self._fast_exit)
        self.signal_handler.add_handler('SIGALRM', self._on_alarm_exit)

    def _handle_term(self, signo, frame):
        """Handle a TERM event.

        :param signo: signal number
        :param frame: current stack frame
        """
        self.sigcaught = signo
        self.running = False

        # Allow the process to be killed again and die from natural causes
        self.signal_handler.clear()

    def _handle_hup(self, signo, frame):
        """Handle a HUP event.

        :param signo: signal number
        :param frame: current stack frame
        """
        self.sigcaught = signo
        self.running = False

        # Do NOT clear the signal_handler, allowing multiple SIGHUPs to be
        # received swiftly. If a non-HUP is received before #wait loops, the
        # second event will "overwrite" the HUP. This is fine.

    def _fast_exit(self, signo, frame):
        LOG.info('Caught SIGINT signal, instantaneous exiting')
        os._exit(1)

    def _on_alarm_exit(self, signo, frame):
        LOG.info('Graceful shutdown timeout exceeded, '
                 'instantaneous exiting')
        os._exit(1)

    def _pipe_watcher(self):
        # Blocks until the parent's write end is closed, i.e. the parent
        # died unexpectedly.
        self.readpipe.read(1)

        LOG.info('Parent process has died unexpectedly, exiting')

        if self.launcher:
            self.launcher.stop()

        sys.exit(1)

    def _child_process_handle_signal(self):
        # Children get their own, simpler signal handling.

        def _sigterm(*args):
            self.signal_handler.clear()
            self.launcher.stop()

        def _sighup(*args):
            self.signal_handler.clear()
            raise SignalExit(signal.SIGHUP)

        self.signal_handler.clear()

        # Parent signals with SIGTERM when it wants us to go away.
        self.signal_handler.add_handler('SIGTERM', _sigterm)
        self.signal_handler.add_handler('SIGHUP', _sighup)
        self.signal_handler.add_handler('SIGINT', self._fast_exit)

    def _child_wait_for_exit_or_signal(self, launcher):
        status, signo = 0, 0

        # NOTE(johannes): All exceptions are caught to ensure this
        # doesn't fallback into the loop spawning children. It would
        # be bad for a child to spawn more children.
        try:
            launcher.wait()
        except SignalExit as exc:
            signame = self.signal_handler.signals_to_name[exc.signo]
            LOG.info('Child caught %s, handling', signame)
            status, signo = exc.code, exc.signo
        except SystemExit as exc:
            launcher.stop()
            status = exc.code
        except BaseException:
            launcher.stop()
            LOG.exception('Unhandled exception')
            status = 2

        return status, signo

    def _child_process(self, service):
        self._child_process_handle_signal()

        # Reopen the eventlet hub to make sure we don't share an epoll
        # fd with parent and/or siblings, which would be bad
        eventlet.hubs.use_hub()

        # Close write to ensure only parent has it open
        os.close(self.writepipe)
        # Create greenthread to watch for parent to close pipe
        eventlet.spawn_n(self._pipe_watcher)

        # Reseed random number generator
        random.seed()

        launcher = Launcher(self.conf, restart_method=self.restart_method)
        launcher.launch_service(service)
        return launcher

    def _start_child(self, wrap):
        if len(wrap.forktimes) > wrap.workers:
            # Limit ourselves to one process a second (over the period of
            # number of workers * 1 second). This will allow workers to
            # start up quickly but ensure we don't fork off children that
            # die instantly too quickly.
            if time.time() - wrap.forktimes[0] < wrap.workers:
                LOG.info('Forking too fast, sleeping')
                time.sleep(1)

            wrap.forktimes.pop(0)

        wrap.forktimes.append(time.time())

        pid = os.fork()
        if pid == 0:
            # When parent used native threads the library on child needs to
            # be "reset", otherwise native threads won't work on the child.
            tpool.killall()
            self.launcher = self._child_process(wrap.service)
            while True:
                self._child_process_handle_signal()
                status, signo = self._child_wait_for_exit_or_signal(
                    self.launcher)
                if not _is_sighup_and_daemon(signo):
                    self.launcher.wait()
                    break
                self.launcher.restart()

            os._exit(status)

        LOG.debug('Started child %d', pid)

        wrap.children.add(pid)
        self.children[pid] = wrap

        return pid

    def launch_service(self, service, workers=1):
        """Launch a service with a given number of workers.

        :param service: a service to launch, must be an instance of
            :class:`oslo_service.service.ServiceBase`
        :param workers: a number of processes in which a service
            will be running
        """
        _check_service_base(service)
        wrap = ServiceWrapper(service, workers)

        # Hide existing objects from the garbage collector, so that most
        # existing pages will remain in shared memory rather than being
        # duplicated between subprocesses in the GC mark-and-sweep.
        # (Requires Python 3.7 or later.)
        if hasattr(gc, 'freeze'):
            gc.freeze()

        LOG.info('Starting %d workers', wrap.workers)
        while self.running and len(wrap.children) < wrap.workers:
            self._start_child(wrap)

    def _wait_child(self):
        try:
            # Don't block if no child processes have exited
            pid, status = os.waitpid(0, os.WNOHANG)
            if not pid:
                return None
        except OSError as exc:
            if exc.errno not in (errno.EINTR, errno.ECHILD):
                raise
            return None

        if os.WIFSIGNALED(status):
            sig = os.WTERMSIG(status)
            LOG.info('Child %(pid)d killed by signal %(sig)d',
                     dict(pid=pid, sig=sig))
        else:
            code = os.WEXITSTATUS(status)
            LOG.info('Child %(pid)s exited with status %(code)d',
                     dict(pid=pid, code=code))

        if pid not in self.children:
            LOG.warning('pid %d not in child list', pid)
            return None

        wrap = self.children.pop(pid)
        wrap.children.remove(pid)
        return wrap

    def _respawn_children(self):
        while self.running:
            wrap = self._wait_child()
            if not wrap:
                # Yield to other threads if no children have exited
                # Sleep for a short time to avoid excessive CPU usage
                # (see bug #1095346)
                eventlet.greenthread.sleep(self.wait_interval)
                continue
            while self.running and len(wrap.children) < wrap.workers:
                self._start_child(wrap)

    def wait(self):
        """Loop waiting on children to die and respawning as necessary."""

        systemd.notify_once()
        if self.conf.log_options:
            LOG.debug('Full set of CONF:')
            self.conf.log_opt_values(LOG, logging.DEBUG)

        try:
            while True:
                self.handle_signal()
                self._respawn_children()
                # No signal means that stop was called. Don't clean up here.
                if not self.sigcaught:
                    return

                signame = self.signal_handler.signals_to_name[self.sigcaught]
                LOG.info('Caught %s, stopping children', signame)
                if not _is_sighup_and_daemon(self.sigcaught):
                    break

                child_signal = signal.SIGTERM
                if self.restart_method == 'reload':
                    self.conf.reload_config_files()
                elif self.restart_method == 'mutate':
                    self.conf.mutate_config_files()
                    child_signal = signal.SIGHUP
                for svc in {w.service for w in self.children.values()}:
                    svc.reset()

                for pid in self.children:
                    os.kill(pid, child_signal)

                self.running = True
                self.sigcaught = None
        except eventlet.greenlet.GreenletExit:
            LOG.info("Wait called after thread killed. Cleaning up.")

        # if we are here it means that we are trying to do graceful shutdown.
        # add alarm watching that graceful_shutdown_timeout is not exceeded
        if (self.conf.graceful_shutdown_timeout and
                self.signal_handler.is_signal_supported('SIGALRM')):
            signal.alarm(self.conf.graceful_shutdown_timeout)

        self.stop()

    def stop(self):
        """Terminate child processes and wait on each."""
        self.running = False

        LOG.debug("Stop services.")
        for svc in {w.service for w in self.children.values()}:
            svc.stop()

        LOG.debug("Killing children.")
        for pid in self.children:
            try:
                os.kill(pid, signal.SIGTERM)
            except OSError as exc:
                if exc.errno != errno.ESRCH:
                    raise

        # Wait for children to die
        if self.children:
            LOG.info('Waiting on %d children to exit', len(self.children))
            while self.children:
                self._wait_child()
|
||||
|
||||
class Service(ServiceBase):
    """Service object for binaries running on hosts."""

    def __init__(self, threads=1000):
        # Pool used by subclasses to run their workers/timers.
        self.tg = threadgroup.ThreadGroup(threads)

    def reset(self):
        """Reset a service in case it received a SIGHUP."""

    def start(self):
        """Start a service."""

    def stop(self, graceful=False):
        """Stop a service.

        :param graceful: indicates whether to wait for all threads to
            finish or terminate them instantly
        """
        self.tg.stop(graceful)

    def wait(self):
        """Wait for a service to shut down."""
        self.tg.wait()
|
||||
|
||||
class Services:
    """Container that runs a set of services, one thread each."""

    def __init__(self, restart_method='reload'):
        # Fail loudly at construction time on a bad restart strategy.
        if restart_method not in _LAUNCHER_RESTART_METHODS:
            raise ValueError(_("Invalid restart_method: %s") % restart_method)
        self.restart_method = restart_method
        self.services = []
        self.tg = threadgroup.ThreadGroup()
        # Fired once on shutdown; run_service wrapper threads block on it.
        self.done = event.Event()

    def add(self, service):
        """Add a service to a list and create a thread to run it.

        :param service: service to run
        """
        self.services.append(service)
        self.tg.add_thread(self.run_service, service, self.done)

    def stop(self):
        """Wait for graceful shutdown of services and kill the threads."""
        for svc in self.services:
            svc.stop()

        # Each service has performed cleanup, now signal that the
        # run_service wrapper threads can now die:
        if not self.done.ready():
            self.done.send()

        # reap threads:
        self.tg.stop()

    def wait(self):
        """Wait for services to shut down."""
        for svc in self.services:
            svc.wait()
        self.tg.wait()

    def restart(self):
        """Reset services.

        With restart_method 'reload' the services are stopped, reset and
        started again in new threads. With 'mutate' they are only reset
        in place, without being restarted.
        """
        reloading = self.restart_method == 'reload'
        if reloading:
            self.stop()
            self.done = event.Event()
        for svc in self.services:
            svc.reset()
            if reloading:
                self.tg.add_thread(self.run_service, svc, self.done)

    @staticmethod
    def run_service(service, done):
        """Service start wrapper.

        :param service: service to run
        :param done: event to wait on until a shutdown is triggered
        :returns: None
        """
        try:
            service.start()
        except Exception:
            LOG.exception('Error starting thread.')
            raise SystemExit(1)
        else:
            done.wait()
|
||||
|
||||
def launch(conf, service, workers=1, restart_method='reload'):
    """Launch a service with a given number of workers.

    :param conf: an instance of ConfigOpts
    :param service: a service to launch, must be an instance of
        :class:`oslo_service.service.ServiceBase`
    :param workers: a number of processes in which a service will be
        running, type should be int.
    :param restart_method: Passed to the constructed launcher. If 'reload',
        the launcher will call reload_config_files on SIGHUP. If 'mutate',
        it will call mutate_config_files on SIGHUP. Other values produce
        a ValueError.
    :returns: instance of a launcher that was used to launch the service
    """
    if workers is not None and not isinstance(workers, int):
        raise TypeError(_("Type of workers should be int!"))

    if workers is not None and workers <= 0:
        raise ValueError(_("Number of workers should be positive!"))

    # A single worker (or unspecified) runs in-process; more than one
    # forks child processes.
    if workers is None or workers == 1:
        launcher = ServiceLauncher(conf, restart_method=restart_method)
    else:
        launcher = ProcessLauncher(conf, restart_method=restart_method)
    launcher.launch_service(service, workers=workers)

    return launcher
||||
|
@ -19,6 +19,18 @@ import ssl
|
||||
from oslo_service._i18n import _
|
||||
from oslo_service import _options
|
||||
|
||||
from debtcollector import removals

# Emit a module-level deprecation warning as soon as sslutils is imported.
removals.removed_module(
    __name__,
    replacement=None,
    removal_version="2026.2",
    message=(
        "The 'oslo_service.sslutils' module is deprecated and will be removed"
        " in version 2026.2."
    )
)


# Name of the section holding SSL options in the service config file.
config_section = 'ssl'
||||
|
@ -23,6 +23,9 @@ from oslo_service.backend import init_backend
|
||||
|
||||
|
||||
class TestBackend(unittest.TestCase):
|
||||
def setUp(self):
|
||||
backend_module._reset_backend()
|
||||
|
||||
def tearDown(self):
|
||||
backend_module._reset_backend()
|
||||
|
||||
|
@ -438,7 +438,7 @@ class ProcessLauncherTest(base.ServiceBaseTestCase):
|
||||
fakeServiceWrapper = service.ServiceWrapper(service.Service(), 1)
|
||||
launcher.children = {pid_nums[0]: fakeServiceWrapper,
|
||||
pid_nums[1]: fakeServiceWrapper}
|
||||
with mock.patch('oslo_service.service.os.kill') as mock_kill:
|
||||
with mock.patch('os.kill') as mock_kill:
|
||||
with mock.patch.object(launcher, '_wait_child') as _wait_child:
|
||||
|
||||
def fake_wait_child():
|
||||
|
@ -12,418 +12,7 @@
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import logging
|
||||
import threading
|
||||
import warnings
|
||||
from oslo_service.backend import get_component
|
||||
|
||||
from debtcollector import removals
|
||||
import eventlet
|
||||
from eventlet import greenpool
|
||||
|
||||
from oslo_service import loopingcall
|
||||
from oslo_utils import timeutils
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _on_thread_done(_greenthread, group, thread):
|
||||
"""Callback function to be passed to GreenThread.link() when we spawn().
|
||||
|
||||
Calls the :class:`ThreadGroup` to notify it to remove this thread from
|
||||
the associated group.
|
||||
"""
|
||||
group.thread_done(thread)
|
||||
|
||||
|
||||
class Thread:
    """Wrapper around a greenthread.

    Holds a reference to the :class:`ThreadGroup` so the group can be
    notified (via :func:`_on_thread_done`) when the thread finishes and
    should be removed from the group's thread list.
    """

    def __init__(self, thread, group, link=True):
        self.thread = thread
        if link:
            self.thread.link(_on_thread_done, group, self)
        # id() of the underlying greenthread, used to recognise "self"
        # when the group iterates over its threads.
        self._ident = id(thread)

    @property
    def ident(self):
        return self._ident

    def stop(self):
        """Kill the thread by raising GreenletExit within it."""
        self.thread.kill()

    def wait(self):
        """Block until the thread completes and return the result."""
        return self.thread.wait()

    def link(self, func, *args, **kwargs):
        """Schedule a function to be run upon completion of the thread."""
        self.thread.link(func, *args, **kwargs)

    def cancel(self, *throw_args):
        """Prevent the thread from starting if it has not already done so.

        :param throw_args: the `exc_info` data to raise from :func:`wait`.
        """
        self.thread.cancel(*throw_args)
|
||||
|
||||
class ThreadGroup:
|
||||
"""A group of greenthreads and timers.
|
||||
|
||||
The point of the ThreadGroup class is to:
|
||||
|
||||
* keep track of timers and greenthreads (making it easier to stop them
|
||||
when need be).
|
||||
* provide an easy API to add timers.
|
||||
|
||||
.. note::
|
||||
The API is inconsistent, confusing, and not orthogonal. The same verbs
|
||||
often mean different things when applied to timers and threads,
|
||||
respectively. Read the documentation carefully.
|
||||
"""
|
||||
|
||||
def __init__(self, thread_pool_size=10):
    """Create a ThreadGroup with a pool of greenthreads.

    :param thread_pool_size: the maximum number of threads allowed to run
        concurrently.
    """
    self.pool = greenpool.GreenPool(thread_pool_size)
    self.threads = []   # Thread wrappers currently alive in this group
    self.timers = []    # looping-call timers owned by this group
|
||||
def add_dynamic_timer(self, callback, initial_delay=None,
                      periodic_interval_max=None, *args, **kwargs):
    """Add a timer whose period is chosen by the callback itself.

    Each iteration's delay is taken from the callback's return value on
    the previous iteration.

    .. warning::
        Passing arguments to the callback through this method is
        deprecated; use :func:`add_dynamic_timer_args` instead.

    :param callback: the function to invoke on each tick.
    :param initial_delay: seconds to wait before the first tick; if unset
        the timer may fire immediately.
    :param periodic_interval_max: cap (seconds) on the delay the callback
        may request; also used as the delay when the callback returns None.
    :returns: an :class:`oslo_service.loopingcall.DynamicLoopingCall`
        instance
    """
    if args or kwargs:
        warnings.warn("Calling add_dynamic_timer() with arguments to the "
                      "callback function is deprecated. Use "
                      "add_dynamic_timer_args() instead.",
                      DeprecationWarning)
    return self.add_dynamic_timer_args(
        callback, args, kwargs,
        initial_delay=initial_delay,
        periodic_interval_max=periodic_interval_max)
|
||||
def add_dynamic_timer_args(self, callback, args=None, kwargs=None,
                           initial_delay=None, periodic_interval_max=None,
                           stop_on_exception=True):
    """Add a timer whose period is chosen by the callback itself.

    Each iteration's delay is taken from the callback's return value on
    the previous iteration.

    :param callback: the function to invoke on each tick.
    :param args: positional arguments for the callback.
    :param kwargs: keyword arguments for the callback.
    :param initial_delay: seconds to wait before the first tick; if unset
        the timer may fire immediately.
    :param periodic_interval_max: cap (seconds) on the delay the callback
        may request; also used as the delay when the callback returns None.
    :param stop_on_exception: pass ``False`` to keep the timer running
        even if the callback raises.
    :returns: an :class:`oslo_service.loopingcall.DynamicLoopingCall`
        instance
    """
    call_args = args or []
    call_kwargs = kwargs or {}
    timer = loopingcall.DynamicLoopingCall(
        callback, *call_args, **call_kwargs)
    timer.start(initial_delay=initial_delay,
                periodic_interval_max=periodic_interval_max,
                stop_on_exception=stop_on_exception)
    self.timers.append(timer)
    return timer
|
||||
def add_timer(self, interval, callback, initial_delay=None,
              *args, **kwargs):
    """Add a timer with a fixed period.

    .. warning::
        Passing arguments to the callback through this method is
        deprecated; use :func:`add_timer_args` instead.

    :param interval: minimum seconds between invocations of the callback.
    :param callback: the function to invoke on each tick.
    :param initial_delay: seconds to wait before the first tick; if unset
        the timer may fire immediately.
    :returns: an :class:`oslo_service.loopingcall.FixedIntervalLoopingCall`
        instance
    """
    if args or kwargs:
        warnings.warn("Calling add_timer() with arguments to the callback "
                      "function is deprecated. Use add_timer_args() "
                      "instead.",
                      DeprecationWarning)
    return self.add_timer_args(interval, callback, args, kwargs,
                               initial_delay=initial_delay)
|
||||
def add_timer_args(self, interval, callback, args=None, kwargs=None,
                   initial_delay=None, stop_on_exception=True):
    """Add a timer with a fixed period.

    :param interval: minimum seconds between invocations of the callback.
    :param callback: the function to invoke on each tick.
    :param args: positional arguments for the callback.
    :param kwargs: keyword arguments for the callback.
    :param initial_delay: seconds to wait before the first tick; if unset
        the timer may fire immediately.
    :param stop_on_exception: pass ``False`` to keep the timer running
        even if the callback raises.
    :returns: an :class:`oslo_service.loopingcall.FixedIntervalLoopingCall`
        instance
    """
    call_args = args or []
    call_kwargs = kwargs or {}
    pulse = loopingcall.FixedIntervalLoopingCall(
        callback, *call_args, **call_kwargs)
    pulse.start(interval=interval,
                initial_delay=initial_delay,
                stop_on_exception=stop_on_exception)
    self.timers.append(pulse)
    return pulse
||||
|
||||
def add_thread(self, callback, *args, **kwargs):
    """Spawn a new thread.

    Blocks until the pool has capacity, then returns immediately (i.e.
    *before* the new thread is actually scheduled).

    :param callback: the function to run in the new thread.
    :param args: positional arguments to the callback function.
    :param kwargs: keyword arguments to the callback function.
    :returns: a :class:`Thread` object
    """
    gt = self.pool.spawn(callback, *args, **kwargs)
    # Link manually (link=False) so the wrapper is registered in
    # self.threads before the done-callback can ever fire.
    wrapper = Thread(gt, self, link=False)
    self.threads.append(wrapper)
    gt.link(_on_thread_done, self, wrapper)
    return wrapper
||||
|
||||
def thread_done(self, thread):
|
||||
"""Remove a completed thread from the group.
|
||||
|
||||
This method is automatically called on completion of a thread in the
|
||||
group, and should not be called explicitly.
|
||||
"""
|
||||
self.threads.remove(thread)
|
||||
|
||||
def timer_done(self, timer):
|
||||
"""Remove a timer from the group.
|
||||
|
||||
:param timer: The timer object returned from :func:`add_timer` or its
|
||||
analogues.
|
||||
"""
|
||||
self.timers.remove(timer)
|
||||
|
||||
def _perform_action_on_threads(self, action_func, on_error_func):
|
||||
current = threading.current_thread()
|
||||
# Iterate over a copy of self.threads so thread_done doesn't
|
||||
# modify the list while we're iterating
|
||||
for x in self.threads[:]:
|
||||
if x.ident == current.ident:
|
||||
# Don't perform actions on the current thread.
|
||||
continue
|
||||
try:
|
||||
action_func(x)
|
||||
except eventlet.greenlet.GreenletExit: # nosec
|
||||
# greenlet exited successfully
|
||||
pass
|
||||
except Exception:
|
||||
on_error_func(x)
|
||||
|
||||
def _stop_threads(self):
|
||||
self._perform_action_on_threads(
|
||||
lambda x: x.stop(),
|
||||
lambda x: LOG.exception('Error stopping thread.'))
|
||||
|
||||
def stop_timers(self, wait=False):
    """Stop all timers in the group and remove them from the group.

    No new invocations of timers will be triggered after they are
    stopped, but calls that are in progress will not be interrupted.

    To wait for in-progress calls to complete, pass ``wait=True`` -
    calling :func:`wait` afterwards will not have the desired effect
    since the timers will already have been removed from the group.

    :param wait: If true, block until all timers have been stopped
                 before returning.
    """
    for active_timer in self.timers:
        active_timer.stop()
    if wait:
        # Let in-flight timer callbacks finish before we forget the
        # timer objects below.
        self._wait_timers()
    self.timers = []
|
||||
|
||||
def stop(self, graceful=False):
    """Stop all timers and threads in the group.

    No new invocations of timers will be triggered after they are
    stopped, but calls that are in progress will not be interrupted.

    With ``graceful`` false, every thread is killed immediately by
    raising GreenletExit in it. In that case this method does **not**
    block until all threads and running timer callbacks have actually
    exited; call :func:`wait` to guarantee that.

    With ``graceful`` true, no thread is killed; this call blocks until
    all threads and running timer callbacks have completed. This is
    equivalent to calling :func:`stop_timers` with ``wait=True``
    followed by :func:`wait`.

    :param graceful: If true, block until all timers have stopped and
                     all threads completed; never kill threads.
                     Otherwise, kill threads immediately and return
                     immediately even if there are timer callbacks
                     still running.
    """
    self.stop_timers(wait=graceful)
    if graceful:
        # Graceful: let every thread run to completion; kill nothing.
        self._wait_threads()
    else:
        # Default: interrupt all remaining threads right away.
        self._stop_threads()
|
||||
|
||||
def _wait_timers(self):
|
||||
for x in self.timers:
|
||||
try:
|
||||
x.wait()
|
||||
except eventlet.greenlet.GreenletExit: # nosec
|
||||
# greenlet exited successfully
|
||||
pass
|
||||
except Exception:
|
||||
LOG.exception('Error waiting on timer.')
|
||||
|
||||
def _wait_threads(self):
|
||||
self._perform_action_on_threads(
|
||||
lambda x: x.wait(),
|
||||
lambda x: LOG.exception('Error waiting on thread.'))
|
||||
|
||||
def wait(self):
    """Block until all timers and threads in the group are complete.

    .. note::
        Stop any timers first - via :func:`stop_timers`, :func:`stop`,
        or :func:`cancel` with a ``timeout`` argument - otherwise this
        will block forever.

    .. note::
        :func:`stop_timers` removes the timers from the group, so a
        subsequent call to this method will not wait for any
        in-progress timer calls to complete.

    Any exceptions raised by the threads will be logged but suppressed.

    .. note::
        This call guarantees only that the threads themselves have
        completed, **not** that any cleanup functions added via
        :func:`Thread.link` have completed.
    """
    # Drain timers first, then threads - same order as stop().
    self._wait_timers()
    self._wait_threads()
|
||||
|
||||
def _any_threads_alive(self):
|
||||
current = threading.current_thread()
|
||||
for x in self.threads[:]:
|
||||
if x.ident == current.ident:
|
||||
# Don't check current thread.
|
||||
continue
|
||||
if not x.thread.dead:
|
||||
return True
|
||||
return False
|
||||
|
||||
@removals.remove(removal_version='?')
def cancel(self, *throw_args, **kwargs):
    """Cancel unstarted threads in the group, and optionally stop the rest.

    .. warning::
        This method is deprecated and should not be used. It will be
        removed in a future release.

    Called without the ``timeout`` argument, this prevents any threads
    in the group that have not yet started from ever running, then
    returns immediately; running threads and timers are not affected.

    With ``timeout``, running threads get that long as a grace period
    to finish. After the timeout, any threads in the group still
    running are killed by raising GreenletExit in them, and all timers
    are stopped (so that they are not retriggered - timer calls that
    are in progress will not be interrupted). This method will **not**
    block until all threads have actually exited, nor until all
    in-progress timer calls have completed; call :func:`wait` to
    guarantee that. If all threads complete before the timeout expires,
    timers are left running and there is then no way to stop them, so
    for consistent behaviour :func:`stop_timers` should be called
    before calling this method.

    :param throw_args: the `exc_info` data to raise from
                       :func:`Thread.wait` for any of the unstarted
                       threads. (Though note that
                       :func:`ThreadGroup.wait` suppresses exceptions.)
    :param timeout: time to wait for running threads to complete before
                    calling stop(). If not supplied, threads that are
                    already running continue to completion.
    :param wait_time: length of time in seconds to sleep between checks
                      of whether any threads are still alive.
                      (Default 1s.)
    """
    self._perform_action_on_threads(
        lambda t: t.cancel(*throw_args),
        lambda t: LOG.exception('Error canceling thread.'))

    timeout = kwargs.get('timeout', None)
    if timeout is None:
        # No grace period requested: leave running threads alone.
        return

    wait_time = kwargs.get('wait_time', 1)
    watch = timeutils.StopWatch(duration=timeout)
    watch.start()
    # Poll until every other thread has died. Before the deadline we
    # just sleep between checks; once it expires we stop the group
    # (and keep re-stopping on each pass until the threads are
    # actually dead, matching the historical behaviour).
    while self._any_threads_alive():
        if watch.expired():
            LOG.debug("Cancel timeout reached, stopping threads.")
            self.stop()
        else:
            eventlet.sleep(wait_time)
|
||||
# Resolve the concrete Thread/ThreadGroup implementations from the
# currently selected backend so that existing imports of these names
# keep working after the backend split.
# NOTE(review): assumes get_component() is the new backend loader
# introduced by this change - confirm against the backend module.
Thread = get_component("Thread")
ThreadGroup = get_component("ThreadGroup")
|
||||
|
@ -35,6 +35,19 @@ from oslo_service import _options
|
||||
from oslo_service import service
|
||||
from oslo_service import sslutils
|
||||
|
||||
from debtcollector import removals
|
||||
|
||||
# Flag this whole module as deprecated at import time via
# debtcollector, steering users toward uwsgi for WSGI serving.
removals.removed_module(
    __name__,
    replacement="uwsgi",
    removal_version="2026.2",
    message=(
        "The 'oslo_service.wsgi' module is deprecated and will be removed in "
        "version 2026.2. We recommend transitioning to 'uwsgi' for serving "
        "WSGI applications."
    )
)


# Module-level logger, named after this module.
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
@ -0,0 +1,28 @@
|
||||
---
|
||||
features:
|
||||
- |
|
||||
A new backend system has been introduced to modularize and improve
|
||||
the flexibility of the service implementation. This system allows
|
||||
developers to choose or implement alternative backends.
|
||||
|
||||
By default, the ``eventlet`` backend is used, preserving the previous
|
||||
behavior. The backend is dynamically loaded through the new
|
||||
backend mechanism.
|
||||
deprecations:
|
||||
- |
|
||||
The following modules are now deprecated and will be removed in a
|
||||
future release:
|
||||
|
||||
- ``oslo_service.wsgi``: Users are advised to migrate to ``uwsgi`` for
|
||||
serving WSGI applications.
|
||||
|
||||
- ``oslo_service.eventlet_backdoor``: This module has been deprecated and
|
||||
will be removed in version ``2026.2``. It is no longer maintained.
|
||||
|
||||
- ``oslo_service.fixtures``: This module has been deprecated and
|
||||
will be removed in version ``2026.2``. It is no longer maintained.
|
||||
|
||||
- ``oslo_service.sslutils``: This module has been deprecated and
|
||||
will be removed in version ``2026.2``. It is no longer maintained.
|
||||
|
||||
Users should remove any dependencies on these modules as soon as possible.
|
Loading…
x
Reference in New Issue
Block a user