Add i18n wrapping for all LOG messages
Add i18n wrapping for all LOG messages Change-Id: I7f9c71a3aa76364b291f7e21a1737b927cbdc300 Fixes: bug #1199678
This commit is contained in:
parent
1fe8e8b300
commit
7f542e3ac8
@ -18,6 +18,7 @@
|
|||||||
"""Log alarm notifier."""
|
"""Log alarm notifier."""
|
||||||
|
|
||||||
from ceilometer.alarm import notifier
|
from ceilometer.alarm import notifier
|
||||||
|
from ceilometer.openstack.common.gettextutils import _ # noqa
|
||||||
from ceilometer.openstack.common import log
|
from ceilometer.openstack.common import log
|
||||||
|
|
||||||
LOG = log.getLogger(__name__)
|
LOG = log.getLogger(__name__)
|
||||||
@ -28,5 +29,9 @@ class LogAlarmNotifier(notifier.AlarmNotifier):
|
|||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def notify(action, alarm_id, previous, current, reason):
|
def notify(action, alarm_id, previous, current, reason):
|
||||||
LOG.info("Notifying alarm %s from %s to %s with action %s because %s",
|
LOG.info(_(
|
||||||
alarm_id, previous, current, action, reason)
|
"Notifying alarm %(alarm_id)s from %(previous)s "
|
||||||
|
"to %(current)s with action %(action)s because "
|
||||||
|
"%(reason)s") % ({'alarm_id': alarm_id, 'previous': previous,
|
||||||
|
'current': current, 'action': action,
|
||||||
|
'reason': reason}))
|
||||||
|
@ -24,6 +24,7 @@ import urlparse
|
|||||||
from oslo.config import cfg
|
from oslo.config import cfg
|
||||||
|
|
||||||
from ceilometer.alarm import notifier
|
from ceilometer.alarm import notifier
|
||||||
|
from ceilometer.openstack.common.gettextutils import _ # noqa
|
||||||
from ceilometer.openstack.common import jsonutils
|
from ceilometer.openstack.common import jsonutils
|
||||||
from ceilometer.openstack.common import log
|
from ceilometer.openstack.common import log
|
||||||
|
|
||||||
@ -54,8 +55,12 @@ class RestAlarmNotifier(notifier.AlarmNotifier):
|
|||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def notify(action, alarm_id, previous, current, reason):
|
def notify(action, alarm_id, previous, current, reason):
|
||||||
LOG.info("Notifying alarm %s from %s to %s with action %s because %s",
|
LOG.info(_(
|
||||||
alarm_id, previous, current, action, reason)
|
"Notifying alarm %(alarm_id)s from %(previous)s "
|
||||||
|
"to %(current)s with action %(action)s because "
|
||||||
|
"%(reason)s") % ({'alarm_id': alarm_id, 'previous': previous,
|
||||||
|
'current': current, 'action': action,
|
||||||
|
'reason': reason}))
|
||||||
body = {'alarm_id': alarm_id, 'previous': previous,
|
body = {'alarm_id': alarm_id, 'previous': previous,
|
||||||
'current': current, 'reason': reason}
|
'current': current, 'reason': reason}
|
||||||
kwargs = {'data': jsonutils.dumps(body)}
|
kwargs = {'data': jsonutils.dumps(body)}
|
||||||
|
@ -240,8 +240,8 @@ class PartitionCoordinator(object):
|
|||||||
# nothing to distribute, but check anyway if overtaken
|
# nothing to distribute, but check anyway if overtaken
|
||||||
still_ahead = self.this < self.oldest
|
still_ahead = self.this < self.oldest
|
||||||
self.last_alarms = set(alarms)
|
self.last_alarms = set(alarms)
|
||||||
LOG.info('%(this)s not overtaken as master? %(still_ahead)s' %
|
LOG.info(_('%(this)s not overtaken as master? %(still_ahead)s') %
|
||||||
dict(this=self.this, still_ahead=still_ahead))
|
({'this': self.this, 'still_ahead': still_ahead}))
|
||||||
return still_ahead
|
return still_ahead
|
||||||
|
|
||||||
def check_mastership(self, eval_interval, api_client):
|
def check_mastership(self, eval_interval, api_client):
|
||||||
|
@ -160,7 +160,7 @@ class PartitionedAlarmService(AlarmService, rpc_service.Service):
|
|||||||
self.partition_coordinator = coordination.PartitionCoordinator()
|
self.partition_coordinator = coordination.PartitionCoordinator()
|
||||||
|
|
||||||
def initialize_service_hook(self, service):
|
def initialize_service_hook(self, service):
|
||||||
LOG.debug('initialize_service_hooks')
|
LOG.debug(_('initialize_service_hooks'))
|
||||||
self.conn.create_worker(
|
self.conn.create_worker(
|
||||||
cfg.CONF.alarm.partition_rpc_topic,
|
cfg.CONF.alarm.partition_rpc_topic,
|
||||||
rpc_dispatcher.RpcDispatcher([self]),
|
rpc_dispatcher.RpcDispatcher([self]),
|
||||||
@ -218,7 +218,7 @@ class AlarmNotifierService(rpc_service.Service):
|
|||||||
self.tg.add_timer(604800, lambda: None)
|
self.tg.add_timer(604800, lambda: None)
|
||||||
|
|
||||||
def initialize_service_hook(self, service):
|
def initialize_service_hook(self, service):
|
||||||
LOG.debug('initialize_service_hooks')
|
LOG.debug(_('initialize_service_hooks'))
|
||||||
self.conn.create_worker(
|
self.conn.create_worker(
|
||||||
cfg.CONF.alarm.notifier_rpc_topic,
|
cfg.CONF.alarm.notifier_rpc_topic,
|
||||||
rpc_dispatcher.RpcDispatcher([self]),
|
rpc_dispatcher.RpcDispatcher([self]),
|
||||||
@ -245,8 +245,8 @@ class AlarmNotifierService(rpc_service.Service):
|
|||||||
return
|
return
|
||||||
|
|
||||||
try:
|
try:
|
||||||
LOG.debug("Notifying alarm %s with action %s",
|
LOG.debug(_("Notifying alarm %(id)s with action %(act)s") % (
|
||||||
alarm_id, action)
|
{'id': alarm_id, 'act': action}))
|
||||||
notifier.notify(action, alarm_id, previous, current, reason)
|
notifier.notify(action, alarm_id, previous, current, reason)
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.exception(_("Unable to notify alarm %s"), alarm_id)
|
LOG.exception(_("Unable to notify alarm %s"), alarm_id)
|
||||||
|
@ -117,14 +117,16 @@ def start():
|
|||||||
host, port = cfg.CONF.api.host, cfg.CONF.api.port
|
host, port = cfg.CONF.api.host, cfg.CONF.api.port
|
||||||
srv = simple_server.make_server(host, port, root)
|
srv = simple_server.make_server(host, port, root)
|
||||||
|
|
||||||
LOG.info('Starting server in PID %s' % os.getpid())
|
LOG.info(_('Starting server in PID %s') % os.getpid())
|
||||||
LOG.info("Configuration:")
|
LOG.info(_("Configuration:"))
|
||||||
cfg.CONF.log_opt_values(LOG, logging.INFO)
|
cfg.CONF.log_opt_values(LOG, logging.INFO)
|
||||||
|
|
||||||
if host == '0.0.0.0':
|
if host == '0.0.0.0':
|
||||||
LOG.info('serving on 0.0.0.0:%s, view at http://127.0.0.1:%s' %
|
LOG.info(_(
|
||||||
(port, port))
|
'serving on 0.0.0.0:%(sport)s, view at http://127.0.0.1:%(vport)s')
|
||||||
|
% ({'sport': port, 'vport': port}))
|
||||||
else:
|
else:
|
||||||
LOG.info("serving on http://%s:%s" % (host, port))
|
LOG.info(_("serving on http://%(host)s:%(port)s") % (
|
||||||
|
{'host': host, 'port': port}))
|
||||||
|
|
||||||
srv.serve_forever()
|
srv.serve_forever()
|
||||||
|
@ -654,12 +654,12 @@ class Statistics(_Base):
|
|||||||
self.duration_start and
|
self.duration_start and
|
||||||
self.duration_start < start_timestamp):
|
self.duration_start < start_timestamp):
|
||||||
self.duration_start = start_timestamp
|
self.duration_start = start_timestamp
|
||||||
LOG.debug('clamping min timestamp to range')
|
LOG.debug(_('clamping min timestamp to range'))
|
||||||
if (end_timestamp and
|
if (end_timestamp and
|
||||||
self.duration_end and
|
self.duration_end and
|
||||||
self.duration_end > end_timestamp):
|
self.duration_end > end_timestamp):
|
||||||
self.duration_end = end_timestamp
|
self.duration_end = end_timestamp
|
||||||
LOG.debug('clamping max timestamp to range')
|
LOG.debug(_('clamping max timestamp to range'))
|
||||||
|
|
||||||
# If we got valid timestamps back, compute a duration in seconds.
|
# If we got valid timestamps back, compute a duration in seconds.
|
||||||
#
|
#
|
||||||
@ -800,7 +800,8 @@ class MeterController(rest.RestController):
|
|||||||
computed = pecan.request.storage_conn.get_meter_statistics(f,
|
computed = pecan.request.storage_conn.get_meter_statistics(f,
|
||||||
period,
|
period,
|
||||||
g)
|
g)
|
||||||
LOG.debug('computed value coming from %r', pecan.request.storage_conn)
|
LOG.debug(_('computed value coming from %r'),
|
||||||
|
pecan.request.storage_conn)
|
||||||
# Find the original timestamp in the query to use for clamping
|
# Find the original timestamp in the query to use for clamping
|
||||||
# the duration returned in the statistics.
|
# the duration returned in the statistics.
|
||||||
start = end = None
|
start = end = None
|
||||||
@ -1347,7 +1348,7 @@ class AlarmController(rest.RestController):
|
|||||||
try:
|
try:
|
||||||
alarm_in = storage.models.Alarm(**updated_alarm)
|
alarm_in = storage.models.Alarm(**updated_alarm)
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.exception("Error while putting alarm: %s" % updated_alarm)
|
LOG.exception(_("Error while putting alarm: %s") % updated_alarm)
|
||||||
raise ClientSideError(_("Alarm incorrect"))
|
raise ClientSideError(_("Alarm incorrect"))
|
||||||
|
|
||||||
alarm = self.conn.update_alarm(alarm_in)
|
alarm = self.conn.update_alarm(alarm_in)
|
||||||
@ -1486,7 +1487,7 @@ class AlarmsController(rest.RestController):
|
|||||||
try:
|
try:
|
||||||
alarm_in = storage.models.Alarm(**change)
|
alarm_in = storage.models.Alarm(**change)
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.exception("Error while posting alarm: %s" % change)
|
LOG.exception(_("Error while posting alarm: %s") % change)
|
||||||
raise ClientSideError(_("Alarm incorrect"))
|
raise ClientSideError(_("Alarm incorrect"))
|
||||||
|
|
||||||
alarm = conn.create_alarm(alarm_in)
|
alarm = conn.create_alarm(alarm_in)
|
||||||
|
@ -29,6 +29,7 @@ import webob
|
|||||||
|
|
||||||
from ceilometer.api import hooks
|
from ceilometer.api import hooks
|
||||||
from ceilometer.openstack.common import gettextutils
|
from ceilometer.openstack.common import gettextutils
|
||||||
|
from ceilometer.openstack.common.gettextutils import _ # noqa
|
||||||
from ceilometer.openstack.common import log
|
from ceilometer.openstack.common import log
|
||||||
|
|
||||||
LOG = log.getLogger(__name__)
|
LOG = log.getLogger(__name__)
|
||||||
@ -108,7 +109,7 @@ class ParsableErrorMiddleware(object):
|
|||||||
body = ['<error_message>' + etree.tostring(fault)
|
body = ['<error_message>' + etree.tostring(fault)
|
||||||
+ '</error_message>']
|
+ '</error_message>']
|
||||||
except etree.XMLSyntaxError as err:
|
except etree.XMLSyntaxError as err:
|
||||||
LOG.error('Error parsing HTTP response: %s' % err)
|
LOG.error(_('Error parsing HTTP response: %s') % err)
|
||||||
body = ['<error_message>%s' % state['status_code']
|
body = ['<error_message>%s' % state['status_code']
|
||||||
+ '</error_message>']
|
+ '</error_message>']
|
||||||
state['headers'].append(('Content-Type', 'application/xml'))
|
state['headers'].append(('Content-Type', 'application/xml'))
|
||||||
|
@ -89,6 +89,7 @@ import datetime
|
|||||||
|
|
||||||
import flask
|
import flask
|
||||||
|
|
||||||
|
from ceilometer.openstack.common.gettextutils import _ # noqa
|
||||||
from ceilometer.openstack.common import log
|
from ceilometer.openstack.common import log
|
||||||
from ceilometer.openstack.common import timeutils
|
from ceilometer.openstack.common import timeutils
|
||||||
|
|
||||||
@ -565,14 +566,20 @@ def compute_duration_by_resource(resource, meter):
|
|||||||
|
|
||||||
# "Clamp" the timestamps we return to the original time
|
# "Clamp" the timestamps we return to the original time
|
||||||
# range, excluding the offset.
|
# range, excluding the offset.
|
||||||
LOG.debug('start_timestamp %s, end_timestamp %s, min_ts %s, max_ts %s',
|
LOG.debug(_('start_timestamp %(start_timestamp)s, '
|
||||||
start_timestamp, end_timestamp, min_ts, max_ts)
|
'end_timestamp %(end_timestamp)s, '
|
||||||
|
'min_ts %(min_ts)s, '
|
||||||
|
'max_ts %(max_ts)s') % (
|
||||||
|
{'start_timestamp': start_timestamp,
|
||||||
|
'end_timestamp': end_timestamp,
|
||||||
|
'min_ts': min_ts,
|
||||||
|
'max_ts': max_ts}))
|
||||||
if start_timestamp and min_ts and min_ts < start_timestamp:
|
if start_timestamp and min_ts and min_ts < start_timestamp:
|
||||||
min_ts = start_timestamp
|
min_ts = start_timestamp
|
||||||
LOG.debug('clamping min timestamp to range')
|
LOG.debug(_('clamping min timestamp to range'))
|
||||||
if end_timestamp and max_ts and max_ts > end_timestamp:
|
if end_timestamp and max_ts and max_ts > end_timestamp:
|
||||||
max_ts = end_timestamp
|
max_ts = end_timestamp
|
||||||
LOG.debug('clamping max timestamp to range')
|
LOG.debug(_('clamping max timestamp to range'))
|
||||||
|
|
||||||
# If we got valid timestamps back, compute a duration in minutes.
|
# If we got valid timestamps back, compute a duration in minutes.
|
||||||
#
|
#
|
||||||
|
@ -21,6 +21,7 @@ from oslo.config import cfg
|
|||||||
from stevedore import extension
|
from stevedore import extension
|
||||||
|
|
||||||
from ceilometer import agent
|
from ceilometer import agent
|
||||||
|
from ceilometer.openstack.common.gettextutils import _ # noqa
|
||||||
from ceilometer.openstack.common import log
|
from ceilometer.openstack.common import log
|
||||||
from ceilometer.openstack.common.rpc import service as rpc_service
|
from ceilometer.openstack.common.rpc import service as rpc_service
|
||||||
from ceilometer.openstack.common import service as os_service
|
from ceilometer.openstack.common import service as os_service
|
||||||
@ -40,15 +41,16 @@ class PollingTask(agent.PollingTask):
|
|||||||
cache = {}
|
cache = {}
|
||||||
for pollster in self.pollsters:
|
for pollster in self.pollsters:
|
||||||
try:
|
try:
|
||||||
LOG.info("Polling pollster %s", pollster.name)
|
LOG.info(_("Polling pollster %s"), pollster.name)
|
||||||
samples = list(pollster.obj.get_samples(
|
samples = list(pollster.obj.get_samples(
|
||||||
self.manager,
|
self.manager,
|
||||||
cache,
|
cache,
|
||||||
))
|
))
|
||||||
publisher(samples)
|
publisher(samples)
|
||||||
except Exception as err:
|
except Exception as err:
|
||||||
LOG.warning('Continue after error from %s: %s',
|
LOG.warning(_(
|
||||||
pollster.name, err)
|
'Continue after error from %(name)s: %(error)s')
|
||||||
|
% ({'name': pollster.name, 'error': err}))
|
||||||
LOG.exception(err)
|
LOG.exception(err)
|
||||||
|
|
||||||
|
|
||||||
|
@ -79,7 +79,7 @@ class CollectorService(service.DispatchedService, rpc_service.Service):
|
|||||||
sample['counter_volume'] = sample['volume']
|
sample['counter_volume'] = sample['volume']
|
||||||
sample['counter_unit'] = sample['unit']
|
sample['counter_unit'] = sample['unit']
|
||||||
sample['counter_type'] = sample['type']
|
sample['counter_type'] = sample['type']
|
||||||
LOG.debug("UDP: Storing %s", str(sample))
|
LOG.debug(_("UDP: Storing %s"), str(sample))
|
||||||
self.dispatcher_manager.map_method('record_metering_data',
|
self.dispatcher_manager.map_method('record_metering_data',
|
||||||
sample)
|
sample)
|
||||||
except Exception:
|
except Exception:
|
||||||
|
@ -22,6 +22,7 @@ from stevedore import extension
|
|||||||
from ceilometer import agent
|
from ceilometer import agent
|
||||||
from ceilometer.compute.virt import inspector as virt_inspector
|
from ceilometer.compute.virt import inspector as virt_inspector
|
||||||
from ceilometer import nova_client
|
from ceilometer import nova_client
|
||||||
|
from ceilometer.openstack.common.gettextutils import _ # noqa
|
||||||
from ceilometer.openstack.common import log
|
from ceilometer.openstack.common import log
|
||||||
from ceilometer.openstack.common.rpc import service as rpc_service
|
from ceilometer.openstack.common.rpc import service as rpc_service
|
||||||
from ceilometer.openstack.common import service as os_service
|
from ceilometer.openstack.common import service as os_service
|
||||||
@ -39,7 +40,7 @@ class PollingTask(agent.PollingTask):
|
|||||||
cache = {}
|
cache = {}
|
||||||
for pollster in self.pollsters:
|
for pollster in self.pollsters:
|
||||||
try:
|
try:
|
||||||
LOG.info("Polling pollster %s", pollster.name)
|
LOG.info(_("Polling pollster %s"), pollster.name)
|
||||||
samples = list(pollster.obj.get_samples(
|
samples = list(pollster.obj.get_samples(
|
||||||
self.manager,
|
self.manager,
|
||||||
cache,
|
cache,
|
||||||
@ -47,15 +48,16 @@ class PollingTask(agent.PollingTask):
|
|||||||
))
|
))
|
||||||
publisher(samples)
|
publisher(samples)
|
||||||
except Exception as err:
|
except Exception as err:
|
||||||
LOG.warning('Continue after error from %s: %s',
|
LOG.warning(_(
|
||||||
pollster.name, err)
|
'Continue after error from %(name)s: %(error)s')
|
||||||
|
% ({'name': pollster.name, 'error': err}))
|
||||||
LOG.exception(err)
|
LOG.exception(err)
|
||||||
|
|
||||||
def poll_and_publish(self):
|
def poll_and_publish(self):
|
||||||
try:
|
try:
|
||||||
instances = self.manager.nv.instance_get_all_by_host(cfg.CONF.host)
|
instances = self.manager.nv.instance_get_all_by_host(cfg.CONF.host)
|
||||||
except Exception as err:
|
except Exception as err:
|
||||||
LOG.exception('Unable to retrieve instances: %s', err)
|
LOG.exception(_('Unable to retrieve instances: %s') % err)
|
||||||
else:
|
else:
|
||||||
self.poll_and_publish_instances(instances)
|
self.poll_and_publish_instances(instances)
|
||||||
|
|
||||||
|
@ -21,6 +21,7 @@
|
|||||||
from ceilometer.compute import plugin
|
from ceilometer.compute import plugin
|
||||||
from ceilometer.compute.pollsters import util
|
from ceilometer.compute.pollsters import util
|
||||||
from ceilometer.compute.virt import inspector as virt_inspector
|
from ceilometer.compute.virt import inspector as virt_inspector
|
||||||
|
from ceilometer.openstack.common.gettextutils import _ # noqa
|
||||||
from ceilometer.openstack.common import log
|
from ceilometer.openstack.common import log
|
||||||
from ceilometer import sample
|
from ceilometer import sample
|
||||||
|
|
||||||
@ -30,12 +31,12 @@ LOG = log.getLogger(__name__)
|
|||||||
class CPUPollster(plugin.ComputePollster):
|
class CPUPollster(plugin.ComputePollster):
|
||||||
|
|
||||||
def get_samples(self, manager, cache, instance):
|
def get_samples(self, manager, cache, instance):
|
||||||
LOG.info('checking instance %s', instance.id)
|
LOG.info(_('checking instance %s'), instance.id)
|
||||||
instance_name = util.instance_name(instance)
|
instance_name = util.instance_name(instance)
|
||||||
try:
|
try:
|
||||||
cpu_info = manager.inspector.inspect_cpus(instance_name)
|
cpu_info = manager.inspector.inspect_cpus(instance_name)
|
||||||
LOG.info("CPUTIME USAGE: %s %d",
|
LOG.info(_("CPUTIME USAGE: %(instance)s %(time)d") % (
|
||||||
instance.__dict__, cpu_info.time)
|
{'instance': instance.__dict__, 'time': cpu_info.time}))
|
||||||
cpu_num = {'cpu_number': cpu_info.number}
|
cpu_num = {'cpu_number': cpu_info.number}
|
||||||
yield util.make_sample_from_instance(
|
yield util.make_sample_from_instance(
|
||||||
instance,
|
instance,
|
||||||
@ -47,8 +48,8 @@ class CPUPollster(plugin.ComputePollster):
|
|||||||
)
|
)
|
||||||
except virt_inspector.InstanceNotFoundException as err:
|
except virt_inspector.InstanceNotFoundException as err:
|
||||||
# Instance was deleted while getting samples. Ignore it.
|
# Instance was deleted while getting samples. Ignore it.
|
||||||
LOG.debug('Exception while getting samples %s', err)
|
LOG.debug(_('Exception while getting samples %s'), err)
|
||||||
except Exception as err:
|
except Exception as err:
|
||||||
LOG.error('could not get CPU time for %s: %s',
|
LOG.error(_('could not get CPU time for %(id)s: %(e)s') % (
|
||||||
instance.id, err)
|
{'id': instance.id, 'e': err}))
|
||||||
LOG.exception(err)
|
LOG.exception(err)
|
||||||
|
@ -24,6 +24,7 @@ import collections
|
|||||||
from ceilometer.compute import plugin
|
from ceilometer.compute import plugin
|
||||||
from ceilometer.compute.pollsters import util
|
from ceilometer.compute.pollsters import util
|
||||||
from ceilometer.compute.virt import inspector as virt_inspector
|
from ceilometer.compute.virt import inspector as virt_inspector
|
||||||
|
from ceilometer.openstack.common.gettextutils import _ # noqa
|
||||||
from ceilometer.openstack.common import log
|
from ceilometer.openstack.common import log
|
||||||
from ceilometer import sample
|
from ceilometer import sample
|
||||||
|
|
||||||
@ -89,10 +90,10 @@ class _Base(plugin.ComputePollster):
|
|||||||
yield self._get_sample(instance, c_data)
|
yield self._get_sample(instance, c_data)
|
||||||
except virt_inspector.InstanceNotFoundException as err:
|
except virt_inspector.InstanceNotFoundException as err:
|
||||||
# Instance was deleted while getting samples. Ignore it.
|
# Instance was deleted while getting samples. Ignore it.
|
||||||
LOG.debug('Exception while getting samples %s', err)
|
LOG.debug(_('Exception while getting samples %s'), err)
|
||||||
except Exception as err:
|
except Exception as err:
|
||||||
LOG.warning('Ignoring instance %s: %s',
|
LOG.warning(_('Ignoring instance %(name)s: %(error)s') % (
|
||||||
instance_name, err)
|
{'name': instance_name, 'error': err}))
|
||||||
LOG.exception(err)
|
LOG.exception(err)
|
||||||
|
|
||||||
|
|
||||||
|
@ -23,6 +23,7 @@ import copy
|
|||||||
from ceilometer.compute import plugin
|
from ceilometer.compute import plugin
|
||||||
from ceilometer.compute.pollsters import util
|
from ceilometer.compute.pollsters import util
|
||||||
from ceilometer.compute.virt import inspector as virt_inspector
|
from ceilometer.compute.virt import inspector as virt_inspector
|
||||||
|
from ceilometer.openstack.common.gettextutils import _ # noqa
|
||||||
from ceilometer.openstack.common import log
|
from ceilometer.openstack.common import log
|
||||||
from ceilometer.openstack.common import timeutils
|
from ceilometer.openstack.common import timeutils
|
||||||
from ceilometer import sample
|
from ceilometer import sample
|
||||||
@ -73,7 +74,7 @@ class _Base(plugin.ComputePollster):
|
|||||||
|
|
||||||
def get_samples(self, manager, cache, instance):
|
def get_samples(self, manager, cache, instance):
|
||||||
instance_name = util.instance_name(instance)
|
instance_name = util.instance_name(instance)
|
||||||
LOG.info('checking instance %s', instance.id)
|
LOG.info(_('checking instance %s'), instance.id)
|
||||||
try:
|
try:
|
||||||
vnics = self._get_vnics_for_instance(
|
vnics = self._get_vnics_for_instance(
|
||||||
cache,
|
cache,
|
||||||
@ -86,10 +87,10 @@ class _Base(plugin.ComputePollster):
|
|||||||
yield self._get_sample(instance, vnic, info)
|
yield self._get_sample(instance, vnic, info)
|
||||||
except virt_inspector.InstanceNotFoundException as err:
|
except virt_inspector.InstanceNotFoundException as err:
|
||||||
# Instance was deleted while getting samples. Ignore it.
|
# Instance was deleted while getting samples. Ignore it.
|
||||||
LOG.debug('Exception while getting samples %s', err)
|
LOG.debug(_('Exception while getting samples %s'), err)
|
||||||
except Exception as err:
|
except Exception as err:
|
||||||
LOG.warning('Ignoring instance %s: %s',
|
LOG.warning(_('Ignoring instance %(name)s: %(error)s') % (
|
||||||
instance_name, err)
|
{'name': instance_name, 'error': err}))
|
||||||
LOG.exception(err)
|
LOG.exception(err)
|
||||||
|
|
||||||
|
|
||||||
|
@ -23,6 +23,7 @@ import collections
|
|||||||
from oslo.config import cfg
|
from oslo.config import cfg
|
||||||
from stevedore import driver
|
from stevedore import driver
|
||||||
|
|
||||||
|
from ceilometer.openstack.common.gettextutils import _ # noqa
|
||||||
from ceilometer.openstack.common import log
|
from ceilometer.openstack.common import log
|
||||||
|
|
||||||
|
|
||||||
@ -151,5 +152,5 @@ def get_hypervisor_inspector():
|
|||||||
invoke_on_load=True)
|
invoke_on_load=True)
|
||||||
return mgr.driver
|
return mgr.driver
|
||||||
except ImportError as e:
|
except ImportError as e:
|
||||||
LOG.error("Unable to load the hypervisor inspector: %s" % (e))
|
LOG.error(_("Unable to load the hypervisor inspector: %s") % (e))
|
||||||
return Inspector()
|
return Inspector()
|
||||||
|
@ -21,6 +21,7 @@ from lxml import etree
|
|||||||
from oslo.config import cfg
|
from oslo.config import cfg
|
||||||
|
|
||||||
from ceilometer.compute.virt import inspector as virt_inspector
|
from ceilometer.compute.virt import inspector as virt_inspector
|
||||||
|
from ceilometer.openstack.common.gettextutils import _ # noqa
|
||||||
from ceilometer.openstack.common import log as logging
|
from ceilometer.openstack.common import log as logging
|
||||||
|
|
||||||
libvirt = None
|
libvirt = None
|
||||||
@ -60,7 +61,7 @@ class LibvirtInspector(virt_inspector.Inspector):
|
|||||||
if libvirt is None:
|
if libvirt is None:
|
||||||
libvirt = __import__('libvirt')
|
libvirt = __import__('libvirt')
|
||||||
|
|
||||||
LOG.debug('Connecting to libvirt: %s', self.uri)
|
LOG.debug(_('Connecting to libvirt: %s'), self.uri)
|
||||||
self.connection = libvirt.openReadOnly(self.uri)
|
self.connection = libvirt.openReadOnly(self.uri)
|
||||||
|
|
||||||
return self.connection
|
return self.connection
|
||||||
@ -73,7 +74,7 @@ class LibvirtInspector(virt_inspector.Inspector):
|
|||||||
if (e.get_error_code() == libvirt.VIR_ERR_SYSTEM_ERROR and
|
if (e.get_error_code() == libvirt.VIR_ERR_SYSTEM_ERROR and
|
||||||
e.get_error_domain() in (libvirt.VIR_FROM_REMOTE,
|
e.get_error_domain() in (libvirt.VIR_FROM_REMOTE,
|
||||||
libvirt.VIR_FROM_RPC)):
|
libvirt.VIR_FROM_RPC)):
|
||||||
LOG.debug('Connection to libvirt broke')
|
LOG.debug(_('Connection to libvirt broke'))
|
||||||
return False
|
return False
|
||||||
raise
|
raise
|
||||||
|
|
||||||
|
@ -17,6 +17,7 @@
|
|||||||
# under the License.
|
# under the License.
|
||||||
|
|
||||||
from ceilometer import dispatcher
|
from ceilometer import dispatcher
|
||||||
|
from ceilometer.openstack.common.gettextutils import _ # noqa
|
||||||
from ceilometer.openstack.common import log
|
from ceilometer.openstack.common import log
|
||||||
from ceilometer.openstack.common import timeutils
|
from ceilometer.openstack.common import timeutils
|
||||||
from ceilometer.publisher import rpc as publisher_rpc
|
from ceilometer.publisher import rpc as publisher_rpc
|
||||||
@ -46,11 +47,13 @@ class DatabaseDispatcher(dispatcher.Base):
|
|||||||
data = [data]
|
data = [data]
|
||||||
|
|
||||||
for meter in data:
|
for meter in data:
|
||||||
LOG.debug('metering data %s for %s @ %s: %s',
|
LOG.debug(_(
|
||||||
meter['counter_name'],
|
'metering data %(counter_name)s '
|
||||||
meter['resource_id'],
|
'for %(resource_id)s @ %(timestamp)s: %(counter_volume)s')
|
||||||
meter.get('timestamp', 'NO TIMESTAMP'),
|
% ({'counter_name': meter['counter_name'],
|
||||||
meter['counter_volume'])
|
'resource_id': meter['resource_id'],
|
||||||
|
'timestamp': meter.get('timestamp', 'NO TIMESTAMP'),
|
||||||
|
'counter_volume': meter['counter_volume']}))
|
||||||
if publisher_rpc.verify_signature(
|
if publisher_rpc.verify_signature(
|
||||||
meter,
|
meter,
|
||||||
self.conf.publisher_rpc.metering_secret):
|
self.conf.publisher_rpc.metering_secret):
|
||||||
@ -63,10 +66,11 @@ class DatabaseDispatcher(dispatcher.Base):
|
|||||||
meter['timestamp'] = timeutils.normalize_time(ts)
|
meter['timestamp'] = timeutils.normalize_time(ts)
|
||||||
self.storage_conn.record_metering_data(meter)
|
self.storage_conn.record_metering_data(meter)
|
||||||
except Exception as err:
|
except Exception as err:
|
||||||
LOG.exception('Failed to record metering data: %s', err)
|
LOG.exception(_('Failed to record metering data: %s'),
|
||||||
|
err)
|
||||||
else:
|
else:
|
||||||
LOG.warning(
|
LOG.warning(_(
|
||||||
'message signature invalid, discarding message: %r',
|
'message signature invalid, discarding message: %r'),
|
||||||
meter)
|
meter)
|
||||||
|
|
||||||
def record_events(self, events):
|
def record_events(self, events):
|
||||||
|
@ -21,6 +21,7 @@
|
|||||||
|
|
||||||
from ceilometer.central import plugin
|
from ceilometer.central import plugin
|
||||||
from ceilometer import nova_client
|
from ceilometer import nova_client
|
||||||
|
from ceilometer.openstack.common.gettextutils import _ # noqa
|
||||||
from ceilometer.openstack.common import log
|
from ceilometer.openstack.common import log
|
||||||
from ceilometer.openstack.common import timeutils
|
from ceilometer.openstack.common import timeutils
|
||||||
from ceilometer import sample
|
from ceilometer import sample
|
||||||
@ -41,7 +42,7 @@ class FloatingIPPollster(plugin.CentralPollster):
|
|||||||
|
|
||||||
def get_samples(self, manager, cache):
|
def get_samples(self, manager, cache):
|
||||||
for ip in self._iter_floating_ips(cache):
|
for ip in self._iter_floating_ips(cache):
|
||||||
self.LOG.info("FLOATING IP USAGE: %s" % ip.ip)
|
self.LOG.info(_("FLOATING IP USAGE: %s") % ip.ip)
|
||||||
# FIXME (flwang) Now Nova API /os-floating-ips can't provide those
|
# FIXME (flwang) Now Nova API /os-floating-ips can't provide those
|
||||||
# attributes were used by Ceilometer, such as project id, host.
|
# attributes were used by Ceilometer, such as project id, host.
|
||||||
# In this fix, those attributes usage will be removed temporarily.
|
# In this fix, those attributes usage will be removed temporarily.
|
||||||
|
@ -22,6 +22,7 @@
|
|||||||
|
|
||||||
from oslo.config import cfg
|
from oslo.config import cfg
|
||||||
|
|
||||||
|
from ceilometer.openstack.common.gettextutils import _ # noqa
|
||||||
from ceilometer.openstack.common import log
|
from ceilometer.openstack.common import log
|
||||||
from ceilometer import plugin
|
from ceilometer import plugin
|
||||||
from ceilometer import sample
|
from ceilometer import sample
|
||||||
@ -75,7 +76,7 @@ class NetworkNotificationBase(plugin.NotificationBase):
|
|||||||
]
|
]
|
||||||
|
|
||||||
def process_notification(self, message):
|
def process_notification(self, message):
|
||||||
LOG.info('network notification %r', message)
|
LOG.info(_('network notification %r') % message)
|
||||||
message['payload'] = message['payload'][self.resource_name]
|
message['payload'] = message['payload'][self.resource_name]
|
||||||
counter_name = getattr(self, 'counter_name', self.resource_name)
|
counter_name = getattr(self, 'counter_name', self.resource_name)
|
||||||
unit_value = getattr(self, 'unit', self.resource_name)
|
unit_value = getattr(self, 'unit', self.resource_name)
|
||||||
|
@ -20,6 +20,7 @@ from oslo.config import cfg
|
|||||||
from stevedore import extension
|
from stevedore import extension
|
||||||
|
|
||||||
from ceilometer.openstack.common import context
|
from ceilometer.openstack.common import context
|
||||||
|
from ceilometer.openstack.common.gettextutils import _ # noqa
|
||||||
from ceilometer.openstack.common import log
|
from ceilometer.openstack.common import log
|
||||||
from ceilometer.openstack.common.rpc import service as rpc_service
|
from ceilometer.openstack.common.rpc import service as rpc_service
|
||||||
from ceilometer.openstack.common import service as os_service
|
from ceilometer.openstack.common import service as os_service
|
||||||
@ -79,7 +80,7 @@ class NotificationService(service.DispatchedService, rpc_service.Service):
|
|||||||
)
|
)
|
||||||
|
|
||||||
if not list(self.notification_manager):
|
if not list(self.notification_manager):
|
||||||
LOG.warning('Failed to load any notification handlers for %s',
|
LOG.warning(_('Failed to load any notification handlers for %s'),
|
||||||
self.NOTIFICATION_NAMESPACE)
|
self.NOTIFICATION_NAMESPACE)
|
||||||
self.notification_manager.map(self._setup_subscription)
|
self.notification_manager.map(self._setup_subscription)
|
||||||
|
|
||||||
@ -97,9 +98,11 @@ class NotificationService(service.DispatchedService, rpc_service.Service):
|
|||||||
"""
|
"""
|
||||||
handler = ext.obj
|
handler = ext.obj
|
||||||
ack_on_error = cfg.CONF.notification.ack_on_event_error
|
ack_on_error = cfg.CONF.notification.ack_on_event_error
|
||||||
LOG.debug('Event types from %s: %s (ack_on_error=%s)',
|
LOG.debug(_('Event types from %(name)s: %(type)s'
|
||||||
ext.name, ', '.join(handler.event_types),
|
' (ack_on_error=%(error)s)') %
|
||||||
ack_on_error)
|
{'name': ext.name,
|
||||||
|
'type': ', '.join(handler.event_types),
|
||||||
|
'error': ack_on_error})
|
||||||
|
|
||||||
for exchange_topic in handler.get_exchange_topics(cfg.CONF):
|
for exchange_topic in handler.get_exchange_topics(cfg.CONF):
|
||||||
for topic in exchange_topic.topics:
|
for topic in exchange_topic.topics:
|
||||||
@ -111,8 +114,10 @@ class NotificationService(service.DispatchedService, rpc_service.Service):
|
|||||||
exchange_name=exchange_topic.exchange,
|
exchange_name=exchange_topic.exchange,
|
||||||
ack_on_error=ack_on_error)
|
ack_on_error=ack_on_error)
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.exception('Could not join consumer pool %s/%s' %
|
LOG.exception(_('Could not join consumer pool'
|
||||||
(topic, exchange_topic.exchange))
|
' %(topic)s/%(exchange)s') %
|
||||||
|
{'topic': topic,
|
||||||
|
'exchange': exchange_topic.exchange})
|
||||||
|
|
||||||
def process_notification(self, notification):
|
def process_notification(self, notification):
|
||||||
"""RPC endpoint for notification messages
|
"""RPC endpoint for notification messages
|
||||||
@ -121,7 +126,7 @@ class NotificationService(service.DispatchedService, rpc_service.Service):
|
|||||||
bus, this method receives it. See _setup_subscription().
|
bus, this method receives it. See _setup_subscription().
|
||||||
|
|
||||||
"""
|
"""
|
||||||
LOG.debug('notification %r', notification.get('event_type'))
|
LOG.debug(_('notification %r'), notification.get('event_type'))
|
||||||
self.notification_manager.map(self._process_notification_for_ext,
|
self.notification_manager.map(self._process_notification_for_ext,
|
||||||
notification=notification)
|
notification=notification)
|
||||||
|
|
||||||
@ -151,7 +156,7 @@ class NotificationService(service.DispatchedService, rpc_service.Service):
|
|||||||
message_id = body.get('message_id')
|
message_id = body.get('message_id')
|
||||||
event_type = body['event_type']
|
event_type = body['event_type']
|
||||||
when = self._extract_when(body)
|
when = self._extract_when(body)
|
||||||
LOG.debug('Saving event "%s"', event_type)
|
LOG.debug(_('Saving event "%s"'), event_type)
|
||||||
|
|
||||||
publisher = body.get('publisher_id')
|
publisher = body.get('publisher_id')
|
||||||
request_id = body.get('_context_request_id')
|
request_id = body.get('_context_request_id')
|
||||||
|
@ -21,6 +21,7 @@ from ceilometer.openstack.common import log as logging
|
|||||||
from ceilometer import pipeline
|
from ceilometer import pipeline
|
||||||
from ceilometer import transformer
|
from ceilometer import transformer
|
||||||
|
|
||||||
|
from ceilometer.openstack.common.gettextutils import _ # noqa
|
||||||
from stevedore import extension
|
from stevedore import extension
|
||||||
|
|
||||||
|
|
||||||
@ -36,14 +37,14 @@ def _load_notification_manager():
|
|||||||
|
|
||||||
namespace = 'ceilometer.notification'
|
namespace = 'ceilometer.notification'
|
||||||
|
|
||||||
LOG.debug('loading notification handlers from %s', namespace)
|
LOG.debug(_('loading notification handlers from %s'), namespace)
|
||||||
|
|
||||||
_notification_manager = extension.ExtensionManager(
|
_notification_manager = extension.ExtensionManager(
|
||||||
namespace=namespace,
|
namespace=namespace,
|
||||||
invoke_on_load=True)
|
invoke_on_load=True)
|
||||||
|
|
||||||
if not list(_notification_manager):
|
if not list(_notification_manager):
|
||||||
LOG.warning('Failed to load any notification handlers for %s',
|
LOG.warning(_('Failed to load any notification handlers for %s'),
|
||||||
namespace)
|
namespace)
|
||||||
|
|
||||||
|
|
||||||
|
@ -24,6 +24,7 @@ import os
|
|||||||
from oslo.config import cfg
|
from oslo.config import cfg
|
||||||
import yaml
|
import yaml
|
||||||
|
|
||||||
|
from ceilometer.openstack.common.gettextutils import _ # noqa
|
||||||
from ceilometer.openstack.common import log
|
from ceilometer.openstack.common import log
|
||||||
from ceilometer import publisher
|
from ceilometer import publisher
|
||||||
|
|
||||||
@ -125,7 +126,7 @@ class Pipeline(object):
|
|||||||
try:
|
try:
|
||||||
self.publishers.append(publisher.get_publisher(p))
|
self.publishers.append(publisher.get_publisher(p))
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.exception("Unable to load publisher %s", p)
|
LOG.exception(_("Unable to load publisher %s"), p)
|
||||||
|
|
||||||
self.transformers = self._setup_transformers(cfg, transformer_manager)
|
self.transformers = self._setup_transformers(cfg, transformer_manager)
|
||||||
|
|
||||||
@ -168,11 +169,11 @@ class Pipeline(object):
|
|||||||
"No transformer named %s loaded" % transformer['name'],
|
"No transformer named %s loaded" % transformer['name'],
|
||||||
cfg)
|
cfg)
|
||||||
transformers.append(ext.plugin(**parameter))
|
transformers.append(ext.plugin(**parameter))
|
||||||
LOG.info("Pipeline %s: Setup transformer instance %s "
|
LOG.info(_(
|
||||||
"with parameter %s",
|
"Pipeline %(pipeline)s: Setup transformer instance %(name)s "
|
||||||
self,
|
"with parameter %(param)s") % ({'pipeline': self,
|
||||||
transformer['name'],
|
'name': transformer['name'],
|
||||||
parameter)
|
'param': parameter}))
|
||||||
|
|
||||||
return transformers
|
return transformers
|
||||||
|
|
||||||
@ -181,14 +182,18 @@ class Pipeline(object):
|
|||||||
for transformer in self.transformers[start:]:
|
for transformer in self.transformers[start:]:
|
||||||
sample = transformer.handle_sample(ctxt, sample)
|
sample = transformer.handle_sample(ctxt, sample)
|
||||||
if not sample:
|
if not sample:
|
||||||
LOG.debug("Pipeline %s: Sample dropped by transformer %s",
|
LOG.debug(_(
|
||||||
self, transformer)
|
"Pipeline %(pipeline)s: Sample dropped by "
|
||||||
|
"transformer %(trans)s") % ({'pipeline': self,
|
||||||
|
'trans': transformer}))
|
||||||
return
|
return
|
||||||
return sample
|
return sample
|
||||||
except Exception as err:
|
except Exception as err:
|
||||||
LOG.warning("Pipeline %s: Exit after error from transformer"
|
LOG.warning(_("Pipeline %(pipeline)s: "
|
||||||
"%s for %s",
|
"Exit after error from transformer "
|
||||||
self, transformer, sample)
|
"%(trans)s for %(smp)s") % ({'pipeline': self,
|
||||||
|
'trans': transformer,
|
||||||
|
'smp': sample}))
|
||||||
LOG.exception(err)
|
LOG.exception(err)
|
||||||
|
|
||||||
def _publish_samples(self, start, ctxt, samples):
|
def _publish_samples(self, start, ctxt, samples):
|
||||||
@ -204,21 +209,26 @@ class Pipeline(object):
|
|||||||
|
|
||||||
transformed_samples = []
|
transformed_samples = []
|
||||||
for sample in samples:
|
for sample in samples:
|
||||||
LOG.debug("Pipeline %s: Transform sample %s from %s transformer",
|
LOG.debug(_(
|
||||||
self, sample, start)
|
"Pipeline %(pipeline)s: Transform sample "
|
||||||
|
"%(smp)s from %(trans)s transformer") % ({'pipeline': self,
|
||||||
|
'smp': sample,
|
||||||
|
'trans': start}))
|
||||||
sample = self._transform_sample(start, ctxt, sample)
|
sample = self._transform_sample(start, ctxt, sample)
|
||||||
if sample:
|
if sample:
|
||||||
transformed_samples.append(sample)
|
transformed_samples.append(sample)
|
||||||
|
|
||||||
if transformed_samples:
|
if transformed_samples:
|
||||||
LOG.audit("Pipeline %s: Publishing samples", self)
|
LOG.audit(_("Pipeline %s: Publishing samples"), self)
|
||||||
for p in self.publishers:
|
for p in self.publishers:
|
||||||
try:
|
try:
|
||||||
p.publish_samples(ctxt, transformed_samples)
|
p.publish_samples(ctxt, transformed_samples)
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.exception("Pipeline %s: Continue after error "
|
LOG.exception(_(
|
||||||
"from publisher %s", self, p)
|
"Pipeline %(pipeline)s: Continue after error "
|
||||||
LOG.audit("Pipeline %s: Published samples", self)
|
"from publisher %(pub)s") % ({'pipeline': self,
|
||||||
|
'pub': p}))
|
||||||
|
LOG.audit(_("Pipeline %s: Published samples") % self)
|
||||||
|
|
||||||
def publish_sample(self, ctxt, sample):
|
def publish_sample(self, ctxt, sample):
|
||||||
self.publish_samples(ctxt, [sample])
|
self.publish_samples(ctxt, [sample])
|
||||||
@ -267,16 +277,16 @@ class Pipeline(object):
|
|||||||
def flush(self, ctxt):
|
def flush(self, ctxt):
|
||||||
"""Flush data after all samples have been injected to pipeline."""
|
"""Flush data after all samples have been injected to pipeline."""
|
||||||
|
|
||||||
LOG.audit("Flush pipeline %s", self)
|
LOG.audit(_("Flush pipeline %s"), self)
|
||||||
for (i, transformer) in enumerate(self.transformers):
|
for (i, transformer) in enumerate(self.transformers):
|
||||||
try:
|
try:
|
||||||
self._publish_samples(i + 1, ctxt,
|
self._publish_samples(i + 1, ctxt,
|
||||||
list(transformer.flush(ctxt)))
|
list(transformer.flush(ctxt)))
|
||||||
except Exception as err:
|
except Exception as err:
|
||||||
LOG.warning(
|
LOG.warning(_(
|
||||||
"Pipeline %s: Error flushing "
|
"Pipeline %(pipeline)s: Error flushing "
|
||||||
"transformer %s",
|
"transformer %(trans)s") % ({'pipeline': self,
|
||||||
self, transformer)
|
'trans': transformer}))
|
||||||
LOG.exception(err)
|
LOG.exception(err)
|
||||||
|
|
||||||
def get_interval(self):
|
def get_interval(self):
|
||||||
@ -351,13 +361,13 @@ def setup_pipeline(transformer_manager):
|
|||||||
if not os.path.exists(cfg_file):
|
if not os.path.exists(cfg_file):
|
||||||
cfg_file = cfg.CONF.find_file(cfg_file)
|
cfg_file = cfg.CONF.find_file(cfg_file)
|
||||||
|
|
||||||
LOG.debug("Pipeline config file: %s", cfg_file)
|
LOG.debug(_("Pipeline config file: %s"), cfg_file)
|
||||||
|
|
||||||
with open(cfg_file) as fap:
|
with open(cfg_file) as fap:
|
||||||
data = fap.read()
|
data = fap.read()
|
||||||
|
|
||||||
pipeline_cfg = yaml.safe_load(data)
|
pipeline_cfg = yaml.safe_load(data)
|
||||||
LOG.info("Pipeline config: %s", pipeline_cfg)
|
LOG.info(_("Pipeline config: %s"), pipeline_cfg)
|
||||||
|
|
||||||
return PipelineManager(pipeline_cfg,
|
return PipelineManager(pipeline_cfg,
|
||||||
transformer_manager)
|
transformer_manager)
|
||||||
|
@ -20,6 +20,7 @@ import logging
|
|||||||
import logging.handlers
|
import logging.handlers
|
||||||
import urlparse
|
import urlparse
|
||||||
|
|
||||||
|
from ceilometer.openstack.common.gettextutils import _ # noqa
|
||||||
from ceilometer.openstack.common import log
|
from ceilometer.openstack.common import log
|
||||||
from ceilometer import publisher
|
from ceilometer import publisher
|
||||||
|
|
||||||
@ -58,7 +59,7 @@ class FilePublisher(publisher.PublisherBase):
|
|||||||
self.publisher_logger = None
|
self.publisher_logger = None
|
||||||
path = parsed_url.path
|
path = parsed_url.path
|
||||||
if not path or path.lower() == 'file':
|
if not path or path.lower() == 'file':
|
||||||
LOG.error('The path for the file publisher is required')
|
LOG.error(_('The path for the file publisher is required'))
|
||||||
return
|
return
|
||||||
|
|
||||||
rfh = None
|
rfh = None
|
||||||
@ -72,8 +73,8 @@ class FilePublisher(publisher.PublisherBase):
|
|||||||
max_bytes = int(params.get('max_bytes')[0])
|
max_bytes = int(params.get('max_bytes')[0])
|
||||||
backup_count = int(params.get('backup_count')[0])
|
backup_count = int(params.get('backup_count')[0])
|
||||||
except ValueError:
|
except ValueError:
|
||||||
LOG.error('max_bytes and backup_count should be '
|
LOG.error(_('max_bytes and backup_count should be '
|
||||||
'numbers.')
|
'numbers.'))
|
||||||
return
|
return
|
||||||
# create rotating file handler
|
# create rotating file handler
|
||||||
rfh = logging.handlers.RotatingFileHandler(
|
rfh = logging.handlers.RotatingFileHandler(
|
||||||
|
@ -26,6 +26,7 @@ import urlparse
|
|||||||
|
|
||||||
from oslo.config import cfg
|
from oslo.config import cfg
|
||||||
|
|
||||||
|
from ceilometer.openstack.common.gettextutils import _ # noqa
|
||||||
from ceilometer.openstack.common import log
|
from ceilometer.openstack.common import log
|
||||||
from ceilometer.openstack.common import rpc
|
from ceilometer.openstack.common import rpc
|
||||||
from ceilometer import publisher
|
from ceilometer import publisher
|
||||||
@ -154,14 +155,14 @@ class RPCPublisher(publisher.PublisherBase):
|
|||||||
self.local_queue = []
|
self.local_queue = []
|
||||||
|
|
||||||
if self.policy in ['queue', 'drop']:
|
if self.policy in ['queue', 'drop']:
|
||||||
LOG.info('Publishing policy set to %s, \
|
LOG.info(_('Publishing policy set to %s, \
|
||||||
override backend retry config to 1' % self.policy)
|
override backend retry config to 1') % self.policy)
|
||||||
override_backend_retry_config(1)
|
override_backend_retry_config(1)
|
||||||
|
|
||||||
elif self.policy == 'default':
|
elif self.policy == 'default':
|
||||||
LOG.info('Publishing policy set to %s' % self.policy)
|
LOG.info(_('Publishing policy set to %s') % self.policy)
|
||||||
else:
|
else:
|
||||||
LOG.warn('Publishing policy is unknown (%s) force to default'
|
LOG.warn(_('Publishing policy is unknown (%s) force to default')
|
||||||
% self.policy)
|
% self.policy)
|
||||||
self.policy = 'default'
|
self.policy = 'default'
|
||||||
|
|
||||||
@ -186,8 +187,8 @@ class RPCPublisher(publisher.PublisherBase):
|
|||||||
'version': '1.0',
|
'version': '1.0',
|
||||||
'args': {'data': meters},
|
'args': {'data': meters},
|
||||||
}
|
}
|
||||||
LOG.audit('Publishing %d samples on %s',
|
LOG.audit(_('Publishing %(m)d samples on %(t)s') % (
|
||||||
len(msg['args']['data']), topic)
|
{'m': len(msg['args']['data']), 't': topic}))
|
||||||
self.local_queue.append((context, topic, msg))
|
self.local_queue.append((context, topic, msg))
|
||||||
|
|
||||||
if self.per_meter_topic:
|
if self.per_meter_topic:
|
||||||
@ -200,8 +201,8 @@ class RPCPublisher(publisher.PublisherBase):
|
|||||||
'args': {'data': list(meter_list)},
|
'args': {'data': list(meter_list)},
|
||||||
}
|
}
|
||||||
topic_name = topic + '.' + meter_name
|
topic_name = topic + '.' + meter_name
|
||||||
LOG.audit('Publishing %d samples on %s',
|
LOG.audit(_('Publishing %(m)d samples on %(n)s') % (
|
||||||
len(msg['args']['data']), topic_name)
|
{'m': len(msg['args']['data']), 'n': topic_name}))
|
||||||
self.local_queue.append((context, topic_name, msg))
|
self.local_queue.append((context, topic_name, msg))
|
||||||
|
|
||||||
self.flush()
|
self.flush()
|
||||||
@ -225,8 +226,8 @@ class RPCPublisher(publisher.PublisherBase):
|
|||||||
if queue_length > self.max_queue_length > 0:
|
if queue_length > self.max_queue_length > 0:
|
||||||
count = queue_length - self.max_queue_length
|
count = queue_length - self.max_queue_length
|
||||||
self.local_queue = self.local_queue[count:]
|
self.local_queue = self.local_queue[count:]
|
||||||
LOG.warn("Publisher max local_queue length is exceeded, "
|
LOG.warn(_("Publisher max local_queue length is exceeded, "
|
||||||
"dropping %d oldest samples", count)
|
"dropping %d oldest samples") % count)
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def _process_queue(queue, policy):
|
def _process_queue(queue, policy):
|
||||||
|
@ -26,6 +26,7 @@ from oslo.config import cfg
|
|||||||
from stevedore import named
|
from stevedore import named
|
||||||
|
|
||||||
from ceilometer.openstack.common import gettextutils
|
from ceilometer.openstack.common import gettextutils
|
||||||
|
from ceilometer.openstack.common.gettextutils import _ # noqa
|
||||||
from ceilometer.openstack.common import log
|
from ceilometer.openstack.common import log
|
||||||
from ceilometer.openstack.common import rpc
|
from ceilometer.openstack.common import rpc
|
||||||
|
|
||||||
@ -96,7 +97,7 @@ class DispatchedService(object):
|
|||||||
|
|
||||||
def __init__(self, *args, **kwargs):
|
def __init__(self, *args, **kwargs):
|
||||||
super(DispatchedService, self).__init__(*args, **kwargs)
|
super(DispatchedService, self).__init__(*args, **kwargs)
|
||||||
LOG.debug('loading dispatchers from %s',
|
LOG.debug(_('loading dispatchers from %s'),
|
||||||
self.DISPATCHER_NAMESPACE)
|
self.DISPATCHER_NAMESPACE)
|
||||||
self.dispatcher_manager = named.NamedExtensionManager(
|
self.dispatcher_manager = named.NamedExtensionManager(
|
||||||
namespace=self.DISPATCHER_NAMESPACE,
|
namespace=self.DISPATCHER_NAMESPACE,
|
||||||
@ -104,7 +105,7 @@ class DispatchedService(object):
|
|||||||
invoke_on_load=True,
|
invoke_on_load=True,
|
||||||
invoke_args=[cfg.CONF])
|
invoke_args=[cfg.CONF])
|
||||||
if not list(self.dispatcher_manager):
|
if not list(self.dispatcher_manager):
|
||||||
LOG.warning('Failed to load any dispatchers for %s',
|
LOG.warning(_('Failed to load any dispatchers for %s'),
|
||||||
self.DISPATCHER_NAMESPACE)
|
self.DISPATCHER_NAMESPACE)
|
||||||
|
|
||||||
|
|
||||||
|
@ -23,6 +23,7 @@ import urlparse
|
|||||||
from oslo.config import cfg
|
from oslo.config import cfg
|
||||||
from stevedore import driver
|
from stevedore import driver
|
||||||
|
|
||||||
|
from ceilometer.openstack.common.gettextutils import _ # noqa
|
||||||
from ceilometer.openstack.common import log
|
from ceilometer.openstack.common import log
|
||||||
from ceilometer import service
|
from ceilometer import service
|
||||||
from ceilometer import utils
|
from ceilometer import utils
|
||||||
@ -67,8 +68,9 @@ def get_engine(conf):
|
|||||||
conf.set_override('connection', conf.database_connection,
|
conf.set_override('connection', conf.database_connection,
|
||||||
group='database')
|
group='database')
|
||||||
engine_name = urlparse.urlparse(conf.database.connection).scheme
|
engine_name = urlparse.urlparse(conf.database.connection).scheme
|
||||||
LOG.debug('looking for %r driver in %r',
|
LOG.debug(_('looking for %(name)r driver in %(namespace)r') % (
|
||||||
engine_name, STORAGE_ENGINE_NAMESPACE)
|
{'name': engine_name,
|
||||||
|
'namespace': STORAGE_ENGINE_NAMESPACE}))
|
||||||
mgr = driver.DriverManager(STORAGE_ENGINE_NAMESPACE,
|
mgr = driver.DriverManager(STORAGE_ENGINE_NAMESPACE,
|
||||||
engine_name,
|
engine_name,
|
||||||
invoke_on_load=True)
|
invoke_on_load=True)
|
||||||
@ -140,7 +142,7 @@ def dbsync():
|
|||||||
|
|
||||||
def expirer():
|
def expirer():
|
||||||
service.prepare_service()
|
service.prepare_service()
|
||||||
LOG.debug("Clearing expired metering data")
|
LOG.debug(_("Clearing expired metering data"))
|
||||||
storage_conn = get_connection(cfg.CONF)
|
storage_conn = get_connection(cfg.CONF)
|
||||||
storage_conn.clear_expired_metering_data(
|
storage_conn.clear_expired_metering_data(
|
||||||
cfg.CONF.database.time_to_live)
|
cfg.CONF.database.time_to_live)
|
||||||
|
@ -96,8 +96,8 @@ class Connection(base.Connection):
|
|||||||
else:
|
else:
|
||||||
# This is a in-memory usage for unit tests
|
# This is a in-memory usage for unit tests
|
||||||
if Connection._memory_instance is None:
|
if Connection._memory_instance is None:
|
||||||
LOG.debug('Creating a new in-memory HBase '
|
LOG.debug(_('Creating a new in-memory HBase '
|
||||||
'Connection object')
|
'Connection object'))
|
||||||
Connection._memory_instance = MConnection()
|
Connection._memory_instance = MConnection()
|
||||||
self.conn = Connection._memory_instance
|
self.conn = Connection._memory_instance
|
||||||
else:
|
else:
|
||||||
@ -111,7 +111,7 @@ class Connection(base.Connection):
|
|||||||
self.conn.create_table(self.METER_TABLE, {'f': dict()})
|
self.conn.create_table(self.METER_TABLE, {'f': dict()})
|
||||||
|
|
||||||
def clear(self):
|
def clear(self):
|
||||||
LOG.debug('Dropping HBase schema...')
|
LOG.debug(_('Dropping HBase schema...'))
|
||||||
for table in [self.PROJECT_TABLE,
|
for table in [self.PROJECT_TABLE,
|
||||||
self.USER_TABLE,
|
self.USER_TABLE,
|
||||||
self.RESOURCE_TABLE,
|
self.RESOURCE_TABLE,
|
||||||
@ -119,11 +119,11 @@ class Connection(base.Connection):
|
|||||||
try:
|
try:
|
||||||
self.conn.disable_table(table)
|
self.conn.disable_table(table)
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.debug('Cannot disable table but ignoring error')
|
LOG.debug(_('Cannot disable table but ignoring error'))
|
||||||
try:
|
try:
|
||||||
self.conn.delete_table(table)
|
self.conn.delete_table(table)
|
||||||
except Exception:
|
except Exception:
|
||||||
LOG.debug('Cannot delete table but ignoring error')
|
LOG.debug(_('Cannot delete table but ignoring error'))
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
def _get_connection(conf):
|
def _get_connection(conf):
|
||||||
@ -134,7 +134,8 @@ class Connection(base.Connection):
|
|||||||
The tests use a subclass to override this and return an
|
The tests use a subclass to override this and return an
|
||||||
in-memory connection.
|
in-memory connection.
|
||||||
"""
|
"""
|
||||||
LOG.debug('connecting to HBase on %s:%s', conf['host'], conf['port'])
|
LOG.debug(_('connecting to HBase on %(host)s:%(port)s') % (
|
||||||
|
{'host': conf['host'], 'port': conf['port']}))
|
||||||
return happybase.Connection(host=conf['host'], port=conf['port'],
|
return happybase.Connection(host=conf['host'], port=conf['port'],
|
||||||
table_prefix=conf['table_prefix'])
|
table_prefix=conf['table_prefix'])
|
||||||
|
|
||||||
@ -269,7 +270,7 @@ class Connection(base.Connection):
|
|||||||
:param source: Optional source filter.
|
:param source: Optional source filter.
|
||||||
"""
|
"""
|
||||||
user_table = self.conn.table(self.USER_TABLE)
|
user_table = self.conn.table(self.USER_TABLE)
|
||||||
LOG.debug("source: %s" % source)
|
LOG.debug(_("source: %s") % source)
|
||||||
scan_args = {}
|
scan_args = {}
|
||||||
if source:
|
if source:
|
||||||
scan_args['columns'] = ['f:s_%s' % source]
|
scan_args['columns'] = ['f:s_%s' % source]
|
||||||
@ -281,7 +282,7 @@ class Connection(base.Connection):
|
|||||||
:param source: Optional source filter.
|
:param source: Optional source filter.
|
||||||
"""
|
"""
|
||||||
project_table = self.conn.table(self.PROJECT_TABLE)
|
project_table = self.conn.table(self.PROJECT_TABLE)
|
||||||
LOG.debug("source: %s" % source)
|
LOG.debug(_("source: %s") % source)
|
||||||
scan_args = {}
|
scan_args = {}
|
||||||
if source:
|
if source:
|
||||||
scan_args['columns'] = ['f:s_%s' % source]
|
scan_args['columns'] = ['f:s_%s' % source]
|
||||||
@ -338,7 +339,7 @@ class Connection(base.Connection):
|
|||||||
end_op=end_timestamp_op,
|
end_op=end_timestamp_op,
|
||||||
require_meter=False,
|
require_meter=False,
|
||||||
query_only=False)
|
query_only=False)
|
||||||
LOG.debug("Query Meter table: %s" % q)
|
LOG.debug(_("Query Meter table: %s") % q)
|
||||||
meters = meter_table.scan(filter=q, row_start=start_row,
|
meters = meter_table.scan(filter=q, row_start=start_row,
|
||||||
row_stop=stop_row)
|
row_stop=stop_row)
|
||||||
|
|
||||||
@ -395,7 +396,7 @@ class Connection(base.Connection):
|
|||||||
resource_table = self.conn.table(self.RESOURCE_TABLE)
|
resource_table = self.conn.table(self.RESOURCE_TABLE)
|
||||||
q = make_query(user=user, project=project, resource=resource,
|
q = make_query(user=user, project=project, resource=resource,
|
||||||
source=source, require_meter=False, query_only=True)
|
source=source, require_meter=False, query_only=True)
|
||||||
LOG.debug("Query Resource table: %s" % q)
|
LOG.debug(_("Query Resource table: %s") % q)
|
||||||
|
|
||||||
# handle metaquery
|
# handle metaquery
|
||||||
if len(metaquery) > 0:
|
if len(metaquery) > 0:
|
||||||
@ -451,7 +452,7 @@ class Connection(base.Connection):
|
|||||||
|
|
||||||
q, start, stop = make_query_from_filter(sample_filter,
|
q, start, stop = make_query_from_filter(sample_filter,
|
||||||
require_meter=False)
|
require_meter=False)
|
||||||
LOG.debug("Query Meter Table: %s" % q)
|
LOG.debug(_("Query Meter Table: %s") % q)
|
||||||
|
|
||||||
gen = meter_table.scan(filter=q, row_start=start, row_stop=stop)
|
gen = meter_table.scan(filter=q, row_start=start, row_stop=stop)
|
||||||
|
|
||||||
@ -760,7 +761,7 @@ class MConnection(object):
|
|||||||
self.tables = {}
|
self.tables = {}
|
||||||
|
|
||||||
def open(self):
|
def open(self):
|
||||||
LOG.debug("Opening in-memory HBase connection")
|
LOG.debug(_("Opening in-memory HBase connection"))
|
||||||
|
|
||||||
def create_table(self, n, families={}):
|
def create_table(self, n, families={}):
|
||||||
if n in self.tables:
|
if n in self.tables:
|
||||||
|
@ -53,10 +53,11 @@ class Connection(base.Connection):
|
|||||||
:param data: a dictionary such as returned by
|
:param data: a dictionary such as returned by
|
||||||
ceilometer.meter.meter_message_from_counter
|
ceilometer.meter.meter_message_from_counter
|
||||||
"""
|
"""
|
||||||
LOG.info('metering data %s for %s: %s',
|
LOG.info(_('metering data %(counter_name)s for %(resource_id)s: '
|
||||||
data['counter_name'],
|
'%(counter_volume)s')
|
||||||
data['resource_id'],
|
% ({'counter_name': data['counter_name'],
|
||||||
data['counter_volume'])
|
'resource_id': data['resource_id'],
|
||||||
|
'counter_volume': data['counter_volume']}))
|
||||||
|
|
||||||
def clear_expired_metering_data(self, ttl):
|
def clear_expired_metering_data(self, ttl):
|
||||||
"""Clear expired data from the backend storage system according to the
|
"""Clear expired data from the backend storage system according to the
|
||||||
@ -65,7 +66,7 @@ class Connection(base.Connection):
|
|||||||
:param ttl: Number of seconds to keep records for.
|
:param ttl: Number of seconds to keep records for.
|
||||||
|
|
||||||
"""
|
"""
|
||||||
LOG.info("Dropping data with TTL %d", ttl)
|
LOG.info(_("Dropping data with TTL %d"), ttl)
|
||||||
|
|
||||||
def get_users(self, source=None):
|
def get_users(self, source=None):
|
||||||
"""Return an iterable of user id strings.
|
"""Return an iterable of user id strings.
|
||||||
|
@ -916,7 +916,7 @@ class Connection(base.Connection):
|
|||||||
problem_events.append((api_models.Event.DUPLICATE,
|
problem_events.append((api_models.Event.DUPLICATE,
|
||||||
event_model))
|
event_model))
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
LOG.exception('Failed to record event: %s', e)
|
LOG.exception(_('Failed to record event: %s') % e)
|
||||||
problem_events.append((api_models.Event.UNKNOWN_PROBLEM,
|
problem_events.append((api_models.Event.UNKNOWN_PROBLEM,
|
||||||
event_model))
|
event_model))
|
||||||
events.append(event)
|
events.append(event)
|
||||||
|
@ -98,10 +98,10 @@ class ScalingTransformer(transformer.TransformerBase):
|
|||||||
|
|
||||||
def handle_sample(self, context, s):
|
def handle_sample(self, context, s):
|
||||||
"""Handle a sample, converting if necessary."""
|
"""Handle a sample, converting if necessary."""
|
||||||
LOG.debug('handling sample %s', (s,))
|
LOG.debug(_('handling sample %s'), (s,))
|
||||||
if (self.source.get('unit', s.unit) == s.unit):
|
if (self.source.get('unit', s.unit) == s.unit):
|
||||||
s = self._convert(s)
|
s = self._convert(s)
|
||||||
LOG.debug(_('converted to: %s') % (s,))
|
LOG.debug(_('converted to: %s'), (s,))
|
||||||
return s
|
return s
|
||||||
|
|
||||||
|
|
||||||
@ -120,7 +120,7 @@ class RateOfChangeTransformer(ScalingTransformer):
|
|||||||
|
|
||||||
def handle_sample(self, context, s):
|
def handle_sample(self, context, s):
|
||||||
"""Handle a sample, converting if necessary."""
|
"""Handle a sample, converting if necessary."""
|
||||||
LOG.debug('handling sample %s', (s,))
|
LOG.debug(_('handling sample %s'), (s,))
|
||||||
key = s.name + s.resource_id
|
key = s.name + s.resource_id
|
||||||
prev = self.cache.get(key)
|
prev = self.cache.get(key)
|
||||||
timestamp = timeutils.parse_isotime(s.timestamp)
|
timestamp = timeutils.parse_isotime(s.timestamp)
|
||||||
@ -141,9 +141,9 @@ class RateOfChangeTransformer(ScalingTransformer):
|
|||||||
if time_delta else 0.0)
|
if time_delta else 0.0)
|
||||||
|
|
||||||
s = self._convert(s, rate_of_change)
|
s = self._convert(s, rate_of_change)
|
||||||
LOG.debug(_('converted to: %s') % (s,))
|
LOG.debug(_('converted to: %s'), (s,))
|
||||||
else:
|
else:
|
||||||
LOG.warn(_('dropping sample with no predecessor: %s') %
|
LOG.warn(_('dropping sample with no predecessor: %s'),
|
||||||
(s,))
|
(s,))
|
||||||
s = None
|
s = None
|
||||||
return s
|
return s
|
||||||
|
Loading…
Reference in New Issue
Block a user