debug level logs should not be translated

According to the OpenStack translation policy available at
https://wiki.openstack.org/wiki/LoggingStandards debug messages
should not be translated. As mentioned in several changes in
Nova by garyk, this is to help prioritize log translation.

Change-Id: Ia38a426a1a3154302b19f2dc603487175566c863
Closes-Bug: #1318384
This commit is contained in:
Christian Berendt 2014-05-11 19:50:40 +02:00
parent 2f703fe36e
commit 6d051af5d9
34 changed files with 273 additions and 280 deletions

View File

@ -28,22 +28,22 @@ class AuthorizationMiddleware(wsgi.Middleware):
def __init__(self, application, auth_providers, **local_config):
self.auth_providers = auth_providers
LOG.debug(_("Auth middleware providers: %s") % auth_providers)
LOG.debug("Auth middleware providers: %s" % auth_providers)
super(AuthorizationMiddleware, self).__init__(application,
**local_config)
def process_request(self, request):
roles = request.headers.get('X_ROLE', '').split(',')
LOG.debug(_("Processing auth request with roles: %s") % roles)
LOG.debug("Processing auth request with roles: %s" % roles)
tenant_id = request.headers.get('X-Tenant-Id', None)
LOG.debug(_("Processing auth request with tenant_id: %s") % tenant_id)
LOG.debug("Processing auth request with tenant_id: %s" % tenant_id)
for provider in self.auth_providers:
provider.authorize(request, tenant_id, roles)
@classmethod
def factory(cls, global_config, **local_config):
def _factory(app):
LOG.debug(_("Created auth middleware with config: %s") %
LOG.debug("Created auth middleware with config: %s" %
local_config)
return cls(app, [TenantBasedAuth()], **local_config)
return _factory

View File

@ -19,7 +19,6 @@ from trove.common import cfg
from trove.common import exception
from trove.common import utils
from trove.openstack.common import log as logging
from trove.openstack.common.gettextutils import _
from six.moves import configparser
@ -36,11 +35,11 @@ def _get_item(key, dictList):
def do_configs_require_restart(overrides, datastore_manager='mysql'):
rules = get_validation_rules(datastore_manager=datastore_manager)
LOG.debug(_("overrides: %s") % overrides)
LOG.debug(_("rules?: %s") % rules)
LOG.debug("overrides: %s" % overrides)
LOG.debug("rules?: %s" % rules)
for key in overrides.keys():
rule = _get_item(key, rules['configuration-parameters'])
LOG.debug(_("checking the rule: %s") % rule)
LOG.debug("checking the rule: %s" % rule)
if rule.get('restart_required'):
return True
return False

View File

@ -17,7 +17,6 @@ import routes
from trove.openstack.common import log as logging
from trove.openstack.common import extensions
from trove.openstack.common.gettextutils import _
from trove.common import cfg
from trove.common import wsgi
@ -50,7 +49,7 @@ class TroveExtensionMiddleware(extensions.ExtensionMiddleware):
# extended resources
for resource_ext in ext_mgr.get_resources():
LOG.debug(_('Extended resource: %s'), resource_ext.collection)
LOG.debug('Extended resource: %s', resource_ext.collection)
# The only difference here is that we are using our common
# wsgi.Resource instead of the openstack common wsgi.Resource
exception_map = None
@ -75,7 +74,7 @@ class TroveExtensionMiddleware(extensions.ExtensionMiddleware):
action_resources = self._action_ext_resources(application, ext_mgr,
mapper)
for action in ext_mgr.get_actions():
LOG.debug(_('Extended action: %s'), action.action_name)
LOG.debug('Extended action: %s', action.action_name)
resource = action_resources[action.collection]
resource.add_action(action.action_name, action.handler)
@ -83,7 +82,7 @@ class TroveExtensionMiddleware(extensions.ExtensionMiddleware):
req_controllers = self._request_ext_resources(application, ext_mgr,
mapper)
for request_ext in ext_mgr.get_request_extensions():
LOG.debug(_('Extended request: %s'), request_ext.key)
LOG.debug('Extended request: %s', request_ext.key)
controller = req_controllers[request_ext.key]
controller.add_handler(request_ext.handler)

View File

@ -330,7 +330,7 @@ def try_recover(func):
if recover_func is not None:
recover_func(func)
else:
LOG.debug(_("No recovery method defined for %(func)s") % {
LOG.debug("No recovery method defined for %(func)s" % {
'func': func.__name__})
raise
return _decorator

View File

@ -530,7 +530,7 @@ class ContextMiddleware(openstack_wsgi.Middleware):
@classmethod
def factory(cls, global_config, **local_config):
def _factory(app):
LOG.debug(_("Created context middleware with config: %s") %
LOG.debug("Created context middleware with config: %s" %
local_config)
return cls(app)

View File

@ -16,7 +16,6 @@
from trove.common import cfg
from trove.openstack.common.rpc import proxy
from trove.openstack.common import log as logging
from trove.openstack.common.gettextutils import _
CONF = cfg.CONF
@ -36,7 +35,7 @@ class API(proxy.RpcProxy):
return CONF.conductor_queue
def heartbeat(self, instance_id, payload, sent=None):
LOG.debug(_("Making async call to cast heartbeat for instance: %s")
LOG.debug("Making async call to cast heartbeat for instance: %s"
% instance_id)
self.cast(self.context, self.make_msg("heartbeat",
instance_id=instance_id,
@ -45,7 +44,7 @@ class API(proxy.RpcProxy):
def update_backup(self, instance_id, backup_id, sent=None,
**backup_fields):
LOG.debug(_("Making async call to cast update_backup for instance: %s")
LOG.debug("Making async call to cast update_backup for instance: %s"
% instance_id)
self.cast(self.context, self.make_msg("update_backup",
instance_id=instance_id,

View File

@ -38,7 +38,7 @@ class Manager(periodic_task.PeriodicTasks):
"method": method_name,
"sent": sent,
}
LOG.debug(_("Instance %(instance)s sent %(method)s at %(sent)s ")
LOG.debug("Instance %(instance)s sent %(method)s at %(sent)s "
% fields)
if sent is None:
@ -55,8 +55,8 @@ class Manager(periodic_task.PeriodicTasks):
pass
if seen is None:
LOG.debug(_("[Instance %s] Did not find any previous message. "
"Creating.") % instance_id)
LOG.debug("[Instance %s] Did not find any previous message. "
"Creating." % instance_id)
seen = LastSeen.create(instance_id=instance_id,
method_name=method_name,
sent=sent)
@ -65,8 +65,8 @@ class Manager(periodic_task.PeriodicTasks):
last_sent = float(seen.sent)
if last_sent < sent:
LOG.debug(_("[Instance %s] Rec'd message is younger than last "
"seen. Updating.") % instance_id)
LOG.debug("[Instance %s] Rec'd message is younger than last "
"seen. Updating." % instance_id)
seen.sent = sent
seen.save()
return False
@ -77,8 +77,8 @@ class Manager(periodic_task.PeriodicTasks):
return True
def heartbeat(self, context, instance_id, payload, sent=None):
LOG.debug(_("Instance ID: %s") % str(instance_id))
LOG.debug(_("Payload: %s") % str(payload))
LOG.debug("Instance ID: %s" % str(instance_id))
LOG.debug("Payload: %s" % str(payload))
status = t_models.InstanceServiceStatus.find_by(
instance_id=instance_id)
if self._message_too_old(instance_id, 'heartbeat', sent):
@ -90,8 +90,8 @@ class Manager(periodic_task.PeriodicTasks):
def update_backup(self, context, instance_id, backup_id,
sent=None, **backup_fields):
LOG.debug(_("Instance ID: %s") % str(instance_id))
LOG.debug(_("Backup ID: %s") % str(backup_id))
LOG.debug("Instance ID: %s" % str(instance_id))
LOG.debug("Backup ID: %s" % str(backup_id))
backup = bkup_models.DBBackup.find_by(id=backup_id)
# TODO(datsun180b): use context to verify tenant matches
@ -125,6 +125,6 @@ class Manager(periodic_task.PeriodicTasks):
'key': k,
'value': v,
}
LOG.debug(_("Backup %(key)s: %(value)s") % fields)
LOG.debug("Backup %(key)s: %(value)s" % fields)
setattr(backup, k, v)
backup.save()

View File

@ -43,12 +43,12 @@ class Configurations(object):
if context.is_admin:
db_info = DBConfiguration.find_all(deleted=False)
if db_info is None:
LOG.debug(_("No configurations found"))
LOG.debug("No configurations found")
else:
db_info = DBConfiguration.find_all(tenant_id=context.tenant,
deleted=False)
if db_info is None:
LOG.debug(_("No configurations found for tenant % s")
LOG.debug("No configurations found for tenant % s"
% context.tenant)
limit = int(context.limit or Configurations.DEFAULT_LIMIT)
@ -85,9 +85,9 @@ class Configuration(object):
@staticmethod
def create_items(cfg_id, values):
LOG.debug(_("saving the values to the database"))
LOG.debug(_("cfg_id: %s") % cfg_id)
LOG.debug(_("values: %s") % values)
LOG.debug("saving the values to the database")
LOG.debug("cfg_id: %s" % cfg_id)
LOG.debug("values: %s" % values)
config_items = []
for key, val in values.iteritems():
config_item = ConfigurationParameter.create(
@ -107,11 +107,11 @@ class Configuration(object):
@staticmethod
def remove_all_items(context, id, deleted_at):
LOG.debug(_("removing the values from the database with configuration"
" %s") % id)
LOG.debug("removing the values from the database with configuration"
" %s" % id)
items = ConfigurationParameter.find_all(configuration_id=id,
deleted=False).all()
LOG.debug(_("removing items: %s") % items)
LOG.debug("removing items: %s" % items)
for item in items:
item.deleted = True
item.deleted_at = deleted_at
@ -149,7 +149,7 @@ class Configuration(object):
datastore_manager=datastore.manager)
def _get_rule(key):
LOG.debug(_("finding rule with key : %s") % key)
LOG.debug("finding rule with key : %s" % key)
for rule in rules['configuration-parameters']:
if str(rule.get('name')) == key:
return rule
@ -186,7 +186,7 @@ class Configuration(object):
items = Configuration.load_items(context, configuration.id)
for instance in instances:
LOG.debug(_("applying to instance: %s") % instance.id)
LOG.debug("applying to instance: %s" % instance.id)
overrides = {}
for i in items:
overrides[i.configuration_key] = i.configuration_value

View File

@ -74,8 +74,8 @@ class ConfigurationsController(wsgi.Controller):
return wsgi.Result(paged.data(), 200)
def create(self, req, body, tenant_id):
LOG.debug(_("req : '%s'\n\n") % req)
LOG.debug(_("body : '%s'\n\n") % req)
LOG.debug("req : '%s'\n\n" % req)
LOG.debug("body : '%s'\n\n" % req)
name = body['configuration']['name']
description = body['configuration'].get('description')

View File

@ -57,13 +57,13 @@ class DatabaseModelBase(models.ModelBase):
if not self.is_valid():
raise exception.InvalidModelError(errors=self.errors)
self['updated'] = utils.utcnow()
LOG.debug(_("Saving %(name)s: %(dict)s") %
LOG.debug("Saving %(name)s: %(dict)s" %
{'name': self.__class__.__name__, 'dict': self.__dict__})
return self.db_api.save(self)
def delete(self):
self['updated'] = utils.utcnow()
LOG.debug(_("Deleting %(name)s: %(dict)s") %
LOG.debug("Deleting %(name)s: %(dict)s" %
{'name': self.__class__.__name__, 'dict': self.__dict__})
if self.preserve_on_delete:

View File

@ -52,12 +52,12 @@ class DnsRecord(ModelBase):
def save(self):
if not self.is_valid():
raise exception.InvalidModelError(errors=self.errors)
LOG.debug(_("Saving %(name)s: %(dict)s") %
LOG.debug("Saving %(name)s: %(dict)s" %
{'name': self.__class__.__name__, 'dict': self.__dict__})
return get_db_api().save(self)
def delete(self):
LOG.debug(_("Deleting %(name)s: %(dict)s") %
LOG.debug("Deleting %(name)s: %(dict)s" %
{'name': self.__class__.__name__, 'dict': self.__dict__})
return get_db_api().delete(self)

View File

@ -25,7 +25,6 @@ from trove.db import get_db_api
from trove.guestagent.db import models as guest_models
from trove.instance import models as base_models
from trove.openstack.common import log as logging
from trove.openstack.common.gettextutils import _
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
@ -213,7 +212,7 @@ class RootHistory(object):
self.created = utils.utcnow()
def save(self):
LOG.debug(_("Saving %(name)s: %(dict)s") %
LOG.debug("Saving %(name)s: %(dict)s" %
{'name': self.__class__.__name__, 'dict': self.__dict__})
return get_db_api().save(self)

View File

@ -27,7 +27,6 @@ from trove.openstack.common import rpc
from trove.openstack.common import log as logging
from trove.openstack.common.rpc import proxy
from trove.openstack.common.rpc import common
from trove.openstack.common.gettextutils import _
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
@ -121,30 +120,30 @@ class API(proxy.RpcProxy):
"""Make an asynchronous call to change the passwords of one or more
users.
"""
LOG.debug(_("Changing passwords for users on Instance %s"), self.id)
LOG.debug("Changing passwords for users on Instance %s", self.id)
self._cast("change_passwords", users=users)
def update_attributes(self, username, hostname, user_attrs):
"""Update user attributes."""
LOG.debug(_("Changing user attributes on Instance %s"), self.id)
LOG.debug("Changing user attributes on Instance %s", self.id)
self._cast("update_attributes", username=username, hostname=hostname,
user_attrs=user_attrs)
def create_user(self, users):
"""Make an asynchronous call to create a new database user"""
LOG.debug(_("Creating Users for Instance %s"), self.id)
LOG.debug("Creating Users for Instance %s", self.id)
self._cast("create_user", users=users)
def get_user(self, username, hostname):
"""Make an asynchronous call to get a single database user."""
LOG.debug(_("Getting a user on Instance %s"), self.id)
LOG.debug("Getting a user on Instance %s", self.id)
LOG.debug("User name is %s" % username)
return self._call("get_user", AGENT_LOW_TIMEOUT,
username=username, hostname=hostname)
def list_access(self, username, hostname):
"""Show all the databases to which a user has more than USAGE."""
LOG.debug(_("Showing user grants on Instance %s"), self.id)
LOG.debug("Showing user grants on Instance %s", self.id)
LOG.debug("User name is %s" % username)
return self._call("list_access", AGENT_LOW_TIMEOUT,
username=username, hostname=hostname)
@ -163,13 +162,13 @@ class API(proxy.RpcProxy):
def list_users(self, limit=None, marker=None, include_marker=False):
"""Make an asynchronous call to list database users"""
LOG.debug(_("Listing Users for Instance %s"), self.id)
LOG.debug("Listing Users for Instance %s", self.id)
return self._call("list_users", AGENT_LOW_TIMEOUT, limit=limit,
marker=marker, include_marker=include_marker)
def delete_user(self, user):
"""Make an asynchronous call to delete an existing database user"""
LOG.debug(_("Deleting user %(user)s for Instance %(instance_id)s") %
LOG.debug("Deleting user %(user)s for Instance %(instance_id)s" %
{'user': user, 'instance_id': self.id})
self._cast("delete_user", user=user)
@ -177,12 +176,12 @@ class API(proxy.RpcProxy):
"""Make an asynchronous call to create a new database
within the specified container
"""
LOG.debug(_("Creating databases for Instance %s"), self.id)
LOG.debug("Creating databases for Instance %s", self.id)
self._cast("create_database", databases=databases)
def list_databases(self, limit=None, marker=None, include_marker=False):
"""Make an asynchronous call to list databases"""
LOG.debug(_("Listing databases for Instance %s"), self.id)
LOG.debug("Listing databases for Instance %s", self.id)
return self._call("list_databases", AGENT_LOW_TIMEOUT, limit=limit,
marker=marker, include_marker=include_marker)
@ -190,40 +189,40 @@ class API(proxy.RpcProxy):
"""Make an asynchronous call to delete an existing database
within the specified container
"""
LOG.debug(_("Deleting database %(database)s for "
"Instance %(instance_id)s") % {'database': database,
'instance_id': self.id})
LOG.debug("Deleting database %(database)s for "
"Instance %(instance_id)s" % {'database': database,
'instance_id': self.id})
self._cast("delete_database", database=database)
def enable_root(self):
"""Make a synchronous call to enable the root user for
access from anywhere
"""
LOG.debug(_("Enable root user for Instance %s"), self.id)
LOG.debug("Enable root user for Instance %s", self.id)
return self._call("enable_root", AGENT_HIGH_TIMEOUT)
def disable_root(self):
"""Make a synchronous call to disable the root user for
access from anywhere
"""
LOG.debug(_("Disable root user for Instance %s"), self.id)
LOG.debug("Disable root user for Instance %s", self.id)
return self._call("disable_root", AGENT_LOW_TIMEOUT)
def is_root_enabled(self):
"""Make a synchronous call to check if root access is
available for the container
"""
LOG.debug(_("Check root access for Instance %s"), self.id)
LOG.debug("Check root access for Instance %s", self.id)
return self._call("is_root_enabled", AGENT_LOW_TIMEOUT)
def get_hwinfo(self):
"""Make a synchronous call to get hardware info for the container"""
LOG.debug(_("Check hwinfo on Instance %s"), self.id)
LOG.debug("Check hwinfo on Instance %s", self.id)
return self._call("get_hwinfo", AGENT_LOW_TIMEOUT)
def get_diagnostics(self):
"""Make a synchronous call to get diagnostics for the container"""
LOG.debug(_("Check diagnostics on Instance %s"), self.id)
LOG.debug("Check diagnostics on Instance %s", self.id)
return self._call("get_diagnostics", AGENT_LOW_TIMEOUT)
def prepare(self, memory_mb, packages, databases, users,
@ -233,7 +232,7 @@ class API(proxy.RpcProxy):
"""Make an asynchronous call to prepare the guest
as a database container optionally includes a backup id for restores
"""
LOG.debug(_("Sending the call to prepare the Guest"))
LOG.debug("Sending the call to prepare the Guest")
self._cast_with_consumer(
"prepare", packages=packages, databases=databases,
memory_mb=memory_mb, users=users, device_path=device_path,
@ -243,13 +242,13 @@ class API(proxy.RpcProxy):
def restart(self):
"""Restart the MySQL server."""
LOG.debug(_("Sending the call to restart MySQL on the Guest."))
LOG.debug("Sending the call to restart MySQL on the Guest.")
self._call("restart", AGENT_HIGH_TIMEOUT)
def start_db_with_conf_changes(self, config_contents):
"""Start the MySQL server."""
LOG.debug(_("Sending the call to start MySQL on the Guest with "
"a timeout of %s.") % AGENT_HIGH_TIMEOUT)
LOG.debug("Sending the call to start MySQL on the Guest with "
"a timeout of %s." % AGENT_HIGH_TIMEOUT)
self._call("start_db_with_conf_changes", AGENT_HIGH_TIMEOUT,
config_contents=config_contents)
@ -257,25 +256,25 @@ class API(proxy.RpcProxy):
"""Ignore running state of MySQL, and just change the config file
to a new flavor.
"""
LOG.debug(_("Sending the call to change MySQL conf file on the Guest "
"with a timeout of %s.") % AGENT_HIGH_TIMEOUT)
LOG.debug("Sending the call to change MySQL conf file on the Guest "
"with a timeout of %s." % AGENT_HIGH_TIMEOUT)
self._call("reset_configuration", AGENT_HIGH_TIMEOUT,
configuration=configuration)
def stop_db(self, do_not_start_on_reboot=False):
"""Stop the MySQL server."""
LOG.debug(_("Sending the call to stop MySQL on the Guest."))
LOG.debug("Sending the call to stop MySQL on the Guest.")
self._call("stop_db", AGENT_HIGH_TIMEOUT,
do_not_start_on_reboot=do_not_start_on_reboot)
def upgrade(self):
"""Make an asynchronous call to self upgrade the guest agent"""
LOG.debug(_("Sending an upgrade call to nova-guest"))
LOG.debug("Sending an upgrade call to nova-guest")
self._cast_with_consumer("upgrade")
def get_volume_info(self):
"""Make a synchronous call to get volume info for the container"""
LOG.debug(_("Check Volume Info on Instance %s"), self.id)
LOG.debug("Check Volume Info on Instance %s", self.id)
# self._check_for_hearbeat()
return self._call("get_filesystem_stats", AGENT_LOW_TIMEOUT,
fs_path=None)
@ -286,38 +285,38 @@ class API(proxy.RpcProxy):
def create_backup(self, backup_info):
"""Make async call to create a full backup of this instance"""
LOG.debug(_("Create Backup %(backup_id)s "
"for Instance %(instance_id)s") %
LOG.debug("Create Backup %(backup_id)s "
"for Instance %(instance_id)s" %
{'backup_id': backup_info['id'], 'instance_id': self.id})
self._cast("create_backup", backup_info=backup_info)
def mount_volume(self, device_path=None, mount_point=None):
"""Mount the volume"""
LOG.debug(_("Mount volume %(mount)s on instance %(id)s") % {
LOG.debug("Mount volume %(mount)s on instance %(id)s" % {
'mount': mount_point, 'id': self.id})
self._call("mount_volume", AGENT_LOW_TIMEOUT,
device_path=device_path, mount_point=mount_point)
def unmount_volume(self, device_path=None, mount_point=None):
"""Unmount the volume"""
LOG.debug(_("Unmount volume %(device)s on instance %(id)s") % {
LOG.debug("Unmount volume %(device)s on instance %(id)s" % {
'device': device_path, 'id': self.id})
self._call("unmount_volume", AGENT_LOW_TIMEOUT,
device_path=device_path, mount_point=mount_point)
def resize_fs(self, device_path=None, mount_point=None):
"""Resize the filesystem"""
LOG.debug(_("Resize device %(device)s on instance %(id)s") % {
LOG.debug("Resize device %(device)s on instance %(id)s" % {
'device': device_path, 'id': self.id})
self._call("resize_fs", AGENT_HIGH_TIMEOUT, device_path=device_path,
mount_point=mount_point)
def update_overrides(self, overrides, remove=False):
LOG.debug(_("Updating overrides on Instance %s"), self.id)
LOG.debug(_("Updating overrides values %s") % overrides)
LOG.debug("Updating overrides on Instance %s", self.id)
LOG.debug("Updating overrides values %s" % overrides)
self._cast("update_overrides", overrides=overrides, remove=remove)
def apply_overrides(self, overrides):
LOG.debug(_("Applying overrides on Instance %s"), self.id)
LOG.debug(_("Applying overrides values %s") % overrides)
LOG.debug("Applying overrides on Instance %s", self.id)
LOG.debug("Applying overrides values %s" % overrides)
self._cast("apply_overrides", overrides=overrides)

View File

@ -153,7 +153,7 @@ class BackupAgent(object):
def execute_restore(self, context, backup_info, restore_location):
try:
LOG.debug(_("Getting Restore Runner %(type)s"), backup_info)
LOG.debug("Getting Restore Runner %(type)s", backup_info)
restore_runner = self._get_restore_runner(backup_info['type'])
LOG.debug("Getting Storage Strategy")
@ -165,8 +165,8 @@ class BackupAgent(object):
checksum=backup_info['checksum'],
restore_location=restore_location)
backup_info['restore_location'] = restore_location
LOG.debug(_("Restoring instance from backup %(id)s to "
"%(restore_location)s") % backup_info)
LOG.debug("Restoring instance from backup %(id)s to "
"%(restore_location)s" % backup_info)
content_size = runner.restore()
LOG.info(_("Restore from backup %(id)s completed successfully "
"to %(restore_location)s") % backup_info)

View File

@ -80,7 +80,7 @@ class Manager(periodic_task.PeriodicTasks):
device.migrate_data(mount_point)
#mount the volume
device.mount(mount_point)
LOG.debug(_("Mounting new volume."))
LOG.debug("Mounting new volume.")
self.app.restart()
self.appStatus.end_install_or_restart()
@ -155,17 +155,17 @@ class Manager(periodic_task.PeriodicTasks):
def mount_volume(self, context, device_path=None, mount_point=None):
device = volume.VolumeDevice(device_path)
device.mount(mount_point, write_to_fstab=False)
LOG.debug(_("Mounted the volume."))
LOG.debug("Mounted the volume.")
def unmount_volume(self, context, device_path=None, mount_point=None):
device = volume.VolumeDevice(device_path)
device.unmount(mount_point)
LOG.debug(_("Unmounted the volume."))
LOG.debug("Unmounted the volume.")
def resize_fs(self, context, device_path=None, mount_point=None):
device = volume.VolumeDevice(device_path)
device.resize_fs(mount_point)
LOG.debug(_("Resized the filesystem"))
LOG.debug("Resized the filesystem")
def update_overrides(self, context, overrides, remove=False):
raise exception.DatastoreOperationNotSupported(

View File

@ -112,9 +112,9 @@ class CassandraApp(object):
def _install_db(self, packages):
"""Install cassandra server"""
LOG.debug(_("Installing cassandra server"))
LOG.debug("Installing cassandra server")
packager.pkg_install(packages, None, system.TIME_OUT)
LOG.debug(_("Finished installing cassandra server"))
LOG.debug("Finished installing cassandra server")
def write_config(self, config_contents):
LOG.info(_('Defining temp config holder at '

View File

@ -70,7 +70,7 @@ class Manager(periodic_task.PeriodicTasks):
device.unmount_device(device_path)
device.format()
device.mount(mount_point)
LOG.debug(_('Mounted the volume.'))
LOG.debug('Mounted the volume.')
if root_password:
self.app.enable_root(root_password)
self.app.install_if_needed(packages)
@ -166,17 +166,17 @@ class Manager(periodic_task.PeriodicTasks):
def mount_volume(self, context, device_path=None, mount_point=None):
device = volume.VolumeDevice(device_path)
device.mount(mount_point, write_to_fstab=False)
LOG.debug(_("Mounted the volume."))
LOG.debug("Mounted the volume.")
def unmount_volume(self, context, device_path=None, mount_point=None):
device = volume.VolumeDevice(device_path)
device.unmount(mount_point)
LOG.debug(_("Unmounted the volume."))
LOG.debug("Unmounted the volume.")
def resize_fs(self, context, device_path=None, mount_point=None):
device = volume.VolumeDevice(device_path)
device.resize_fs(mount_point)
LOG.debug(_("Resized the filesystem."))
LOG.debug("Resized the filesystem.")
def update_overrides(self, context, overrides, remove=False):
raise exception.DatastoreOperationNotSupported(

View File

@ -97,8 +97,8 @@ class CouchbaseApp(object):
"""
Install the Couchbase Server.
"""
LOG.debug(_('Installing Couchbase Server'))
msg = _("Creating %s") % system.COUCHBASE_CONF_DIR
LOG.debug('Installing Couchbase Server')
msg = "Creating %s" % system.COUCHBASE_CONF_DIR
LOG.debug(msg)
utils.execute_with_timeout('mkdir',
'-p',
@ -108,7 +108,7 @@ class CouchbaseApp(object):
pkg_opts = {}
packager.pkg_install(packages, pkg_opts, system.TIME_OUT)
self.start_db()
LOG.debug(_('Finished installing Couchbase Server'))
LOG.debug('Finished installing Couchbase Server')
def _enable_db_on_boot(self):
"""

View File

@ -47,7 +47,7 @@ class Manager(periodic_task.PeriodicTasks):
config_contents=None, root_password=None, overrides=None):
"""Makes ready DBAAS on a Guest container."""
LOG.debug(_("Prepare MongoDB instance"))
LOG.debug("Prepare MongoDB instance")
self.status.begin_install()
self.app.install_if_needed(packages)
@ -64,7 +64,7 @@ class Manager(periodic_task.PeriodicTasks):
device.mount(mount_point)
self.app.update_owner(mount_point)
LOG.debug(_("Mounted the volume %(path)s as %(mount)s") %
LOG.debug("Mounted the volume %(path)s as %(mount)s" %
{'path': device_path, "mount": mount_point})
if mount_point:
@ -161,17 +161,17 @@ class Manager(periodic_task.PeriodicTasks):
def mount_volume(self, context, device_path=None, mount_point=None):
device = volume.VolumeDevice(device_path)
device.mount(mount_point, write_to_fstab=False)
LOG.debug(_("Mounted the volume."))
LOG.debug("Mounted the volume.")
def unmount_volume(self, context, device_path=None, mount_point=None):
device = volume.VolumeDevice(device_path)
device.unmount(mount_point)
LOG.debug(_("Unmounted the volume."))
LOG.debug("Unmounted the volume.")
def resize_fs(self, context, device_path=None, mount_point=None):
device = volume.VolumeDevice(device_path)
device.resize_fs(mount_point)
LOG.debug(_("Resized the filesystem"))
LOG.debug("Resized the filesystem")
def update_overrides(self, context, overrides, remove=False):
raise exception.DatastoreOperationNotSupported(

View File

@ -41,7 +41,7 @@ class MongoDBApp(object):
"""Prepare the guest machine with a MongoDB installation"""
LOG.info(_("Preparing Guest as MongoDB"))
if not system.PACKAGER.pkg_is_installed(packages):
LOG.debug(_("Installing packages: %s") % str(packages))
LOG.debug("Installing packages: %s" % str(packages))
system.PACKAGER.pkg_install(packages, {}, system.TIME_OUT)
LOG.info(_("Finished installing MongoDB server"))

View File

@ -124,7 +124,7 @@ class Manager(periodic_task.PeriodicTasks):
device.migrate_data(mount_point)
#mount the volume
device.mount(mount_point)
LOG.debug(_("Mounted the volume."))
LOG.debug("Mounted the volume.")
app.start_mysql()
if backup_info:
self._perform_restore(backup_info, context,
@ -184,17 +184,17 @@ class Manager(periodic_task.PeriodicTasks):
def mount_volume(self, context, device_path=None, mount_point=None):
device = volume.VolumeDevice(device_path)
device.mount(mount_point, write_to_fstab=False)
LOG.debug(_("Mounted the volume."))
LOG.debug("Mounted the volume.")
def unmount_volume(self, context, device_path=None, mount_point=None):
device = volume.VolumeDevice(device_path)
device.unmount(mount_point)
LOG.debug(_("Unmounted the volume."))
LOG.debug("Unmounted the volume.")
def resize_fs(self, context, device_path=None, mount_point=None):
device = volume.VolumeDevice(device_path)
device.resize_fs(mount_point)
LOG.debug(_("Resized the filesystem"))
LOG.debug("Resized the filesystem")
def update_overrides(self, context, overrides, remove=False):
app = MySqlApp(MySqlAppStatus.get())

View File

@ -405,7 +405,7 @@ class MySqlAdmin(object):
def list_databases(self, limit=None, marker=None, include_marker=False):
"""List databases the user created on this mysql instance."""
LOG.debug(_("---Listing Databases---"))
LOG.debug("---Listing Databases---")
databases = []
with LocalSqlClient(get_engine()) as client:
# If you have an external volume mounted at /var/lib/mysql
@ -433,18 +433,18 @@ class MySqlAdmin(object):
t = text(str(q))
database_names = client.execute(t)
next_marker = None
LOG.debug(_("database_names = %r") % database_names)
LOG.debug("database_names = %r" % database_names)
for count, database in enumerate(database_names):
if count >= limit:
break
LOG.debug(_("database = %s ") % str(database))
LOG.debug("database = %s " % str(database))
mysql_db = models.MySQLDatabase()
mysql_db.name = database[0]
next_marker = mysql_db.name
mysql_db.character_set = database[1]
mysql_db.collate = database[2]
databases.append(mysql_db.serialize())
LOG.debug(_("databases = ") + str(databases))
LOG.debug("databases = " + str(databases))
if database_names.rowcount <= limit:
next_marker = None
return databases, next_marker
@ -469,7 +469,7 @@ class MySqlAdmin(object):
Marker
LIMIT :limit;
'''
LOG.debug(_("---Listing Users---"))
LOG.debug("---Listing Users---")
users = []
with LocalSqlClient(get_engine()) as client:
mysql_user = models.MySQLUser()
@ -585,14 +585,14 @@ class MySqlApp(object):
"""
LOG.info(_("Preparing Guest as MySQL Server"))
if not packager.pkg_is_installed(packages):
LOG.debug(_("Installing mysql server"))
LOG.debug("Installing mysql server")
self._clear_mysql_config()
# set blank password on pkg configuration stage
pkg_opts = {'root_password': '',
'root_password_again': ''}
packager.pkg_install(packages, pkg_opts, self.TIME_OUT)
self._create_mysql_confd_dir()
LOG.debug(_("Finished installing mysql server"))
LOG.debug("Finished installing mysql server")
self.start_mysql()
LOG.info(_("Dbaas install_if_needed complete"))
@ -704,16 +704,16 @@ class MySqlApp(object):
"""
if overrides_file:
LOG.debug(_("writing new overrides.cnf config file"))
LOG.debug("writing new overrides.cnf config file")
self._write_config_overrides(overrides_file)
if remove:
LOG.debug(_("removing overrides.cnf config file"))
LOG.debug("removing overrides.cnf config file")
self._remove_overrides()
def apply_overrides(self, overrides):
LOG.debug(_("applying overrides to mysql"))
LOG.debug("applying overrides to mysql")
with LocalSqlClient(get_engine()) as client:
LOG.debug(_("updating overrides values in running daemon"))
LOG.debug("updating overrides values in running daemon")
for k, v in overrides.iteritems():
q = sql_query.SetServerVariable(key=k, value=v)
t = text(str(q))
@ -725,8 +725,8 @@ class MySqlApp(object):
"%(value)s") % output)
def _replace_mycnf_with_template(self, template_path, original_path):
LOG.debug(_("replacing the mycnf with template"))
LOG.debug(_("template_path(%(template)s) original_path(%(origin)s)")
LOG.debug("replacing the mycnf with template")
LOG.debug("template_path(%(template)s) original_path(%(origin)s)"
% {"template": template_path, "origin": original_path})
if os.path.isfile(template_path):
if os.path.isfile(original_path):

View File

@ -87,7 +87,7 @@ class Manager(periodic_task.PeriodicTasks):
device.format()
device.mount(mount_point)
operating_system.update_owner('redis', 'redis', mount_point)
LOG.debug(_('Mounted the volume.'))
LOG.debug('Mounted the volume.')
app.install_if_needed(packages)
LOG.info(_('Securing redis now.'))
app.write_config(config_contents)
@ -140,17 +140,17 @@ class Manager(periodic_task.PeriodicTasks):
def mount_volume(self, context, device_path=None, mount_point=None):
device = volume.VolumeDevice(device_path)
device.mount(mount_point, write_to_fstab=False)
LOG.debug(_("Mounted the volume."))
LOG.debug("Mounted the volume.")
def unmount_volume(self, context, device_path=None, mount_point=None):
device = volume.VolumeDevice(device_path)
device.unmount(mount_point)
LOG.debug(_("Unmounted the volume."))
LOG.debug("Unmounted the volume.")
def resize_fs(self, context, device_path=None, mount_point=None):
device = volume.VolumeDevice(device_path)
device.resize_fs(mount_point)
LOG.debug(_("Resized the filesystem"))
LOG.debug("Resized the filesystem")
def update_overrides(self, context, overrides, remove=False):
raise exception.DatastoreOperationNotSupported(

View File

@ -158,8 +158,8 @@ class RedisApp(object):
"""
Install the redis server.
"""
LOG.debug(_('Installing redis server'))
msg = _("Creating %s") % (system.REDIS_CONF_DIR)
LOG.debug('Installing redis server')
msg = "Creating %s" % system.REDIS_CONF_DIR
LOG.debug(msg)
utils.execute_with_timeout('mkdir',
'-p',
@ -169,7 +169,7 @@ class RedisApp(object):
pkg_opts = {}
packager.pkg_install(packages, pkg_opts, TIME_OUT)
self.start_redis()
LOG.debug(_('Finished installing redis server'))
LOG.debug('Finished installing redis server')
def _enable_redis_on_boot(self):
"""

View File

@ -21,7 +21,6 @@ from trove.common import utils
from trove.db import get_db_api
from trove.db import models as dbmodels
from trove.openstack.common import log as logging
from trove.openstack.common.gettextutils import _
LOG = logging.getLogger(__name__)
@ -55,7 +54,7 @@ class AgentHeartBeat(dbmodels.DatabaseModelBase):
if not self.is_valid():
raise exception.InvalidModelError(errors=self.errors)
self['updated_at'] = utils.utcnow()
LOG.debug(_("Saving %(name)s: %(dict)s") %
LOG.debug("Saving %(name)s: %(dict)s" %
{'name': self.__class__.__name__, 'dict': self.__dict__})
return get_db_api().save(self)

View File

@ -372,7 +372,7 @@ class DebianPackagerMixin(BasePackagerMixin):
installed_version = self.pkg_version(package_name)
if ((package_version and installed_version == package_version) or
(installed_version and not package_version)):
LOG.debug(_("Package %s already installed.") % package_name)
LOG.debug("Package %s already installed." % package_name)
else:
return False
return True

View File

@ -156,10 +156,10 @@ class VolumeMountPoint(object):
def mount(self):
if not os.path.exists(self.mount_point):
utils.execute("sudo", "mkdir", "-p", self.mount_point)
LOG.debug(_("Mounting volume. Device path:{0}, mount_point:{1}, "
"volume_type:{2}, mount options:{3}").format(
self.device_path, self.mount_point, self.volume_fstype,
self.mount_options))
LOG.debug("Mounting volume. Device path:{0}, mount_point:{1}, "
"volume_type:{2}, mount options:{3}".format(
self.device_path, self.mount_point, self.volume_fstype,
self.mount_options))
cmd = ("sudo mount -t %s -o %s %s %s" %
(self.volume_fstype, self.mount_options, self.device_path,
self.mount_point))
@ -170,7 +170,7 @@ class VolumeMountPoint(object):
fstab_line = ("%s\t%s\t%s\t%s\t0\t0" %
(self.device_path, self.mount_point, self.volume_fstype,
self.mount_options))
LOG.debug(_("Writing new line to fstab:%s") % fstab_line)
LOG.debug("Writing new line to fstab:%s" % fstab_line)
with open('/etc/fstab', "r") as fstab:
fstab_content = fstab.read()
with NamedTemporaryFile(delete=False) as tempfstab:

View File

@ -499,9 +499,9 @@ class BaseInstance(SimpleInstance):
if self.is_building:
raise exception.UnprocessableEntity("Instance %s is not ready."
% self.id)
LOG.debug(_(" ... deleting compute id = %s") %
LOG.debug(" ... deleting compute id = %s" %
self.db_info.compute_instance_id)
LOG.debug(_(" ... setting status to DELETING."))
LOG.debug(" ... setting status to DELETING.")
self.update_db(task_status=InstanceTasks.DELETING,
configuration_id=None)
task_api.API(self.context).delete_instance(self.id)
@ -594,8 +594,8 @@ class Instance(BuiltInstance):
root_on_create = CONF.get(datastore_manager).root_on_create
return root_on_create
except NoSuchOptError:
LOG.debug(_("root_on_create not configured for %s"
" hence defaulting the value to False")
LOG.debug("root_on_create not configured for %s"
" hence defaulting the value to False"
% datastore_manager)
return False
@ -657,8 +657,8 @@ class Instance(BuiltInstance):
datastore_version.id,
task_status=InstanceTasks.BUILDING,
configuration_id=configuration_id)
LOG.debug(_("Tenant %(tenant)s created new "
"Trove instance %(db)s...") %
LOG.debug("Tenant %(tenant)s created new "
"Trove instance %(db)s..." %
{'tenant': context.tenant, 'db': db_info.id})
# if a configuration group is associated with an instance,
@ -835,15 +835,15 @@ class Instance(BuiltInstance):
status=status)
def unassign_configuration(self):
LOG.debug(_("Unassigning the configuration from the instance %s")
LOG.debug("Unassigning the configuration from the instance %s"
% self.id)
if self.configuration and self.configuration.id:
LOG.debug(_("Unassigning the configuration id %s")
LOG.debug("Unassigning the configuration id %s"
% self.configuration.id)
flavor = self.get_flavor()
config_id = self.configuration.id
LOG.debug(_("configuration being unassigned; "
"marking restart required"))
LOG.debug("configuration being unassigned; "
"marking restart required")
self.update_db(task_status=InstanceTasks.RESTART_REQUIRED)
task_api.API(self.context).unassign_configuration(self.id,
flavor,
@ -877,12 +877,12 @@ class Instance(BuiltInstance):
self.update_db(configuration_id=configuration.id)
def update_overrides(self, overrides):
LOG.debug(_("Updating or removing overrides for instance %s")
LOG.debug("Updating or removing overrides for instance %s"
% self.id)
need_restart = do_configs_require_restart(
overrides, datastore_manager=self.ds_version.manager)
LOG.debug(_("config overrides has non-dynamic settings, "
"requires a restart: %s") % need_restart)
LOG.debug("config overrides has non-dynamic settings, "
"requires a restart: %s" % need_restart)
if need_restart:
self.update_db(task_status=InstanceTasks.RESTART_REQUIRED)
task_api.API(self.context).update_overrides(self.id, overrides)

View File

@ -272,7 +272,7 @@ class QuotaEngine(object):
reservations = self._driver.reserve(tenant_id, self._resources, deltas)
LOG.debug(_("Created reservations %(reservations)s") %
LOG.debug("Created reservations %(reservations)s" %
{'reservations': reservations})
return reservations

View File

@ -22,7 +22,6 @@ Routes all the requests to the task manager.
from trove.common import cfg
from trove.openstack.common.rpc import proxy
from trove.openstack.common import log as logging
from trove.openstack.common.gettextutils import _
CONF = cfg.CONF
@ -124,8 +123,8 @@ class API(proxy.RpcProxy):
overrides=overrides))
def update_overrides(self, instance_id, overrides=None):
LOG.debug(_("Making async call to update configuration overrides for "
"instance %s") % instance_id)
LOG.debug("Making async call to update configuration overrides for "
"instance %s" % instance_id)
self.cast(self.context,
self.make_msg("update_overrides",
@ -133,8 +132,8 @@ class API(proxy.RpcProxy):
overrides=overrides))
def unassign_configuration(self, instance_id, flavor, configuration_id):
LOG.debug(_("Making async call to unassign configuration for "
"instance %s") % instance_id)
LOG.debug("Making async call to unassign configuration for "
"instance %s" % instance_id)
self.cast(self.context,
self.make_msg("unassign_configuration",

View File

@ -127,7 +127,7 @@ class NotifyMixin(object):
# Update payload with all other kwargs
payload.update(kwargs)
LOG.debug(_('Sending event: %(event_type)s, %(payload)s') %
LOG.debug('Sending event: %(event_type)s, %(payload)s' %
{'event_type': event_type, 'payload': payload})
notifier.notify(self.context, publisher_id, event_type, 'INFO',
payload)
@ -156,7 +156,7 @@ class ConfigurationMixin(object):
config = template.SingleInstanceConfigTemplate(
datastore_manager, flavor, instance_id)
ret = config.render_dict()
LOG.debug(_("the default template dict of mysqld section: %s") % ret)
LOG.debug("the default template dict of mysqld section: %s" % ret)
return ret
@ -166,7 +166,7 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
backup_id, availability_zone, root_password, nics,
overrides):
LOG.debug(_("begin create_instance for id: %s") % self.id)
LOG.debug("begin create_instance for id: %s" % self.id)
security_groups = None
# If security group support is enabled and heat based instance
@ -183,8 +183,8 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
err = inst_models.InstanceTasks.BUILDING_ERROR_SEC_GROUP
self._log_and_raise(e, msg, err)
else:
LOG.debug(_("Successfully created security group for "
"instance: %s") % self.id)
LOG.debug("Successfully created security group for "
"instance: %s" % self.id)
if use_heat:
volume_info = self._create_server_volume_heat(
@ -249,7 +249,7 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
err = inst_models.InstanceTasks.BUILDING_ERROR_DNS
self._log_and_raise(e, msg, err)
else:
LOG.debug(_("Successfully created DNS entry for instance: %s") %
LOG.debug("Successfully created DNS entry for instance: %s" %
self.id)
# Make sure the service becomes active before sending a usage
@ -269,7 +269,7 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
except Exception:
LOG.exception(_("Error during create-event call."))
LOG.debug(_("end create_instance for id: %s") % self.id)
LOG.debug("end create_instance for id: %s" % self.id)
def report_root_enabled(self):
mysql_models.RootHistory.create(self.context, self.id, 'root')
@ -329,7 +329,7 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
def _create_server_volume(self, flavor_id, image_id, security_groups,
datastore_manager, volume_size,
availability_zone, nics):
LOG.debug(_("begin _create_server_volume for id: %s") % self.id)
LOG.debug("begin _create_server_volume for id: %s" % self.id)
try:
files = {"/etc/guest_info": ("[DEFAULT]\n--guest_id="
"%s\n--datastore_manager=%s\n"
@ -347,12 +347,12 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
files=files, volume=volume_ref,
security_groups=security_groups,
availability_zone=availability_zone, nics=nics)
LOG.debug(_("Created new compute instance %(server_id)s "
"for id: %(id)s") %
LOG.debug("Created new compute instance %(server_id)s "
"for id: %(id)s" %
{'server_id': server.id, 'id': self.id})
server_dict = server._info
LOG.debug(_("Server response: %s") % server_dict)
LOG.debug("Server response: %s" % server_dict)
volume_id = None
for volume in server_dict.get('os:volumes', []):
volume_id = volume.get('id')
@ -362,20 +362,20 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
except Exception as e:
msg = _("Error creating server and volume for "
"instance %s") % self.id
LOG.debug(_("end _create_server_volume for id: %s") % self.id)
LOG.debug("end _create_server_volume for id: %s" % self.id)
err = inst_models.InstanceTasks.BUILDING_ERROR_SERVER
self._log_and_raise(e, msg, err)
device_path = CONF.device_path
mount_point = CONF.get(datastore_manager).mount_point
volume_info = {'device_path': device_path, 'mount_point': mount_point}
LOG.debug(_("end _create_server_volume for id: %s") % self.id)
LOG.debug("end _create_server_volume for id: %s" % self.id)
return volume_info
def _create_server_volume_heat(self, flavor, image_id,
datastore_manager,
volume_size, availability_zone, nics):
LOG.debug(_("begin _create_server_volume_heat for id: %s") % self.id)
LOG.debug("begin _create_server_volume_heat for id: %s" % self.id)
try:
client = create_heat_client(self.context)
@ -433,7 +433,7 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
self.update_db(compute_instance_id=instance_id)
except (TroveError, heat_exceptions.HTTPNotFound) as e:
msg = _("Error during creating stack for instance %s") % self.id
msg = "Error during creating stack for instance %s" % self.id
LOG.debug(msg)
err = inst_models.InstanceTasks.BUILDING_ERROR_SERVER
self._log_and_raise(e, msg, err)
@ -442,14 +442,14 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
mount_point = CONF.get(datastore_manager).mount_point
volume_info = {'device_path': device_path, 'mount_point': mount_point}
LOG.debug(_("end _create_server_volume_heat for id: %s") % self.id)
LOG.debug("end _create_server_volume_heat for id: %s" % self.id)
return volume_info
def _create_server_volume_individually(self, flavor_id, image_id,
security_groups, datastore_manager,
volume_size,
availability_zone, nics):
LOG.debug(_("begin _create_server_volume_individually for id: %s") %
LOG.debug("begin _create_server_volume_individually for id: %s" %
self.id)
server = None
volume_info = self._build_volume_info(datastore_manager,
@ -467,14 +467,14 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
msg = _("Error creating server for instance %s") % self.id
err = inst_models.InstanceTasks.BUILDING_ERROR_SERVER
self._log_and_raise(e, msg, err)
LOG.debug(_("end _create_server_volume_individually for id: %s") %
LOG.debug("end _create_server_volume_individually for id: %s" %
self.id)
return volume_info
def _build_volume_info(self, datastore_manager, volume_size=None):
volume_info = None
volume_support = CONF.trove_volume_support
LOG.debug(_("trove volume support = %s") % volume_support)
LOG.debug("trove volume support = %s" % volume_support)
if volume_support:
try:
volume_info = self._create_volume(
@ -484,8 +484,8 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
err = inst_models.InstanceTasks.BUILDING_ERROR_VOLUME
self._log_and_raise(e, msg, err)
else:
LOG.debug(_("device_path = %s") % CONF.device_path)
LOG.debug(_("mount_point = %s") %
LOG.debug("device_path = %s" % CONF.device_path)
LOG.debug("mount_point = %s" %
CONF.get(datastore_manager).mount_point)
volume_info = {
'block_device': None,
@ -504,7 +504,7 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
def _create_volume(self, volume_size, datastore_manager):
LOG.info("Entering create_volume")
LOG.debug(_("begin _create_volume for id: %s") % self.id)
LOG.debug("begin _create_volume for id: %s" % self.id)
volume_client = create_cinder_client(self.context)
volume_desc = ("datastore volume for %s" % self.id)
volume_ref = volume_client.volumes.create(
@ -523,11 +523,11 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
v_ref = volume_client.volumes.get(volume_ref.id)
if v_ref.status in ['error']:
raise VolumeCreationFailure()
LOG.debug(_("end _create_volume for id: %s") % self.id)
LOG.debug("end _create_volume for id: %s" % self.id)
return self._build_volume(v_ref, datastore_manager)
def _build_volume(self, v_ref, datastore_manager):
LOG.debug(_("Created volume %s") % v_ref)
LOG.debug("Created volume %s" % v_ref)
# The mapping is in the format:
# <id>:[<type>]:[<size(GB)>]:[<delete_on_terminate>]
# setting the delete_on_terminate instance to true=1
@ -541,8 +541,8 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
device_path = CONF.device_path
mount_point = CONF.get(datastore_manager).mount_point
LOG.debug(_("device_path = %s") % device_path)
LOG.debug(_("mount_point = %s") % mount_point)
LOG.debug("device_path = %s" % device_path)
LOG.debug("mount_point = %s" % mount_point)
volume_info = {'block_device': block_device,
'device_path': device_path,
@ -573,8 +573,8 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
name, image_id, flavor_id, files=files, userdata=userdata,
security_groups=security_groups, block_device_mapping=bdmap,
availability_zone=availability_zone, nics=nics)
LOG.debug(_("Created new compute instance %(server_id)s "
"for id: %(id)s") %
LOG.debug("Created new compute instance %(server_id)s "
"for id: %(id)s" %
{'server_id': server.id, 'id': self.id})
return server
@ -593,10 +593,10 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
overrides=overrides)
def _create_dns_entry(self):
LOG.debug(_("%(gt)s: Creating dns entry for instance: %(id)s") %
LOG.debug("%(gt)s: Creating dns entry for instance: %(id)s" %
{'gt': greenthread.getcurrent(), 'id': self.id})
dns_support = CONF.trove_dns_support
LOG.debug(_("trove dns support = %s") % dns_support)
LOG.debug("trove dns support = %s" % dns_support)
if dns_support:
dns_client = create_dns_client(self.context)
@ -632,7 +632,7 @@ class FreshInstanceTasks(FreshInstance, NotifyMixin, ConfigurationMixin):
raise TroveError('Error creating DNS. No IP available.')
dns_client.create_instance_entry(self.id, ip)
else:
LOG.debug(_("%(gt)s: DNS not enabled for instance: %(id)s") %
LOG.debug("%(gt)s: DNS not enabled for instance: %(id)s" %
{'gt': greenthread.getcurrent(), 'id': self.id})
def _create_secgroup(self, datastore_manager):
@ -695,7 +695,7 @@ class BuiltInstanceTasks(BuiltInstance, NotifyMixin, ConfigurationMixin):
"""
def _delete_resources(self, deleted_at):
LOG.debug(_("begin _delete_resources for id: %s") % self.id)
LOG.debug("begin _delete_resources for id: %s" % self.id)
server_id = self.db_info.compute_instance_id
old_server = self.nova_client.servers.get(server_id)
try:
@ -711,7 +711,7 @@ class BuiltInstanceTasks(BuiltInstance, NotifyMixin, ConfigurationMixin):
% self.server.id)
try:
dns_support = CONF.trove_dns_support
LOG.debug(_("trove dns support = %s") % dns_support)
LOG.debug("trove dns support = %s" % dns_support)
if dns_support:
dns_api = create_dns_client(self.context)
dns_api.delete_instance_entry(instance_id=self.db_info.id)
@ -741,7 +741,7 @@ class BuiltInstanceTasks(BuiltInstance, NotifyMixin, ConfigurationMixin):
self.send_usage_event('delete',
deleted_at=timeutils.isotime(deleted_at),
server=old_server)
LOG.debug(_("end _delete_resources for id: %s") % self.id)
LOG.debug("end _delete_resources for id: %s" % self.id)
def server_status_matches(self, expected_status, server=None):
if not server:
@ -750,29 +750,29 @@ class BuiltInstanceTasks(BuiltInstance, NotifyMixin, ConfigurationMixin):
status.upper() for status in expected_status)
def resize_volume(self, new_size):
LOG.debug(_("begin resize_volume for instance: %s") % self.id)
LOG.debug("begin resize_volume for instance: %s" % self.id)
action = ResizeVolumeAction(self, self.volume_size, new_size)
action.execute()
LOG.debug(_("end resize_volume for instance: %s") % self.id)
LOG.debug("end resize_volume for instance: %s" % self.id)
def resize_flavor(self, old_flavor, new_flavor):
action = ResizeAction(self, old_flavor, new_flavor)
action.execute()
def migrate(self, host):
LOG.debug(_("Calling migrate with host(%s)...") % host)
LOG.debug("Calling migrate with host(%s)..." % host)
action = MigrateAction(self, host)
action.execute()
def create_backup(self, backup_info):
LOG.debug(_("Calling create_backup %s ") % self.id)
LOG.debug("Calling create_backup %s " % self.id)
self.guest.create_backup(backup_info)
def reboot(self):
try:
LOG.debug(_("Instance %s calling stop_db...") % self.id)
LOG.debug("Instance %s calling stop_db..." % self.id)
self.guest.stop_db()
LOG.debug(_("Rebooting instance %s") % self.id)
LOG.debug("Rebooting instance %s" % self.id)
self.server.reboot()
# Poll nova until instance is active
@ -790,35 +790,35 @@ class BuiltInstanceTasks(BuiltInstance, NotifyMixin, ConfigurationMixin):
# Set the status to PAUSED. The guest agent will reset the status
# when the reboot completes and MySQL is running.
self.set_datastore_status_to_paused()
LOG.debug(_("Successfully rebooted instance %s") % self.id)
LOG.debug("Successfully rebooted instance %s" % self.id)
except Exception as e:
LOG.error(_("Failed to reboot instance %(id)s: %(e)s") %
{'id': self.id, 'e': str(e)})
finally:
LOG.debug(_("Rebooting FINALLY %s") % self.id)
LOG.debug("Rebooting FINALLY %s" % self.id)
self.reset_task_status()
def restart(self):
LOG.debug(_("Restarting datastore on instance %s ") % self.id)
LOG.debug("Restarting datastore on instance %s " % self.id)
try:
self.guest.restart()
LOG.debug(_("Restarting datastore successful %s ") % self.id)
LOG.debug("Restarting datastore successful %s " % self.id)
except GuestError:
LOG.error(_("Failure to restart datastore for instance %s.") %
self.id)
finally:
LOG.debug(_("Restarting complete on instance %s ") % self.id)
LOG.debug("Restarting complete on instance %s " % self.id)
self.reset_task_status()
def update_overrides(self, overrides, remove=False):
LOG.debug(_("Updating configuration overrides on instance %s")
LOG.debug("Updating configuration overrides on instance %s"
% self.id)
LOG.debug(_("overrides: %s") % overrides)
LOG.debug(_("self.ds_version: %s") % self.ds_version.__dict__)
LOG.debug("overrides: %s" % overrides)
LOG.debug("self.ds_version: %s" % self.ds_version.__dict__)
# todo(cp16net) How do we know what datastore type we have?
need_restart = do_configs_require_restart(
overrides, datastore_manager=self.ds_version.manager)
LOG.debug(_("do we need a restart?: %s") % need_restart)
LOG.debug("do we need a restart?: %s" % need_restart)
if need_restart:
status = inst_models.InstanceTasks.RESTART_REQUIRED
self.update_db(task_status=status)
@ -832,19 +832,19 @@ class BuiltInstanceTasks(BuiltInstance, NotifyMixin, ConfigurationMixin):
self.guest.update_overrides(config_overrides.config_contents,
remove=remove)
self.guest.apply_overrides(overrides)
LOG.debug(_("Configuration overrides update successful."))
LOG.debug("Configuration overrides update successful.")
except GuestError:
LOG.error(_("Failed to update configuration overrides."))
def unassign_configuration(self, flavor, configuration_id):
LOG.debug(_("Unassigning the configuration from the instance %s")
LOG.debug("Unassigning the configuration from the instance %s"
% self.id)
LOG.debug(_("Unassigning the configuration id %s")
LOG.debug("Unassigning the configuration id %s"
% self.configuration.id)
def _find_item(items, item_name):
LOG.debug(_("items: %s") % items)
LOG.debug(_("item_name: %s") % item_name)
LOG.debug("items: %s" % items)
LOG.debug("item_name: %s" % item_name)
# find the item in the list
for i in items:
if i[0] == item_name:
@ -871,13 +871,13 @@ class BuiltInstanceTasks(BuiltInstance, NotifyMixin, ConfigurationMixin):
"ds_manager": self.ds_version.manager,
"config": default_config,
}
LOG.debug(_("default %(ds_manager)s section: %(config)s") % args)
LOG.debug(_("self.configuration: %s") % self.configuration.__dict__)
LOG.debug("default %(ds_manager)s section: %(config)s" % args)
LOG.debug("self.configuration: %s" % self.configuration.__dict__)
overrides = {}
config_items = Configuration.load_items(self.context, configuration_id)
for item in config_items:
LOG.debug(_("finding item(%s)") % item.__dict__)
LOG.debug("finding item(%s)" % item.__dict__)
try:
key, val = _find_item(default_config, item.configuration_key)
except TypeError:
@ -886,7 +886,7 @@ class BuiltInstanceTasks(BuiltInstance, NotifyMixin, ConfigurationMixin):
self.update_db(task_status=restart_required)
if val:
overrides[item.configuration_key] = _convert_value(val)
LOG.debug(_("setting the default variables in dict: %s") % overrides)
LOG.debug("setting the default variables in dict: %s" % overrides)
self.update_overrides(overrides, remove=True)
self.update_db(configuration_id=None)
@ -1026,23 +1026,23 @@ class ResizeVolumeAction(ConfigurationMixin):
self.instance.restart()
def _stop_db(self):
LOG.debug(_("Instance %s calling stop_db.") % self.instance.id)
LOG.debug("Instance %s calling stop_db." % self.instance.id)
self.instance.guest.stop_db()
@try_recover
def _unmount_volume(self):
LOG.debug(_("Unmounting the volume on instance %(id)s") % {
LOG.debug("Unmounting the volume on instance %(id)s" % {
'id': self.instance.id})
mount_point = self.get_mount_point()
self.instance.guest.unmount_volume(device_path=CONF.device_path,
mount_point=mount_point)
LOG.debug(_("Successfully unmounted the volume %(vol_id)s for "
"instance %(id)s") % {'vol_id': self.instance.volume_id,
LOG.debug("Successfully unmounted the volume %(vol_id)s for "
"instance %(id)s" % {'vol_id': self.instance.volume_id,
'id': self.instance.id})
@try_recover
def _detach_volume(self):
LOG.debug(_("Detach volume %(vol_id)s from instance %(id)s") % {
LOG.debug("Detach volume %(vol_id)s from instance %(id)s" % {
'vol_id': self.instance.volume_id,
'id': self.instance.id})
self.instance.nova_client.volumes.delete_server_volume(
@ -1056,14 +1056,14 @@ class ResizeVolumeAction(ConfigurationMixin):
sleep_time=2,
time_out=CONF.volume_time_out)
LOG.debug(_("Successfully detached volume %(vol_id)s from instance "
"%(id)s") % {'vol_id': self.instance.volume_id,
'id': self.instance.id})
LOG.debug("Successfully detached volume %(vol_id)s from instance "
"%(id)s" % {'vol_id': self.instance.volume_id,
'id': self.instance.id})
@try_recover
def _attach_volume(self):
LOG.debug(_("Attach volume %(vol_id)s to instance %(id)s at "
"%(dev)s") % {'vol_id': self.instance.volume_id,
LOG.debug("Attach volume %(vol_id)s to instance %(id)s at "
"%(dev)s" % {'vol_id': self.instance.volume_id,
'id': self.instance.id, 'dev': CONF.device_path})
self.instance.nova_client.volumes.create_server_volume(
self.instance.server.id, self.instance.volume_id, CONF.device_path)
@ -1076,41 +1076,41 @@ class ResizeVolumeAction(ConfigurationMixin):
sleep_time=2,
time_out=CONF.volume_time_out)
LOG.debug(_("Successfully attached volume %(vol_id)s to instance "
"%(id)s") % {'vol_id': self.instance.volume_id,
LOG.debug("Successfully attached volume %(vol_id)s to instance "
"%(id)s" % {'vol_id': self.instance.volume_id,
'id': self.instance.id})
@try_recover
def _resize_fs(self):
LOG.debug(_("Resizing the filesystem for instance %(id)s") % {
LOG.debug("Resizing the filesystem for instance %(id)s" % {
'id': self.instance.id})
mount_point = self.get_mount_point()
self.instance.guest.resize_fs(device_path=CONF.device_path,
mount_point=mount_point)
LOG.debug(_("Successfully resized volume %(vol_id)s filesystem for "
"instance %(id)s") % {'vol_id': self.instance.volume_id,
LOG.debug("Successfully resized volume %(vol_id)s filesystem for "
"instance %(id)s" % {'vol_id': self.instance.volume_id,
'id': self.instance.id})
@try_recover
def _mount_volume(self):
LOG.debug(_("Mount the volume on instance %(id)s") % {
LOG.debug("Mount the volume on instance %(id)s" % {
'id': self.instance.id})
mount_point = self.get_mount_point()
self.instance.guest.mount_volume(device_path=CONF.device_path,
mount_point=mount_point)
LOG.debug(_("Successfully mounted the volume %(vol_id)s on instance "
"%(id)s") % {'vol_id': self.instance.volume_id,
LOG.debug("Successfully mounted the volume %(vol_id)s on instance "
"%(id)s" % {'vol_id': self.instance.volume_id,
'id': self.instance.id})
@try_recover
def _extend(self):
LOG.debug(_("Extending volume %(vol_id)s for instance %(id)s to "
"size %(size)s") % {'vol_id': self.instance.volume_id,
LOG.debug("Extending volume %(vol_id)s for instance %(id)s to "
"size %(size)s" % {'vol_id': self.instance.volume_id,
'id': self.instance.id, 'size': self.new_size})
self.instance.volume_client.volumes.extend(self.instance.volume_id,
self.new_size)
LOG.debug(_("Successfully extended the volume %(vol_id)s for instance "
"%(id)s") % {'vol_id': self.instance.volume_id,
LOG.debug("Successfully extended the volume %(vol_id)s for instance "
"%(id)s" % {'vol_id': self.instance.volume_id,
'id': self.instance.id})
def _verify_extend(self):
@ -1153,7 +1153,7 @@ class ResizeVolumeAction(ConfigurationMixin):
raise
def _resize_active_volume(self):
LOG.debug(_("begin _resize_active_volume for id: %(id)s") % {
LOG.debug("begin _resize_active_volume for id: %(id)s" % {
'id': self.instance.id})
self._stop_db()
self._unmount_volume(recover_func=self._recover_restart)
@ -1165,13 +1165,13 @@ class ResizeVolumeAction(ConfigurationMixin):
self._resize_fs(recover_func=self._fail)
self._mount_volume(recover_func=self._fail)
self.instance.restart()
LOG.debug(_("end _resize_active_volume for id: %(id)s") % {
LOG.debug("end _resize_active_volume for id: %(id)s" % {
'id': self.instance.id})
def execute(self):
LOG.debug(_("%(gt)s: Resizing instance %(id)s volume for server "
LOG.debug("%(gt)s: Resizing instance %(id)s volume for server "
"%(server_id)s from %(old_volume_size)s to "
"%(new_size)r GB") % {'gt': greenthread.getcurrent(),
"%(new_size)r GB" % {'gt': greenthread.getcurrent(),
'id': self.instance.id,
'server_id': self.instance.server.id,
'old_volume_size': self.old_size,
@ -1252,12 +1252,12 @@ class ResizeActionBase(ConfigurationMixin):
"""Checks the procs; if anything is wrong, reverts the operation."""
# Tell the guest to turn back on, and make sure it can start.
self._assert_guest_is_ok()
LOG.debug(_("Nova guest is ok."))
LOG.debug("Nova guest is ok.")
self._assert_datastore_is_ok()
LOG.debug(_("Datastore is ok."))
LOG.debug("Datastore is ok.")
def _confirm_nova_action(self):
LOG.debug(_("Instance %s calling Compute confirm resize...")
LOG.debug("Instance %s calling Compute confirm resize..."
% self.instance.id)
self.instance.server.confirm_resize()
@ -1271,14 +1271,14 @@ class ResizeActionBase(ConfigurationMixin):
rd_instance.ServiceStatuses.SHUTDOWN))
def _revert_nova_action(self):
LOG.debug(_("Instance %s calling Compute revert resize...")
LOG.debug("Instance %s calling Compute revert resize..."
% self.instance.id)
self.instance.server.revert_resize()
def execute(self):
"""Initiates the action."""
try:
LOG.debug(_("Instance %s calling stop_db...")
LOG.debug("Instance %s calling stop_db..."
% self.instance.id)
self._assert_datastore_is_offline()
self._perform_nova_action()
@ -1294,23 +1294,23 @@ class ResizeActionBase(ConfigurationMixin):
def _perform_nova_action(self):
"""Calls Nova to resize or migrate an instance, and confirms."""
LOG.debug(_("begin resize method _perform_nova_action instance: %s") %
LOG.debug("begin resize method _perform_nova_action instance: %s" %
self.instance.id)
need_to_revert = False
try:
LOG.debug(_("Initiating nova action"))
LOG.debug("Initiating nova action")
self._initiate_nova_action()
LOG.debug(_("Waiting for nova action"))
LOG.debug("Waiting for nova action")
self._wait_for_nova_action()
LOG.debug(_("Asserting nova status is ok"))
LOG.debug("Asserting nova status is ok")
self._assert_nova_status_is_ok()
need_to_revert = True
LOG.debug(_("* * * REVERT BARRIER PASSED * * *"))
LOG.debug(_("Asserting nova action success"))
LOG.debug("* * * REVERT BARRIER PASSED * * *")
LOG.debug("Asserting nova action success")
self._assert_nova_action_was_successful()
LOG.debug(_("Asserting processes are OK"))
LOG.debug("Asserting processes are OK")
self._assert_processes_are_ok()
LOG.debug(_("Confirming nova action"))
LOG.debug("Confirming nova action")
self._confirm_nova_action()
except Exception as ex:
LOG.exception(_("Exception during nova action."))
@ -1330,9 +1330,9 @@ class ResizeActionBase(ConfigurationMixin):
LOG.error(_("Error resizing instance %s.") % self.instance.id)
raise ex
LOG.debug(_("Recording success"))
LOG.debug("Recording success")
self._record_action_success()
LOG.debug(_("end resize method _perform_nova_action instance: %s") %
LOG.debug("end resize method _perform_nova_action instance: %s" %
self.instance.id)
def _wait_for_nova_action(self):
@ -1381,9 +1381,9 @@ class ResizeAction(ResizeActionBase):
self.instance.server.resize(self.new_flavor_id)
def _revert_nova_action(self):
LOG.debug(_("Instance %s calling Compute revert resize...")
LOG.debug("Instance %s calling Compute revert resize..."
% self.instance.id)
LOG.debug(_("Repairing config."))
LOG.debug("Repairing config.")
try:
config = self._render_config(
self.instance.datastore_version.manager,
@ -1394,11 +1394,11 @@ class ResizeAction(ResizeActionBase):
self.instance.guest.reset_configuration(config)
except GuestTimeout:
LOG.exception(_("Error sending reset_configuration call."))
LOG.debug(_("Reverting resize."))
LOG.debug("Reverting resize.")
super(ResizeAction, self)._revert_nova_action()
def _record_action_success(self):
LOG.debug(_("Updating instance %(id)s to flavor_id %(flavor_id)s.")
LOG.debug("Updating instance %(id)s to flavor_id %(flavor_id)s."
% {'id': self.instance.id, 'flavor_id': self.new_flavor_id})
self.instance.update_db(flavor_id=self.new_flavor_id,
task_status=inst_models.InstanceTasks.NONE)
@@ -1423,17 +1423,17 @@ class MigrateAction(ResizeActionBase):
self.host = host
def _assert_nova_action_was_successful(self):
LOG.debug(_("Currently no assertions for a Migrate Action"))
LOG.debug("Currently no assertions for a Migrate Action")
def _initiate_nova_action(self):
LOG.debug(_("Migrating instance %s without flavor change ...")
LOG.debug("Migrating instance %s without flavor change ..."
% self.instance.id)
LOG.debug(_("Forcing migration to host(%s)") % self.host)
LOG.debug("Forcing migration to host(%s)" % self.host)
self.instance.server.migrate(force_host=self.host)
def _record_action_success(self):
LOG.debug(_("Successfully finished Migration to "
"%(hostname)s: %(id)s") %
LOG.debug("Successfully finished Migration to "
"%(hostname)s: %(id)s" %
{'hostname': self.instance.hostname,
'id': self.instance.id})

View File

@@ -272,8 +272,8 @@ class FakeServers(object):
mapping = "%s::%s:%s" % (volume.id, volume.size, 1)
block_device_mapping = {'vdb': mapping}
volumes = [volume]
LOG.debug(_("Fake Volume Create %(volumeid)s with "
"status %(volumestatus)s") %
LOG.debug("Fake Volume Create %(volumeid)s with "
"status %(volumestatus)s" %
{'volumeid': volume.id, 'volumestatus': volume.status})
else:
volumes = self._get_volumes_from_bdm(block_device_mapping)
@@ -347,7 +347,7 @@ class FakeServers(object):
def set_server_running():
instance = DBInstance.find_by(compute_instance_id=id)
LOG.debug(_("Setting server %s to running") % instance.id)
LOG.debug("Setting server %s to running" % instance.id)
status = InstanceServiceStatus.find_by(instance_id=instance.id)
status.status = rd_instance.ServiceStatuses.RUNNING
status.save()
@@ -387,7 +387,7 @@ class FakeServerVolumes(object):
def get_server_volumes(self, server_id):
class ServerVolumes(object):
def __init__(self, block_device_mapping):
LOG.debug(_("block_device_mapping = %s") %
LOG.debug("block_device_mapping = %s" %
block_device_mapping)
device = block_device_mapping['vdb']
(self.volumeId,
@@ -499,8 +499,8 @@ class FakeVolumes(object):
raise Exception("No volume for you!")
else:
volume.schedule_status("available", 2)
LOG.debug(_("Fake volume created %(volumeid)s with "
"status %(volumestatus)s") %
LOG.debug("Fake volume created %(volumeid)s with "
"status %(volumestatus)s" %
{'volumeid': volume.id, 'volumestatus': volume.status})
LOG.info("FAKE_VOLUMES_DB : %s" % FAKE_VOLUMES_DB)
return volume
@@ -509,7 +509,7 @@ class FakeVolumes(object):
return [self.db[key] for key in self.db]
def extend(self, volume_id, new_size):
LOG.debug(_("Resize volume id (%(volumeid)s) to size (%(size)s)") %
LOG.debug("Resize volume id (%(volumeid)s) to size (%(size)s)" %
{'volumeid': volume_id, 'size': new_size})
volume = self.get(volume_id)

View File

@@ -38,7 +38,7 @@ class FakeSwiftClient(object):
@classmethod
def Connection(self, *args, **kargs):
LOG.debug(_("fake FakeSwiftClient Connection"))
LOG.debug("fake FakeSwiftClient Connection")
return FakeSwiftConnection()
@@ -74,7 +74,7 @@ class FakeSwiftConnection(object):
'x-account-object-count': '0'}, [])
def head_container(self, container):
LOG.debug(_("fake head_container(%s)") % container)
LOG.debug("fake head_container(%s)" % container)
if container == 'missing_container':
raise swift.ClientException('fake exception',
http_status=httplib.NOT_FOUND)
@@ -86,11 +86,11 @@ class FakeSwiftConnection(object):
pass
def put_container(self, container):
LOG.debug(_("fake put_container(%s)") % container)
LOG.debug("fake put_container(%s)" % container)
pass
def get_container(self, container, **kwargs):
LOG.debug(_("fake get_container(%s)") % container)
LOG.debug("fake get_container(%s)" % container)
fake_header = None
fake_body = [{'name': 'backup_001'},
{'name': 'backup_002'},
@@ -98,7 +98,7 @@ class FakeSwiftConnection(object):
return fake_header, fake_body
def head_object(self, container, name):
LOG.debug(_("fake put_container(%(container)s, %(name)s)") %
LOG.debug("fake put_container(%(container)s, %(name)s)" %
{'container': container, 'name': name})
checksum = md5()
if self.manifest_prefix and self.manifest_name == name:
@@ -120,7 +120,7 @@ class FakeSwiftConnection(object):
return {'etag': '"%s"' % checksum.hexdigest()}
def get_object(self, container, name, resp_chunk_size=None):
LOG.debug(_("fake get_object(%(container)s, %(name)s)") %
LOG.debug("fake get_object(%(container)s, %(name)s)" %
{'container': container, 'name': name})
if container == 'socket_error_on_get':
raise socket.error(111, 'ECONNREFUSED')
@@ -158,7 +158,7 @@ class FakeSwiftConnection(object):
return (fake_header, fake_object_body)
def put_object(self, container, name, contents, **kwargs):
LOG.debug(_("fake put_object(%(container)s, %(name)s)") %
LOG.debug("fake put_object(%(container)s, %(name)s)" %
{'container': container, 'name': name})
if container == 'socket_error_on_put':
raise socket.error(111, 'ECONNREFUSED')
@@ -192,11 +192,11 @@ class FakeSwiftConnection(object):
return object_checksum.hexdigest()
def post_object(self, container, name, headers={}):
LOG.debug(_("fake post_object(%(container)s, %(name)s, %(head)s)") %
LOG.debug("fake post_object(%(container)s, %(name)s, %(head)s)" %
{'container': container, 'name': name, 'head': str(headers)})
def delete_object(self, container, name):
LOG.debug(_("fake delete_object(%(container)s, %(name)s)") %
LOG.debug("fake delete_object(%(container)s, %(name)s)" %
{'container': container, 'name': name})
if container == 'socket_error_on_delete':
raise socket.error(111, 'ECONNREFUSED')

View File

@@ -72,12 +72,12 @@ class FakeVerifier(object):
def notify(context, message):
"""Simple test notify function which saves the messages to global list."""
LOG.debug(_('Received Usage Notification: %s') % message)
LOG.debug('Received Usage Notification: %s' % message)
payload = message.get('payload', None)
payload['event_type'] = message['event_type']
resource_id = payload['instance_id']
global MESSAGE_QUEUE
MESSAGE_QUEUE[resource_id].append(payload)
LOG.debug(_('Message Queue for %(id)s now has %(msg_count)d messages') %
LOG.debug('Message Queue for %(id)s now has %(msg_count)d messages' %
{'id': resource_id,
'msg_count': len(MESSAGE_QUEUE[resource_id])})