Update i18n translation for neutron.agents log messages

Don't translate debug level logs and enforce log hints.
Our translation policy
(https://wiki.openstack.org/wiki/LoggingStandards#Log_Translation) calls
for not translating debug level logs; this helps prioritize log
translation. Furthermore, translation has a performance overhead even if
the log message is never emitted (since neutron does not support lazy
translation yet).

NOTE: this is done on a directory-by-directory basis to keep the number
of conflicts and rebases manageable.

Add a local hacking rule to enforce this.

This patch set enforces the rule for the neutron/agents directory.
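
For reference, the convention applied throughout the files below looks
roughly like this sketch (the process_port function and its messages are
hypothetical; the imports mirror the ones added in this patch):

from neutron.openstack.common.gettextutils import _LE, _LI, _LW
from neutron.openstack.common import log as logging

LOG = logging.getLogger(__name__)


def process_port(port_id):
    # Debug messages are left untranslated and use deferred
    # interpolation rather than %-formatting at the call site.
    LOG.debug("Processing port %s", port_id)
    # Other levels use the level-specific translation hints.
    LOG.info(_LI("Port %s processed"), port_id)
    LOG.warning(_LW("Port %s has no binding"), port_id)
    LOG.error(_LE("Failed to process port %s"), port_id)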

Partial-bug: #1320867

Change-Id: I4bd562e5138c2d2850072440aa121f27e902463a
Gary Kotton 2014-05-16 23:48:21 -07:00
parent 8806ed2494
commit ccbdf83d8d
21 changed files with 259 additions and 213 deletions


@@ -8,6 +8,7 @@ Neutron Style Commandments

 Neutron Specific Commandments
 --------------------------
+- [N319] Validate that debug level logs are not translated
 - [N320] Validate that LOG messages, except debug ones, have translations
 - [N321] Validate that jsonutils module is used instead of json
 - [N322] We do not use @authors tags in source files. We have git to track
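
The N319 rule above is implemented as a local hacking (flake8) check. A
minimal sketch of such a check is shown below; it assumes the usual flake8
logical-line check signature and a simple startswith test, and the
directory list and message text are illustrative rather than the exact
code added to neutron/hacking/checks.py:

def no_translate_debug_logs(logical_line, filename):
    """N319: debug level log messages must not be translated.

    Flake8-style logical-line check; the filename argument is what lets
    the rule be rolled out one directory at a time (here, the agent
    code), as described in the commit message.
    """
    dirs_to_check = ["neutron/agents"]  # rollout scope for this patch
    if not any(d in filename for d in dirs_to_check):
        return
    if logical_line.startswith("LOG.debug(_("):
        yield 0, "N319: Don't translate debug level logs"

Such a check would then be registered through the project's local-check
factory so that flake8 reports the N319 code.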


@ -38,6 +38,7 @@ from neutron.common import topics
from neutron.common import utils from neutron.common import utils
from neutron import context from neutron import context
from neutron import manager from neutron import manager
from neutron.openstack.common.gettextutils import _LE, _LI, _LW
from neutron.openstack.common import importutils from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall from neutron.openstack.common import loopingcall
@ -100,15 +101,13 @@ class DhcpAgent(manager.Manager):
self.cache.put(net) self.cache.put(net)
except NotImplementedError: except NotImplementedError:
# just go ahead with an empty networks cache # just go ahead with an empty networks cache
LOG.debug( LOG.debug("The '%s' DHCP-driver does not support retrieving of a "
_("The '%s' DHCP-driver does not support retrieving of a " "list of existing networks",
"list of existing networks"), self.conf.dhcp_driver)
self.conf.dhcp_driver
)
def after_start(self): def after_start(self):
self.run() self.run()
LOG.info(_("DHCP agent started")) LOG.info(_LI("DHCP agent started"))
def run(self): def run(self):
"""Activate the DHCP agent.""" """Activate the DHCP agent."""
@ -117,7 +116,7 @@ class DhcpAgent(manager.Manager):
def call_driver(self, action, network, **action_kwargs): def call_driver(self, action, network, **action_kwargs):
"""Invoke an action on a DHCP driver instance.""" """Invoke an action on a DHCP driver instance."""
LOG.debug(_('Calling driver for network: %(net)s action: %(action)s'), LOG.debug('Calling driver for network: %(net)s action: %(action)s',
{'net': network.id, 'action': action}) {'net': network.id, 'action': action})
try: try:
# the Driver expects something that is duck typed similar to # the Driver expects something that is duck typed similar to
@ -133,18 +132,19 @@ class DhcpAgent(manager.Manager):
except exceptions.Conflict: except exceptions.Conflict:
# No need to resync here, the agent will receive the event related # No need to resync here, the agent will receive the event related
# to a status update for the network # to a status update for the network
LOG.warning(_('Unable to %(action)s dhcp for %(net_id)s: there is ' LOG.warning(_LW('Unable to %(action)s dhcp for %(net_id)s: there '
'a conflict with its current state; please check ' 'is a conflict with its current state; please '
'that the network and/or its subnet(s) still exist.') 'check that the network and/or its subnet(s) '
% {'net_id': network.id, 'action': action}) 'still exist.'),
{'net_id': network.id, 'action': action})
except Exception as e: except Exception as e:
self.schedule_resync(e, network.id) self.schedule_resync(e, network.id)
if (isinstance(e, messaging.RemoteError) if (isinstance(e, messaging.RemoteError)
and e.exc_type == 'NetworkNotFound' and e.exc_type == 'NetworkNotFound'
or isinstance(e, exceptions.NetworkNotFound)): or isinstance(e, exceptions.NetworkNotFound)):
LOG.warning(_("Network %s has been deleted."), network.id) LOG.warning(_LW("Network %s has been deleted."), network.id)
else: else:
LOG.exception(_('Unable to %(action)s dhcp for %(net_id)s.') LOG.exception(_LE('Unable to %(action)s dhcp for %(net_id)s.')
% {'net_id': network.id, 'action': action}) % {'net_id': network.id, 'action': action})
def schedule_resync(self, reason, network=None): def schedule_resync(self, reason, network=None):
@ -159,7 +159,7 @@ class DhcpAgent(manager.Manager):
or 'None' is one of the networks, sync all of the networks. or 'None' is one of the networks, sync all of the networks.
""" """
only_nets = set([] if (not networks or None in networks) else networks) only_nets = set([] if (not networks or None in networks) else networks)
LOG.info(_('Synchronizing state')) LOG.info(_LI('Synchronizing state'))
pool = eventlet.GreenPool(cfg.CONF.num_sync_threads) pool = eventlet.GreenPool(cfg.CONF.num_sync_threads)
known_network_ids = set(self.cache.get_network_ids()) known_network_ids = set(self.cache.get_network_ids())
@ -171,8 +171,8 @@ class DhcpAgent(manager.Manager):
self.disable_dhcp_helper(deleted_id) self.disable_dhcp_helper(deleted_id)
except Exception as e: except Exception as e:
self.schedule_resync(e, deleted_id) self.schedule_resync(e, deleted_id)
LOG.exception(_('Unable to sync network state on deleted ' LOG.exception(_LE('Unable to sync network state on '
'network %s'), deleted_id) 'deleted network %s'), deleted_id)
for network in active_networks: for network in active_networks:
if (not only_nets or # specifically resync all if (not only_nets or # specifically resync all
@ -180,11 +180,11 @@ class DhcpAgent(manager.Manager):
network.id in only_nets): # specific network to sync network.id in only_nets): # specific network to sync
pool.spawn(self.safe_configure_dhcp_for_network, network) pool.spawn(self.safe_configure_dhcp_for_network, network)
pool.waitall() pool.waitall()
LOG.info(_('Synchronizing state complete')) LOG.info(_LI('Synchronizing state complete'))
except Exception as e: except Exception as e:
self.schedule_resync(e) self.schedule_resync(e)
LOG.exception(_('Unable to sync network state.')) LOG.exception(_LE('Unable to sync network state.'))
@utils.exception_logger() @utils.exception_logger()
def _periodic_resync_helper(self): def _periodic_resync_helper(self):
@ -199,7 +199,7 @@ class DhcpAgent(manager.Manager):
for net, r in reasons.items(): for net, r in reasons.items():
if not net: if not net:
net = "*" net = "*"
LOG.debug(_("resync (%(network)s): %(reason)s"), LOG.debug("resync (%(network)s): %(reason)s",
{"reason": r, "network": net}) {"reason": r, "network": net})
self.sync_state(reasons.keys()) self.sync_state(reasons.keys())
@ -211,11 +211,11 @@ class DhcpAgent(manager.Manager):
try: try:
network = self.plugin_rpc.get_network_info(network_id) network = self.plugin_rpc.get_network_info(network_id)
if not network: if not network:
LOG.warn(_('Network %s has been deleted.'), network_id) LOG.warn(_LW('Network %s has been deleted.'), network_id)
return network return network
except Exception as e: except Exception as e:
self.schedule_resync(e, network_id) self.schedule_resync(e, network_id)
LOG.exception(_('Network %s info call failed.'), network_id) LOG.exception(_LE('Network %s info call failed.'), network_id)
def enable_dhcp_helper(self, network_id): def enable_dhcp_helper(self, network_id):
"""Enable DHCP for a network that meets enabling criteria.""" """Enable DHCP for a network that meets enabling criteria."""
@ -228,8 +228,8 @@ class DhcpAgent(manager.Manager):
try: try:
self.configure_dhcp_for_network(network) self.configure_dhcp_for_network(network)
except (exceptions.NetworkNotFound, RuntimeError): except (exceptions.NetworkNotFound, RuntimeError):
LOG.warn(_('Network %s may have been deleted and its resources ' LOG.warn(_LW('Network %s may have been deleted and its resources '
'may have already been disposed.'), network.id) 'may have already been disposed.'), network.id)
def configure_dhcp_for_network(self, network): def configure_dhcp_for_network(self, network):
if not network.admin_state_up: if not network.admin_state_up:
@ -361,10 +361,10 @@ class DhcpAgent(manager.Manager):
if router_ports: if router_ports:
# Multiple router ports should not be allowed # Multiple router ports should not be allowed
if len(router_ports) > 1: if len(router_ports) > 1:
LOG.warning(_("%(port_num)d router ports found on the " LOG.warning(_LW("%(port_num)d router ports found on the "
"metadata access network. Only the port " "metadata access network. Only the port "
"%(port_id)s, for router %(router_id)s " "%(port_id)s, for router %(router_id)s "
"will be considered"), "will be considered"),
{'port_num': len(router_ports), {'port_num': len(router_ports),
'port_id': router_ports[0].id, 'port_id': router_ports[0].id,
'router_id': router_ports[0].device_id}) 'router_id': router_ports[0].device_id})
@ -592,13 +592,13 @@ class DhcpAgentWithStateReport(DhcpAgent):
self.use_call = False self.use_call = False
except AttributeError: except AttributeError:
# This means the server does not support report_state # This means the server does not support report_state
LOG.warn(_("Neutron server does not support state report." LOG.warn(_LW("Neutron server does not support state report."
" State report for this agent will be disabled.")) " State report for this agent will be disabled."))
self.heartbeat.stop() self.heartbeat.stop()
self.run() self.run()
return return
except Exception: except Exception:
LOG.exception(_("Failed reporting state!")) LOG.exception(_LE("Failed reporting state!"))
return return
if self.agent_state.pop('start_flag', None): if self.agent_state.pop('start_flag', None):
self.run() self.run()
@ -607,10 +607,10 @@ class DhcpAgentWithStateReport(DhcpAgent):
"""Handle the agent_updated notification event.""" """Handle the agent_updated notification event."""
self.schedule_resync(_("Agent updated: %(payload)s") % self.schedule_resync(_("Agent updated: %(payload)s") %
{"payload": payload}) {"payload": payload})
LOG.info(_("agent_updated by server side %s!"), payload) LOG.info(_LI("agent_updated by server side %s!"), payload)
def after_start(self): def after_start(self):
LOG.info(_("DHCP agent started")) LOG.info(_LI("DHCP agent started"))
def register_options(): def register_options():


@ -43,7 +43,7 @@ from neutron.common import utils as common_utils
from neutron import context as n_context from neutron import context as n_context
from neutron import manager from neutron import manager
from neutron.openstack.common import excutils from neutron.openstack.common import excutils
from neutron.openstack.common.gettextutils import _LE, _LW from neutron.openstack.common.gettextutils import _LE, _LI, _LW
from neutron.openstack.common import importutils from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall from neutron.openstack.common import loopingcall
@ -515,9 +515,8 @@ class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback,
self.conf self.conf
) )
except Exception: except Exception:
msg = _("Error importing interface driver " LOG.error(_LE("Error importing interface driver "
"'%s'") % self.conf.interface_driver "'%s'"), self.conf.interface_driver)
LOG.error(msg)
raise SystemExit(1) raise SystemExit(1)
self.context = n_context.get_admin_context_without_session() self.context = n_context.get_admin_context_without_session()
@ -588,12 +587,12 @@ class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback,
The actual values are not verified for correctness. The actual values are not verified for correctness.
""" """
if not self.conf.interface_driver: if not self.conf.interface_driver:
msg = _('An interface driver must be specified') msg = _LE('An interface driver must be specified')
LOG.error(msg) LOG.error(msg)
raise SystemExit(1) raise SystemExit(1)
if not self.conf.use_namespaces and not self.conf.router_id: if not self.conf.use_namespaces and not self.conf.router_id:
msg = _('Router id is required if not using namespaces.') msg = _LE('Router id is required if not using namespaces.')
LOG.error(msg) LOG.error(msg)
raise SystemExit(1) raise SystemExit(1)
@ -611,7 +610,7 @@ class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback,
if (ns.startswith(NS_PREFIX) if (ns.startswith(NS_PREFIX)
or ns.startswith(SNAT_NS_PREFIX))) or ns.startswith(SNAT_NS_PREFIX)))
except RuntimeError: except RuntimeError:
LOG.exception(_('RuntimeError in obtaining router list ' LOG.exception(_LE('RuntimeError in obtaining router list '
'for namespace cleanup.')) 'for namespace cleanup.'))
return set() return set()
@ -646,8 +645,8 @@ class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback,
try: try:
self._destroy_namespace(ns) self._destroy_namespace(ns)
except RuntimeError: except RuntimeError:
LOG.exception(_('Failed to destroy stale router namespace ' LOG.exception(_LE('Failed to destroy stale router namespace '
'%s'), ns) '%s'), ns)
self._clean_stale_namespaces = False self._clean_stale_namespaces = False
def _destroy_namespace(self, ns): def _destroy_namespace(self, ns):
@ -662,8 +661,7 @@ class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback,
try: try:
ns_ip.netns.delete(ns) ns_ip.netns.delete(ns)
except RuntimeError: except RuntimeError:
msg = _('Failed trying to delete namespace: %s') % ns LOG.exception(_LE('Failed trying to delete namespace: %s'), ns)
LOG.exception(msg)
def _destroy_snat_namespace(self, ns): def _destroy_snat_namespace(self, ns):
ns_ip = ip_lib.IPWrapper(self.root_helper, namespace=ns) ns_ip = ip_lib.IPWrapper(self.root_helper, namespace=ns)
@ -790,8 +788,8 @@ class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback,
def _router_removed(self, router_id): def _router_removed(self, router_id):
ri = self.router_info.get(router_id) ri = self.router_info.get(router_id)
if ri is None: if ri is None:
LOG.warn(_("Info for router %s were not found. " LOG.warn(_LW("Info for router %s were not found. "
"Skipping router removal"), router_id) "Skipping router removal"), router_id)
return return
if ri.is_ha: if ri.is_ha:
@ -865,7 +863,7 @@ class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback,
if not ips: if not ips:
raise Exception(_("Router port %s has no IP address") % port['id']) raise Exception(_("Router port %s has no IP address") % port['id'])
if len(ips) > 1: if len(ips) > 1:
LOG.error(_("Ignoring multiple IPs on router port %s"), LOG.error(_LE("Ignoring multiple IPs on router port %s"),
port['id']) port['id'])
prefixlen = netaddr.IPNetwork(port['subnet']['cidr']).prefixlen prefixlen = netaddr.IPNetwork(port['subnet']['cidr']).prefixlen
port['ip_cidr'] = "%s/%s" % (ips[0]['ip_address'], prefixlen) port['ip_cidr'] = "%s/%s" % (ips[0]['ip_address'], prefixlen)
@ -926,7 +924,7 @@ class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback,
id in current_port_ids]) id in current_port_ids])
stale_devs = current_internal_devs - current_port_devs stale_devs = current_internal_devs - current_port_devs
for stale_dev in stale_devs: for stale_dev in stale_devs:
LOG.debug(_('Deleting stale internal router device: %s'), LOG.debug('Deleting stale internal router device: %s',
stale_dev) stale_dev)
self.driver.unplug(stale_dev, self.driver.unplug(stale_dev,
namespace=ri.ns_name, namespace=ri.ns_name,
@ -963,7 +961,7 @@ class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback,
if dev.startswith(EXTERNAL_DEV_PREFIX) if dev.startswith(EXTERNAL_DEV_PREFIX)
and dev != interface_name] and dev != interface_name]
for stale_dev in stale_devs: for stale_dev in stale_devs:
LOG.debug(_('Deleting stale external router device: %s'), LOG.debug('Deleting stale external router device: %s',
stale_dev) stale_dev)
self.driver.unplug(stale_dev, self.driver.unplug(stale_dev,
bridge=self.conf.external_network_bridge, bridge=self.conf.external_network_bridge,
@ -1133,8 +1131,8 @@ class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback,
processutils.ProcessExecutionError): processutils.ProcessExecutionError):
# any exception occurred here should cause the floating IP # any exception occurred here should cause the floating IP
# to be set in error state # to be set in error state
LOG.warn(_("Unable to configure IP address for " LOG.warn(_LW("Unable to configure IP address for "
"floating IP: %s"), fip['id']) "floating IP: %s"), fip['id'])
return l3_constants.FLOATINGIP_STATUS_ERROR return l3_constants.FLOATINGIP_STATUS_ERROR
if ri.router['distributed']: if ri.router['distributed']:
# Special Handling for DVR - update FIP namespace # Special Handling for DVR - update FIP namespace
@ -1216,7 +1214,7 @@ class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback,
namespace=ns_name) namespace=ns_name)
ip_wrapper.netns.execute(arping_cmd, check_exit_code=True) ip_wrapper.netns.execute(arping_cmd, check_exit_code=True)
except Exception as e: except Exception as e:
LOG.error(_("Failed sending gratuitous ARP: %s"), str(e)) LOG.error(_LE("Failed sending gratuitous ARP: %s"), str(e))
if distributed: if distributed:
device.addr.delete(net.version, ip_cidr) device.addr.delete(net.version, ip_cidr)
@ -1281,7 +1279,7 @@ class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback,
if match_port: if match_port:
return match_port[0] return match_port[0]
else: else:
LOG.error(_('DVR: no map match_port found!')) LOG.error(_LE('DVR: no map match_port found!'))
def _create_dvr_gateway(self, ri, ex_gw_port, gw_interface_name, def _create_dvr_gateway(self, ri, ex_gw_port, gw_interface_name,
snat_ports): snat_ports):
@ -1485,7 +1483,7 @@ class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback,
ns_ipr.netns.execute(['sysctl', '-w', 'net.ipv4.conf.%s.' ns_ipr.netns.execute(['sysctl', '-w', 'net.ipv4.conf.%s.'
'send_redirects=0' % sn_int]) 'send_redirects=0' % sn_int])
except Exception: except Exception:
LOG.exception(_('DVR: error adding redirection logic')) LOG.exception(_LE('DVR: error adding redirection logic'))
def _snat_redirect_remove(self, ri, sn_port, sn_int): def _snat_redirect_remove(self, ri, sn_port, sn_int):
"""Removes rules and routes for SNAT redirection.""" """Removes rules and routes for SNAT redirection."""
@ -1497,7 +1495,7 @@ class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback,
ns_ipd.route.delete_gateway(table=snat_idx) ns_ipd.route.delete_gateway(table=snat_idx)
ns_ipr.delete_rule_priority(snat_idx) ns_ipr.delete_rule_priority(snat_idx)
except Exception: except Exception:
LOG.exception(_('DVR: removed snat failed')) LOG.exception(_LE('DVR: removed snat failed'))
def _internal_network_added(self, ns_name, network_id, port_id, def _internal_network_added(self, ns_name, network_id, port_id,
internal_cidr, mac_address, internal_cidr, mac_address,
@ -1598,7 +1596,7 @@ class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback,
self.plugin_rpc.get_agent_gateway_port( self.plugin_rpc.get_agent_gateway_port(
self.context, network_id)) self.context, network_id))
if 'subnet' not in self.agent_gateway_port: if 'subnet' not in self.agent_gateway_port:
LOG.error(_('Missing subnet/agent_gateway_port')) LOG.error(_LE('Missing subnet/agent_gateway_port'))
return return
self._set_subnet_info(self.agent_gateway_port) self._set_subnet_info(self.agent_gateway_port)
@ -1718,7 +1716,7 @@ class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback,
def router_deleted(self, context, router_id): def router_deleted(self, context, router_id):
"""Deal with router deletion RPC message.""" """Deal with router deletion RPC message."""
LOG.debug(_('Got router deleted notification for %s'), router_id) LOG.debug('Got router deleted notification for %s', router_id)
update = RouterUpdate(router_id, PRIORITY_RPC, action=DELETE_ROUTER) update = RouterUpdate(router_id, PRIORITY_RPC, action=DELETE_ROUTER)
self._queue.add(update) self._queue.add(update)
@ -1739,7 +1737,7 @@ class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback,
elif operation == 'delete': elif operation == 'delete':
device.neigh.delete(net.version, ip, mac) device.neigh.delete(net.version, ip, mac)
except Exception: except Exception:
LOG.exception(_("DVR: Failed updating arp entry")) LOG.exception(_LE("DVR: Failed updating arp entry"))
self.fullsync = True self.fullsync = True
def add_arp_entry(self, context, payload): def add_arp_entry(self, context, payload):
@ -1766,7 +1764,7 @@ class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback,
def routers_updated(self, context, routers): def routers_updated(self, context, routers):
"""Deal with routers modification and creation RPC message.""" """Deal with routers modification and creation RPC message."""
LOG.debug(_('Got routers updated notification :%s'), routers) LOG.debug('Got routers updated notification :%s', routers)
if routers: if routers:
# This is needed for backward compatibility # This is needed for backward compatibility
if isinstance(routers[0], dict): if isinstance(routers[0], dict):
@ -1776,19 +1774,19 @@ class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback,
self._queue.add(update) self._queue.add(update)
def router_removed_from_agent(self, context, payload): def router_removed_from_agent(self, context, payload):
LOG.debug(_('Got router removed from agent :%r'), payload) LOG.debug('Got router removed from agent :%r', payload)
router_id = payload['router_id'] router_id = payload['router_id']
update = RouterUpdate(router_id, PRIORITY_RPC, action=DELETE_ROUTER) update = RouterUpdate(router_id, PRIORITY_RPC, action=DELETE_ROUTER)
self._queue.add(update) self._queue.add(update)
def router_added_to_agent(self, context, payload): def router_added_to_agent(self, context, payload):
LOG.debug(_('Got router added to agent :%r'), payload) LOG.debug('Got router added to agent :%r', payload)
self.routers_updated(context, payload) self.routers_updated(context, payload)
def _process_router_if_compatible(self, router): def _process_router_if_compatible(self, router):
if (self.conf.external_network_bridge and if (self.conf.external_network_bridge and
not ip_lib.device_exists(self.conf.external_network_bridge)): not ip_lib.device_exists(self.conf.external_network_bridge)):
LOG.error(_("The external network bridge '%s' does not exist"), LOG.error(_LE("The external network bridge '%s' does not exist"),
self.conf.external_network_bridge) self.conf.external_network_bridge)
return return
@ -1828,7 +1826,7 @@ class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback,
routers = self.plugin_rpc.get_routers(self.context, routers = self.plugin_rpc.get_routers(self.context,
[update.id]) [update.id])
except Exception: except Exception:
msg = _("Failed to fetch router information for '%s'") msg = _LE("Failed to fetch router information for '%s'")
LOG.exception(msg, update.id) LOG.exception(msg, update.id)
self.fullsync = True self.fullsync = True
continue continue
@ -1869,7 +1867,7 @@ class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback,
def _sync_routers_task(self, context): def _sync_routers_task(self, context):
if self.services_sync: if self.services_sync:
super(L3NATAgent, self).process_services_sync(context) super(L3NATAgent, self).process_services_sync(context)
LOG.debug(_("Starting _sync_routers_task - fullsync:%s"), LOG.debug("Starting _sync_routers_task - fullsync:%s",
self.fullsync) self.fullsync)
if not self.fullsync: if not self.fullsync:
return return
@ -1887,7 +1885,7 @@ class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback,
routers = self.plugin_rpc.get_routers( routers = self.plugin_rpc.get_routers(
context, router_ids) context, router_ids)
LOG.debug(_('Processing :%r'), routers) LOG.debug('Processing :%r', routers)
for r in routers: for r in routers:
update = RouterUpdate(r['id'], update = RouterUpdate(r['id'],
PRIORITY_SYNC_ROUTERS_TASK, PRIORITY_SYNC_ROUTERS_TASK,
@ -1895,12 +1893,12 @@ class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback,
timestamp=timestamp) timestamp=timestamp)
self._queue.add(update) self._queue.add(update)
self.fullsync = False self.fullsync = False
LOG.debug(_("_sync_routers_task successfully completed")) LOG.debug("_sync_routers_task successfully completed")
except messaging.MessagingException: except messaging.MessagingException:
LOG.exception(_("Failed synchronizing routers due to RPC error")) LOG.exception(_LE("Failed synchronizing routers due to RPC error"))
self.fullsync = True self.fullsync = True
except Exception: except Exception:
LOG.exception(_("Failed synchronizing routers")) LOG.exception(_LE("Failed synchronizing routers"))
self.fullsync = True self.fullsync = True
else: else:
# Resync is not necessary for the cleanup of stale namespaces # Resync is not necessary for the cleanup of stale namespaces
@ -1923,7 +1921,7 @@ class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback,
def after_start(self): def after_start(self):
eventlet.spawn_n(self._process_routers_loop) eventlet.spawn_n(self._process_routers_loop)
LOG.info(_("L3 agent started")) LOG.info(_LI("L3 agent started"))
def _update_routing_table(self, ri, operation, route): def _update_routing_table(self, ri, operation, route):
cmd = ['ip', 'route', operation, 'to', route['destination'], cmd = ['ip', 'route', operation, 'to', route['destination'],
@ -1942,7 +1940,7 @@ class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback,
adds, removes = common_utils.diff_list_of_dict(old_routes, adds, removes = common_utils.diff_list_of_dict(old_routes,
new_routes) new_routes)
for route in adds: for route in adds:
LOG.debug(_("Added route entry is '%s'"), route) LOG.debug("Added route entry is '%s'", route)
# remove replaced route from deleted route # remove replaced route from deleted route
for del_route in removes: for del_route in removes:
if route['destination'] == del_route['destination']: if route['destination'] == del_route['destination']:
@ -1950,7 +1948,7 @@ class L3NATAgent(firewall_l3_agent.FWaaSL3AgentRpcCallback,
#replace success even if there is no existing route #replace success even if there is no existing route
self._update_routing_table(ri, 'replace', route) self._update_routing_table(ri, 'replace', route)
for route in removes: for route in removes:
LOG.debug(_("Removed route entry is '%s'"), route) LOG.debug("Removed route entry is '%s'", route)
self._update_routing_table(ri, 'delete', route) self._update_routing_table(ri, 'delete', route)
ri.routes = new_routes ri.routes = new_routes
@ -1984,7 +1982,7 @@ class L3NATAgentWithStateReport(L3NATAgent):
self.heartbeat.start(interval=report_interval) self.heartbeat.start(interval=report_interval)
def _report_state(self): def _report_state(self):
LOG.debug(_("Report state task started")) LOG.debug("Report state task started")
num_ex_gw_ports = 0 num_ex_gw_ports = 0
num_interfaces = 0 num_interfaces = 0
num_floating_ips = 0 num_floating_ips = 0
@ -2008,20 +2006,20 @@ class L3NATAgentWithStateReport(L3NATAgent):
self.use_call) self.use_call)
self.agent_state.pop('start_flag', None) self.agent_state.pop('start_flag', None)
self.use_call = False self.use_call = False
LOG.debug(_("Report state task successfully completed")) LOG.debug("Report state task successfully completed")
except AttributeError: except AttributeError:
# This means the server does not support report_state # This means the server does not support report_state
LOG.warn(_("Neutron server does not support state report." LOG.warn(_LW("Neutron server does not support state report."
" State report for this agent will be disabled.")) " State report for this agent will be disabled."))
self.heartbeat.stop() self.heartbeat.stop()
return return
except Exception: except Exception:
LOG.exception(_("Failed reporting state!")) LOG.exception(_LE("Failed reporting state!"))
def agent_updated(self, context, payload): def agent_updated(self, context, payload):
"""Handle the agent_updated notification event.""" """Handle the agent_updated notification event."""
self.fullsync = True self.fullsync = True
LOG.info(_("agent_updated by server side %s!"), payload) LOG.info(_LI("agent_updated by server side %s!"), payload)
def _register_opts(conf): def _register_opts(conf):


@ -17,6 +17,7 @@ import eventlet.event
import eventlet.queue import eventlet.queue
from neutron.agent.linux import utils from neutron.agent.linux import utils
from neutron.openstack.common.gettextutils import _LE
from neutron.openstack.common import log as logging from neutron.openstack.common import log as logging
@ -79,13 +80,13 @@ class AsyncProcess(object):
if self._kill_event: if self._kill_event:
raise AsyncProcessException(_('Process is already started')) raise AsyncProcessException(_('Process is already started'))
else: else:
LOG.debug(_('Launching async process [%s].'), self.cmd) LOG.debug('Launching async process [%s].', self.cmd)
self._spawn() self._spawn()
def stop(self): def stop(self):
"""Halt the process and watcher threads.""" """Halt the process and watcher threads."""
if self._kill_event: if self._kill_event:
LOG.debug(_('Halting async process [%s].'), self.cmd) LOG.debug('Halting async process [%s].', self.cmd)
self._kill() self._kill()
else: else:
raise AsyncProcessException(_('Process is not running.')) raise AsyncProcessException(_('Process is not running.'))
@ -160,20 +161,20 @@ class AsyncProcess(object):
stale_pid = (isinstance(ex, RuntimeError) and stale_pid = (isinstance(ex, RuntimeError) and
'No such process' in str(ex)) 'No such process' in str(ex))
if not stale_pid: if not stale_pid:
LOG.exception(_('An error occurred while killing [%s].'), LOG.exception(_LE('An error occurred while killing [%s].'),
self.cmd) self.cmd)
return False return False
return True return True
def _handle_process_error(self): def _handle_process_error(self):
"""Kill the async process and respawn if necessary.""" """Kill the async process and respawn if necessary."""
LOG.debug(_('Halting async process [%s] in response to an error.'), LOG.debug('Halting async process [%s] in response to an error.',
self.cmd) self.cmd)
respawning = self.respawn_interval >= 0 respawning = self.respawn_interval >= 0
self._kill(respawning=respawning) self._kill(respawning=respawning)
if respawning: if respawning:
eventlet.sleep(self.respawn_interval) eventlet.sleep(self.respawn_interval)
LOG.debug(_('Respawning async process [%s].'), self.cmd) LOG.debug('Respawning async process [%s].', self.cmd)
self._spawn() self._spawn()
def _watch_process(self, callback, kill_event): def _watch_process(self, callback, kill_event):
@ -182,8 +183,8 @@ class AsyncProcess(object):
if not callback(): if not callback():
break break
except Exception: except Exception:
LOG.exception(_('An error occurred while communicating ' LOG.exception(_LE('An error occurred while communicating '
'with async process [%s].'), self.cmd) 'with async process [%s].'), self.cmd)
break break
# Ensure that watching a process with lots of output does # Ensure that watching a process with lots of output does
# not block execution of other greenthreads. # not block execution of other greenthreads.


@ -18,6 +18,7 @@ import os
import signal import signal
import sys import sys
from neutron.openstack.common.gettextutils import _LE
from neutron.openstack.common import log as logging from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
@ -32,7 +33,7 @@ class Pidfile(object):
self.fd = os.open(pidfile, os.O_CREAT | os.O_RDWR) self.fd = os.open(pidfile, os.O_CREAT | os.O_RDWR)
fcntl.flock(self.fd, fcntl.LOCK_EX | fcntl.LOCK_NB) fcntl.flock(self.fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError: except IOError:
LOG.exception(_("Error while handling pidfile: %s"), pidfile) LOG.exception(_LE("Error while handling pidfile: %s"), pidfile)
sys.exit(1) sys.exit(1)
def __str__(self): def __str__(self):
@ -89,7 +90,7 @@ class Daemon(object):
if pid > 0: if pid > 0:
sys.exit(0) sys.exit(0)
except OSError: except OSError:
LOG.exception(_('Fork failed')) LOG.exception(_LE('Fork failed'))
sys.exit(1) sys.exit(1)
def daemonize(self): def daemonize(self):
@ -131,8 +132,8 @@ class Daemon(object):
if self.pidfile.is_running(): if self.pidfile.is_running():
self.pidfile.unlock() self.pidfile.unlock()
message = _('Pidfile %s already exist. Daemon already running?') LOG.error(_LE('Pidfile %s already exist. Daemon already '
LOG.error(message, self.pidfile) 'running?'), self.pidfile)
sys.exit(1) sys.exit(1)
# Start the daemon # Start the daemon


@ -31,6 +31,7 @@ from neutron.agent.linux import utils
from neutron.common import constants from neutron.common import constants
from neutron.common import exceptions from neutron.common import exceptions
from neutron.common import utils as commonutils from neutron.common import utils as commonutils
from neutron.openstack.common.gettextutils import _LE
from neutron.openstack.common import importutils from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging from neutron.openstack.common import log as logging
from neutron.openstack.common import uuidutils from neutron.openstack.common import uuidutils
@ -216,14 +217,14 @@ class DhcpLocalProcess(DhcpBase):
cmd = ['kill', '-9', pid] cmd = ['kill', '-9', pid]
utils.execute(cmd, self.root_helper) utils.execute(cmd, self.root_helper)
else: else:
LOG.debug(_('DHCP for %(net_id)s is stale, pid %(pid)d ' LOG.debug('DHCP for %(net_id)s is stale, pid %(pid)d '
'does not exist, performing cleanup'), 'does not exist, performing cleanup',
{'net_id': self.network.id, 'pid': pid}) {'net_id': self.network.id, 'pid': pid})
if not retain_port: if not retain_port:
self.device_manager.destroy(self.network, self.device_manager.destroy(self.network,
self.interface_name) self.interface_name)
else: else:
LOG.debug(_('No DHCP started for %s'), self.network.id) LOG.debug('No DHCP started for %s', self.network.id)
self._remove_config_files() self._remove_config_files()
@ -234,8 +235,8 @@ class DhcpLocalProcess(DhcpBase):
try: try:
ns_ip.netns.delete(self.network.namespace) ns_ip.netns.delete(self.network.namespace)
except RuntimeError: except RuntimeError:
msg = _('Failed trying to delete namespace: %s') LOG.exception(_LE('Failed trying to delete namespace: %s'),
LOG.exception(msg, self.network.namespace) self.network.namespace)
def _remove_config_files(self): def _remove_config_files(self):
confs_dir = os.path.abspath(os.path.normpath(self.conf.dhcp_confs)) confs_dir = os.path.abspath(os.path.normpath(self.conf.dhcp_confs))
@ -325,15 +326,15 @@ class Dnsmasq(DhcpLocalProcess):
ver = re.findall("\d+.\d+", out)[0] ver = re.findall("\d+.\d+", out)[0]
is_valid_version = float(ver) >= cls.MINIMUM_VERSION is_valid_version = float(ver) >= cls.MINIMUM_VERSION
if not is_valid_version: if not is_valid_version:
LOG.error(_('FAILED VERSION REQUIREMENT FOR DNSMASQ. ' LOG.error(_LE('FAILED VERSION REQUIREMENT FOR DNSMASQ. '
'DHCP AGENT MAY NOT RUN CORRECTLY! ' 'DHCP AGENT MAY NOT RUN CORRECTLY! '
'Please ensure that its version is %s ' 'Please ensure that its version is %s '
'or above!'), cls.MINIMUM_VERSION) 'or above!'), cls.MINIMUM_VERSION)
raise SystemExit(1) raise SystemExit(1)
except (OSError, RuntimeError, IndexError, ValueError): except (OSError, RuntimeError, IndexError, ValueError):
LOG.error(_('Unable to determine dnsmasq version. ' LOG.error(_LE('Unable to determine dnsmasq version. '
'Please ensure that its version is %s ' 'Please ensure that its version is %s '
'or above!'), cls.MINIMUM_VERSION) 'or above!'), cls.MINIMUM_VERSION)
raise SystemExit(1) raise SystemExit(1)
return float(ver) return float(ver)
@ -436,8 +437,8 @@ class Dnsmasq(DhcpLocalProcess):
# If all subnets turn off dhcp, kill the process. # If all subnets turn off dhcp, kill the process.
if not self._enable_dhcp(): if not self._enable_dhcp():
self.disable() self.disable()
LOG.debug(_('Killing dhcpmasq for network since all subnets have ' LOG.debug('Killing dhcpmasq for network since all subnets have '
'turned off DHCP: %s'), self.network.id) 'turned off DHCP: %s', self.network.id)
return return
self._release_unused_leases() self._release_unused_leases()
@ -448,8 +449,8 @@ class Dnsmasq(DhcpLocalProcess):
cmd = ['kill', '-HUP', self.pid] cmd = ['kill', '-HUP', self.pid]
utils.execute(cmd, self.root_helper) utils.execute(cmd, self.root_helper)
else: else:
LOG.debug(_('Pid %d is stale, relaunching dnsmasq'), self.pid) LOG.debug('Pid %d is stale, relaunching dnsmasq', self.pid)
LOG.debug(_('Reloading allocations for network: %s'), self.network.id) LOG.debug('Reloading allocations for network: %s', self.network.id)
self.device_manager.update(self.network, self.interface_name) self.device_manager.update(self.network, self.interface_name)
def _iter_hosts(self): def _iter_hosts(self):
@ -500,7 +501,7 @@ class Dnsmasq(DhcpLocalProcess):
buf = six.StringIO() buf = six.StringIO()
filename = self.get_conf_file_name('host') filename = self.get_conf_file_name('host')
LOG.debug(_('Building host file: %s'), filename) LOG.debug('Building host file: %s', filename)
for (port, alloc, hostname, name) in self._iter_hosts(): for (port, alloc, hostname, name) in self._iter_hosts():
# (dzyu) Check if it is legal ipv6 address, if so, need wrap # (dzyu) Check if it is legal ipv6 address, if so, need wrap
# it with '[]' to let dnsmasq to distinguish MAC address from # it with '[]' to let dnsmasq to distinguish MAC address from
@ -509,7 +510,7 @@ class Dnsmasq(DhcpLocalProcess):
if netaddr.valid_ipv6(ip_address): if netaddr.valid_ipv6(ip_address):
ip_address = '[%s]' % ip_address ip_address = '[%s]' % ip_address
LOG.debug(_('Adding %(mac)s : %(name)s : %(ip)s'), LOG.debug('Adding %(mac)s : %(name)s : %(ip)s',
{"mac": port.mac_address, "name": name, {"mac": port.mac_address, "name": name,
"ip": ip_address}) "ip": ip_address})
@ -522,7 +523,7 @@ class Dnsmasq(DhcpLocalProcess):
(port.mac_address, name, ip_address)) (port.mac_address, name, ip_address))
utils.replace_file(filename, buf.getvalue()) utils.replace_file(filename, buf.getvalue())
LOG.debug(_('Done building host file %s'), filename) LOG.debug('Done building host file %s', filename)
return filename return filename
def _read_hosts_file_leases(self, filename): def _read_hosts_file_leases(self, filename):
@ -788,17 +789,16 @@ class DeviceManager(object):
self.root_helper = root_helper self.root_helper = root_helper
self.plugin = plugin self.plugin = plugin
if not conf.interface_driver: if not conf.interface_driver:
msg = _('An interface driver must be specified') LOG.error(_LE('An interface driver must be specified'))
LOG.error(msg)
raise SystemExit(1) raise SystemExit(1)
try: try:
self.driver = importutils.import_object( self.driver = importutils.import_object(
conf.interface_driver, conf) conf.interface_driver, conf)
except Exception as e: except Exception as e:
msg = (_("Error importing interface driver '%(driver)s': " LOG.error(_LE("Error importing interface driver '%(driver)s': "
"%(inner)s") % {'driver': conf.interface_driver, "%(inner)s"),
'inner': e}) {'driver': conf.interface_driver,
LOG.error(msg) 'inner': e})
raise SystemExit(1) raise SystemExit(1)
def get_interface_name(self, network, port): def get_interface_name(self, network, port):
@ -835,8 +835,9 @@ class DeviceManager(object):
continue continue
if gateway != subnet.gateway_ip: if gateway != subnet.gateway_ip:
m = _('Setting gateway for dhcp netns on net %(n)s to %(ip)s') LOG.debug('Setting gateway for dhcp netns on net %(n)s to '
LOG.debug(m, {'n': network.id, 'ip': subnet.gateway_ip}) '%(ip)s',
{'n': network.id, 'ip': subnet.gateway_ip})
device.route.add_gateway(subnet.gateway_ip) device.route.add_gateway(subnet.gateway_ip)
@ -845,8 +846,7 @@ class DeviceManager(object):
# No subnets on the network have a valid gateway. Clean it up to avoid # No subnets on the network have a valid gateway. Clean it up to avoid
# confusion from seeing an invalid gateway here. # confusion from seeing an invalid gateway here.
if gateway is not None: if gateway is not None:
msg = _('Removing gateway for dhcp netns on net %s') LOG.debug('Removing gateway for dhcp netns on net %s', network.id)
LOG.debug(msg, network.id)
device.route.delete_gateway(gateway) device.route.delete_gateway(gateway)
@ -889,8 +889,8 @@ class DeviceManager(object):
# check for a reserved DHCP port # check for a reserved DHCP port
if dhcp_port is None: if dhcp_port is None:
LOG.debug(_('DHCP port %(device_id)s on network %(network_id)s' LOG.debug('DHCP port %(device_id)s on network %(network_id)s'
' does not yet exist. Checking for a reserved port.'), ' does not yet exist. Checking for a reserved port.',
{'device_id': device_id, 'network_id': network.id}) {'device_id': device_id, 'network_id': network.id})
for port in network.ports: for port in network.ports:
port_device_id = getattr(port, 'device_id', None) port_device_id = getattr(port, 'device_id', None)
@ -903,9 +903,9 @@ class DeviceManager(object):
# DHCP port has not yet been created. # DHCP port has not yet been created.
if dhcp_port is None: if dhcp_port is None:
LOG.debug(_('DHCP port %(device_id)s on network %(network_id)s' LOG.debug('DHCP port %(device_id)s on network %(network_id)s'
' does not yet exist.'), {'device_id': device_id, ' does not yet exist.', {'device_id': device_id,
'network_id': network.id}) 'network_id': network.id})
port_dict = dict( port_dict = dict(
name='', name='',
admin_state_up=True, admin_state_up=True,
@ -938,7 +938,7 @@ class DeviceManager(object):
if ip_lib.ensure_device_is_ready(interface_name, if ip_lib.ensure_device_is_ready(interface_name,
self.root_helper, self.root_helper,
network.namespace): network.namespace):
LOG.debug(_('Reusing existing device: %s.'), interface_name) LOG.debug('Reusing existing device: %s.', interface_name)
else: else:
self.driver.plug(network.id, self.driver.plug(network.id,
port.id, port.id,


@ -26,7 +26,7 @@ from neutron.agent.linux import utils
from neutron.common import constants as n_const from neutron.common import constants as n_const
from neutron.common import exceptions from neutron.common import exceptions
from neutron.extensions import flavor from neutron.extensions import flavor
from neutron.openstack.common.gettextutils import _LE from neutron.openstack.common.gettextutils import _LE, _LI
from neutron.openstack.common import importutils from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging from neutron.openstack.common import log as logging
@ -262,7 +262,7 @@ class OVSInterfaceDriver(LinuxInterfaceDriver):
if self.conf.ovs_use_veth: if self.conf.ovs_use_veth:
root_dev.link.set_up() root_dev.link.set_up()
else: else:
LOG.info(_("Device %s already exists"), device_name) LOG.info(_LI("Device %s already exists"), device_name)
def unplug(self, device_name, bridge=None, namespace=None, prefix=None): def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
"""Unplug the interface.""" """Unplug the interface."""
@ -280,9 +280,9 @@ class OVSInterfaceDriver(LinuxInterfaceDriver):
self.root_helper, self.root_helper,
namespace) namespace)
device.link.delete() device.link.delete()
LOG.debug(_("Unplugged interface '%s'"), device_name) LOG.debug("Unplugged interface '%s'", device_name)
except RuntimeError: except RuntimeError:
LOG.error(_("Failed unplugging interface '%s'"), LOG.error(_LE("Failed unplugging interface '%s'"),
device_name) device_name)
@ -317,7 +317,7 @@ class MidonetInterfaceDriver(LinuxInterfaceDriver):
utils.execute(cmd, self.root_helper) utils.execute(cmd, self.root_helper)
else: else:
LOG.info(_("Device %s already exists"), device_name) LOG.info(_LI("Device %s already exists"), device_name)
def unplug(self, device_name, bridge=None, namespace=None, prefix=None): def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
# the port will be deleted by the dhcp agent that will call the plugin # the port will be deleted by the dhcp agent that will call the plugin
@ -327,8 +327,8 @@ class MidonetInterfaceDriver(LinuxInterfaceDriver):
try: try:
device.link.delete() device.link.delete()
except RuntimeError: except RuntimeError:
LOG.error(_("Failed unplugging interface '%s'"), device_name) LOG.error(_LE("Failed unplugging interface '%s'"), device_name)
LOG.debug(_("Unplugged interface '%s'"), device_name) LOG.debug("Unplugged interface '%s'", device_name)
ip_lib.IPWrapper( ip_lib.IPWrapper(
self.root_helper, namespace).garbage_collect_namespace() self.root_helper, namespace).garbage_collect_namespace()
@ -380,7 +380,7 @@ class IVSInterfaceDriver(LinuxInterfaceDriver):
ns_dev.link.set_up() ns_dev.link.set_up()
root_dev.link.set_up() root_dev.link.set_up()
else: else:
LOG.info(_("Device %s already exists"), device_name) LOG.info(_LI("Device %s already exists"), device_name)
def unplug(self, device_name, bridge=None, namespace=None, prefix=None): def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
"""Unplug the interface.""" """Unplug the interface."""
@ -392,9 +392,9 @@ class IVSInterfaceDriver(LinuxInterfaceDriver):
self.root_helper, self.root_helper,
namespace) namespace)
device.link.delete() device.link.delete()
LOG.debug(_("Unplugged interface '%s'"), device_name) LOG.debug("Unplugged interface '%s'", device_name)
except RuntimeError: except RuntimeError:
LOG.error(_("Failed unplugging interface '%s'"), LOG.error(_LE("Failed unplugging interface '%s'"),
device_name) device_name)
@ -427,16 +427,16 @@ class BridgeInterfaceDriver(LinuxInterfaceDriver):
ns_veth.link.set_up() ns_veth.link.set_up()
else: else:
LOG.info(_("Device %s already exists"), device_name) LOG.info(_LI("Device %s already exists"), device_name)
def unplug(self, device_name, bridge=None, namespace=None, prefix=None): def unplug(self, device_name, bridge=None, namespace=None, prefix=None):
"""Unplug the interface.""" """Unplug the interface."""
device = ip_lib.IPDevice(device_name, self.root_helper, namespace) device = ip_lib.IPDevice(device_name, self.root_helper, namespace)
try: try:
device.link.delete() device.link.delete()
LOG.debug(_("Unplugged interface '%s'"), device_name) LOG.debug("Unplugged interface '%s'", device_name)
except RuntimeError: except RuntimeError:
LOG.error(_("Failed unplugging interface '%s'"), LOG.error(_LE("Failed unplugging interface '%s'"),
device_name) device_name)
@ -495,6 +495,6 @@ class MetaInterfaceDriver(LinuxInterfaceDriver):
return driver.unplug(device_name, bridge, namespace, prefix) return driver.unplug(device_name, bridge, namespace, prefix)
def _load_driver(self, driver_provider): def _load_driver(self, driver_provider):
LOG.debug(_("Driver location: %s"), driver_provider) LOG.debug("Driver location: %s", driver_provider)
plugin_klass = importutils.import_class(driver_provider) plugin_klass = importutils.import_class(driver_provider)
return plugin_klass(self.conf) return plugin_klass(self.conf)


@ -22,6 +22,7 @@ from neutron.agent.linux import iptables_comments as ic
from neutron.agent.linux import iptables_manager from neutron.agent.linux import iptables_manager
from neutron.common import constants from neutron.common import constants
from neutron.common import ipv6_utils from neutron.common import ipv6_utils
from neutron.openstack.common.gettextutils import _LI
from neutron.openstack.common import log as logging from neutron.openstack.common import log as logging
@ -84,7 +85,7 @@ class IptablesFirewallDriver(firewall.FirewallDriver):
self.sg_members[sg_id] = sg_members self.sg_members[sg_id] = sg_members
def prepare_port_filter(self, port): def prepare_port_filter(self, port):
LOG.debug(_("Preparing device (%s) filter"), port['device']) LOG.debug("Preparing device (%s) filter", port['device'])
self._remove_chains() self._remove_chains()
self.filtered_ports[port['device']] = port self.filtered_ports[port['device']] = port
# each security group has it own chains # each security group has it own chains
@ -92,10 +93,10 @@ class IptablesFirewallDriver(firewall.FirewallDriver):
self.iptables.apply() self.iptables.apply()
def update_port_filter(self, port): def update_port_filter(self, port):
LOG.debug(_("Updating device (%s) filter"), port['device']) LOG.debug("Updating device (%s) filter", port['device'])
if port['device'] not in self.filtered_ports: if port['device'] not in self.filtered_ports:
LOG.info(_('Attempted to update port filter which is not ' LOG.info(_LI('Attempted to update port filter which is not '
'filtered %s'), port['device']) 'filtered %s'), port['device'])
return return
self._remove_chains() self._remove_chains()
self.filtered_ports[port['device']] = port self.filtered_ports[port['device']] = port
@ -103,10 +104,10 @@ class IptablesFirewallDriver(firewall.FirewallDriver):
self.iptables.apply() self.iptables.apply()
def remove_port_filter(self, port): def remove_port_filter(self, port):
LOG.debug(_("Removing device (%s) filter"), port['device']) LOG.debug("Removing device (%s) filter", port['device'])
if not self.filtered_ports.get(port['device']): if not self.filtered_ports.get(port['device']):
LOG.info(_('Attempted to remove port filter which is not ' LOG.info(_LI('Attempted to remove port filter which is not '
'filtered %r'), port) 'filtered %r'), port)
return return
self._remove_chains() self._remove_chains()
self.filtered_ports.pop(port['device'], None) self.filtered_ports.pop(port['device'], None)


@ -29,6 +29,7 @@ from neutron.agent.linux import iptables_comments as ic
from neutron.agent.linux import utils as linux_utils from neutron.agent.linux import utils as linux_utils
from neutron.common import utils from neutron.common import utils
from neutron.openstack.common import excutils from neutron.openstack.common import excutils
from neutron.openstack.common.gettextutils import _LE, _LW
from neutron.openstack.common import lockutils from neutron.openstack.common import lockutils
from neutron.openstack.common import log as logging from neutron.openstack.common import log as logging
@ -151,7 +152,7 @@ class IptablesTable(object):
chain_set = self._select_chain_set(wrap) chain_set = self._select_chain_set(wrap)
if name not in chain_set: if name not in chain_set:
LOG.warn(_('Attempted to remove chain %s which does not exist'), LOG.warn(_LW('Attempted to remove chain %s which does not exist'),
name) name)
return return
@ -231,8 +232,8 @@ class IptablesTable(object):
self.wrap_name, self.wrap_name,
comment=comment)) comment=comment))
except ValueError: except ValueError:
LOG.warn(_('Tried to remove rule that was not there:' LOG.warn(_LW('Tried to remove rule that was not there:'
' %(chain)r %(rule)r %(wrap)r %(top)r'), ' %(chain)r %(rule)r %(wrap)r %(top)r'),
{'chain': chain, 'rule': rule, {'chain': chain, 'rule': rule,
'top': top, 'wrap': wrap}) 'top': top, 'wrap': wrap})
@ -388,10 +389,10 @@ class IptablesManager(object):
try: try:
with lockutils.lock(lock_name, utils.SYNCHRONIZED_PREFIX, True): with lockutils.lock(lock_name, utils.SYNCHRONIZED_PREFIX, True):
LOG.debug(_('Got semaphore / lock "%s"'), lock_name) LOG.debug('Got semaphore / lock "%s"', lock_name)
return self._apply_synchronized() return self._apply_synchronized()
finally: finally:
LOG.debug(_('Semaphore / lock released "%s"'), lock_name) LOG.debug('Semaphore / lock released "%s"', lock_name)
def _apply_synchronized(self): def _apply_synchronized(self):
"""Apply the current in-memory set of iptables rules. """Apply the current in-memory set of iptables rules.
@ -442,10 +443,10 @@ class IptablesManager(object):
all_lines[log_start:log_end], all_lines[log_start:log_end],
log_start + 1) log_start + 1)
) )
LOG.error(_("IPTablesManager.apply failed to apply the " LOG.error(_LE("IPTablesManager.apply failed to apply the "
"following set of iptables rules:\n%s"), "following set of iptables rules:\n%s"),
'\n'.join(log_lines)) '\n'.join(log_lines))
LOG.debug(_("IPTablesManager.apply completed with success")) LOG.debug("IPTablesManager.apply completed with success")
def _find_table(self, lines, table_name): def _find_table(self, lines, table_name):
if len(lines) < 3: if len(lines) < 3:
@ -455,7 +456,7 @@ class IptablesManager(object):
start = lines.index('*%s' % table_name) - 1 start = lines.index('*%s' % table_name) - 1
except ValueError: except ValueError:
# Couldn't find table_name # Couldn't find table_name
LOG.debug(_('Unable to find table %s'), table_name) LOG.debug('Unable to find table %s', table_name)
return (0, 0) return (0, 0)
end = lines[start:].index('COMMIT') + start + 2 end = lines[start:].index('COMMIT') + start + 2
return (start, end) return (start, end)
@ -659,8 +660,8 @@ class IptablesManager(object):
"""Return the sum of the traffic counters of all rules of a chain.""" """Return the sum of the traffic counters of all rules of a chain."""
cmd_tables = self._get_traffic_counters_cmd_tables(chain, wrap) cmd_tables = self._get_traffic_counters_cmd_tables(chain, wrap)
if not cmd_tables: if not cmd_tables:
LOG.warn(_('Attempted to get traffic counters of chain %s which ' LOG.warn(_LW('Attempted to get traffic counters of chain %s which '
'does not exist'), chain) 'does not exist'), chain)
return return
name = get_chain_name(chain, wrap) name = get_chain_name(chain, wrap)


@ -23,7 +23,7 @@ from neutron.agent.linux import ip_lib
from neutron.agent.linux import utils from neutron.agent.linux import utils
from neutron.common import exceptions from neutron.common import exceptions
from neutron.openstack.common import excutils from neutron.openstack.common import excutils
from neutron.openstack.common.gettextutils import _LI, _LW from neutron.openstack.common.gettextutils import _LE, _LI, _LW
from neutron.openstack.common import log as logging from neutron.openstack.common import log as logging
from neutron.plugins.common import constants from neutron.plugins.common import constants
@ -70,8 +70,8 @@ class BaseOVS(object):
return utils.execute(full_args, root_helper=self.root_helper) return utils.execute(full_args, root_helper=self.root_helper)
except Exception as e: except Exception as e:
with excutils.save_and_reraise_exception() as ctxt: with excutils.save_and_reraise_exception() as ctxt:
LOG.error(_("Unable to execute %(cmd)s. " LOG.error(_LE("Unable to execute %(cmd)s. "
"Exception: %(exception)s"), "Exception: %(exception)s"),
{'cmd': full_args, 'exception': e}) {'cmd': full_args, 'exception': e})
if not check_error: if not check_error:
ctxt.reraise = False ctxt.reraise = False
@ -168,7 +168,8 @@ class OVSBridge(BaseOVS):
return utils.execute(full_args, root_helper=self.root_helper, return utils.execute(full_args, root_helper=self.root_helper,
process_input=process_input) process_input=process_input)
except Exception as e: except Exception as e:
LOG.error(_("Unable to execute %(cmd)s. Exception: %(exception)s"), LOG.error(_LE("Unable to execute %(cmd)s. Exception: "
"%(exception)s"),
{'cmd': full_args, 'exception': e}) {'cmd': full_args, 'exception': e})
def count_flows(self): def count_flows(self):
@ -239,9 +240,9 @@ class OVSBridge(BaseOVS):
ofport = self.get_port_ofport(port_name) ofport = self.get_port_ofport(port_name)
if (tunnel_type == constants.TYPE_VXLAN and if (tunnel_type == constants.TYPE_VXLAN and
ofport == INVALID_OFPORT): ofport == INVALID_OFPORT):
LOG.error(_('Unable to create VXLAN tunnel port. Please ensure ' LOG.error(_LE('Unable to create VXLAN tunnel port. Please ensure '
'that an openvswitch version that supports VXLAN is ' 'that an openvswitch version that supports VXLAN is '
'installed.')) 'installed.'))
return ofport return ofport
def add_patch_port(self, local_name, remote_name): def add_patch_port(self, local_name, remote_name):
@ -288,8 +289,8 @@ class OVSBridge(BaseOVS):
return utils.execute(args, root_helper=self.root_helper).strip() return utils.execute(args, root_helper=self.root_helper).strip()
except Exception as e: except Exception as e:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.error(_("Unable to execute %(cmd)s. " LOG.error(_LE("Unable to execute %(cmd)s. "
"Exception: %(exception)s"), "Exception: %(exception)s"),
{'cmd': args, 'exception': e}) {'cmd': args, 'exception': e})
# returns a VIF object for each VIF port # returns a VIF object for each VIF port
@ -336,7 +337,7 @@ class OVSBridge(BaseOVS):
try: try:
int_ofport = int(ofport) int_ofport = int(ofport)
except (ValueError, TypeError): except (ValueError, TypeError):
LOG.warn(_("Found not yet ready openvswitch port: %s"), row) LOG.warn(_LW("Found not yet ready openvswitch port: %s"), row)
else: else:
if int_ofport > 0: if int_ofport > 0:
if ("iface-id" in external_ids and if ("iface-id" in external_ids and
@ -351,7 +352,7 @@ class OVSBridge(BaseOVS):
external_ids["xs-vif-uuid"]) external_ids["xs-vif-uuid"])
edge_ports.add(iface_id) edge_ports.add(iface_id)
else: else:
LOG.warn(_("Found failed openvswitch port: %s"), row) LOG.warn(_LW("Found failed openvswitch port: %s"), row)
return edge_ports return edge_ports
def get_port_tag_dict(self): def get_port_tag_dict(self):
@ -519,7 +520,7 @@ class DeferredOVSBridge(object):
if exc_type is None: if exc_type is None:
self.apply_flows() self.apply_flows()
else: else:
LOG.exception(_("OVS flows could not be applied on bridge %s"), LOG.exception(_LE("OVS flows could not be applied on bridge %s"),
self.br.br_name) self.br.br_name)
@ -529,7 +530,7 @@ def get_bridge_for_iface(root_helper, iface):
try: try:
return utils.execute(args, root_helper=root_helper).strip() return utils.execute(args, root_helper=root_helper).strip()
except Exception: except Exception:
LOG.exception(_("Interface %s not found."), iface) LOG.exception(_LE("Interface %s not found."), iface)
return None return None
@ -540,7 +541,7 @@ def get_bridges(root_helper):
return utils.execute(args, root_helper=root_helper).strip().split("\n") return utils.execute(args, root_helper=root_helper).strip().split("\n")
except Exception as e: except Exception as e:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_("Unable to retrieve bridges. Exception: %s"), e) LOG.exception(_LE("Unable to retrieve bridges. Exception: %s"), e)
def get_bridge_external_bridge_id(root_helper, bridge): def get_bridge_external_bridge_id(root_helper, bridge):
@ -549,7 +550,7 @@ def get_bridge_external_bridge_id(root_helper, bridge):
try: try:
return utils.execute(args, root_helper=root_helper).strip() return utils.execute(args, root_helper=root_helper).strip()
except Exception: except Exception:
LOG.exception(_("Bridge %s not found."), bridge) LOG.exception(_LE("Bridge %s not found."), bridge)
return None return None
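For reference, the marker pattern these ovs_lib hunks apply looks like the sketch below. The imports mirror the ones added in this file; LOG and port_id are hypothetical stand-ins, and the snippet is illustrative only, not part of the change:

    from neutron.openstack.common.gettextutils import _LE, _LI, _LW
    from neutron.openstack.common import log as logging

    LOG = logging.getLogger(__name__)
    port_id = 'tap1234'  # hypothetical value, for illustration only

    LOG.debug("Processing port %s", port_id)                 # debug: no hint, lazy args
    LOG.info(_LI("Port %s is up"), port_id)                  # info: _LI hint
    LOG.warn(_LW("Port %s has no binding"), port_id)         # warn: _LW hint
    LOG.error(_LE("Unable to configure port %s"), port_id)   # error: _LE hint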


@ -15,6 +15,7 @@
import eventlet import eventlet
from neutron.agent.linux import async_process from neutron.agent.linux import async_process
from neutron.openstack.common.gettextutils import _LE
from neutron.openstack.common import log as logging from neutron.openstack.common import log as logging
@ -41,13 +42,13 @@ class OvsdbMonitor(async_process.AsyncProcess):
if not data: if not data:
return return
self._stdout_lines.put(data) self._stdout_lines.put(data)
LOG.debug(_('Output received from ovsdb monitor: %s') % data) LOG.debug('Output received from ovsdb monitor: %s', data)
return data return data
def _read_stderr(self): def _read_stderr(self):
data = super(OvsdbMonitor, self)._read_stderr() data = super(OvsdbMonitor, self)._read_stderr()
if data: if data:
LOG.error(_('Error received from ovsdb monitor: %s') % data) LOG.error(_LE('Error received from ovsdb monitor: %s'), data)
# Do not return value to ensure that stderr output will # Do not return value to ensure that stderr output will
# stop the monitor. # stop the monitor.
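Besides dropping the translation, the debug change above also switches from eager %-formatting to passing the argument to the logger, so the string is only built if the record is actually emitted. A minimal before/after sketch of that one line (LOG and data as in the hunk):

    # Before: the message is formatted even when debug logging is disabled.
    LOG.debug(_('Output received from ovsdb monitor: %s') % data)

    # After: the logger formats lazily, only when the debug record is handled.
    LOG.debug('Output received from ovsdb monitor: %s', data)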


@ -43,7 +43,7 @@ def create_process(cmd, root_helper=None, addl_env=None):
cmd = shlex.split(root_helper) + cmd cmd = shlex.split(root_helper) + cmd
cmd = map(str, cmd) cmd = map(str, cmd)
LOG.debug(_("Running command: %s"), cmd) LOG.debug("Running command: %s", cmd)
env = os.environ.copy() env = os.environ.copy()
if addl_env: if addl_env:
env.update(addl_env) env.update(addl_env)


@ -38,7 +38,7 @@ from neutron.common import utils
from neutron import context from neutron import context
from neutron.openstack.common.cache import cache from neutron.openstack.common.cache import cache
from neutron.openstack.common import excutils from neutron.openstack.common import excutils
from neutron.openstack.common.gettextutils import _LW from neutron.openstack.common.gettextutils import _LE, _LW
from neutron.openstack.common import log as logging from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall from neutron.openstack.common import loopingcall
from neutron import wsgi from neutron import wsgi
@ -147,7 +147,7 @@ class MetadataProxyHandler(object):
@webob.dec.wsgify(RequestClass=webob.Request) @webob.dec.wsgify(RequestClass=webob.Request)
def __call__(self, req): def __call__(self, req):
try: try:
LOG.debug(_("Request: %s"), req) LOG.debug("Request: %s", req)
instance_id, tenant_id = self._get_instance_and_tenant_id(req) instance_id, tenant_id = self._get_instance_and_tenant_id(req)
if instance_id: if instance_id:
@ -156,7 +156,7 @@ class MetadataProxyHandler(object):
return webob.exc.HTTPNotFound() return webob.exc.HTTPNotFound()
except Exception: except Exception:
LOG.exception(_("Unexpected error.")) LOG.exception(_LE("Unexpected error."))
msg = _('An unknown error has occurred. ' msg = _('An unknown error has occurred. '
'Please try your request again.') 'Please try your request again.')
return webob.exc.HTTPInternalServerError(explanation=unicode(msg)) return webob.exc.HTTPInternalServerError(explanation=unicode(msg))
@ -286,11 +286,10 @@ class MetadataProxyHandler(object):
req.response.body = content req.response.body = content
return req.response return req.response
elif resp.status == 403: elif resp.status == 403:
msg = _( LOG.warn(_LW(
'The remote metadata server responded with Forbidden. This ' 'The remote metadata server responded with Forbidden. This '
'response usually occurs when shared secrets do not match.' 'response usually occurs when shared secrets do not match.'
) ))
LOG.warn(msg)
return webob.exc.HTTPForbidden() return webob.exc.HTTPForbidden()
elif resp.status == 400: elif resp.status == 400:
return webob.exc.HTTPBadRequest() return webob.exc.HTTPBadRequest()
@ -412,12 +411,12 @@ class UnixDomainMetadataProxy(object):
use_call=self.agent_state.get('start_flag')) use_call=self.agent_state.get('start_flag'))
except AttributeError: except AttributeError:
# This means the server does not support report_state # This means the server does not support report_state
LOG.warn(_('Neutron server does not support state report.' LOG.warn(_LW('Neutron server does not support state report.'
' State report for this agent will be disabled.')) ' State report for this agent will be disabled.'))
self.heartbeat.stop() self.heartbeat.stop()
return return
except Exception: except Exception:
LOG.exception(_("Failed reporting state!")) LOG.exception(_LE("Failed reporting state!"))
return return
self.agent_state.pop('start_flag', None) self.agent_state.pop('start_flag', None)


@ -26,6 +26,7 @@ import webob
from neutron.agent.linux import daemon from neutron.agent.linux import daemon
from neutron.common import config from neutron.common import config
from neutron.common import utils from neutron.common import utils
from neutron.openstack.common.gettextutils import _LE
from neutron.openstack.common import log as logging from neutron.openstack.common import log as logging
from neutron import wsgi from neutron import wsgi
@ -63,7 +64,7 @@ class NetworkMetadataProxyHandler(object):
@webob.dec.wsgify(RequestClass=webob.Request) @webob.dec.wsgify(RequestClass=webob.Request)
def __call__(self, req): def __call__(self, req):
LOG.debug(_("Request: %s"), req) LOG.debug("Request: %s", req)
try: try:
return self._proxy_request(req.remote_addr, return self._proxy_request(req.remote_addr,
req.method, req.method,
@ -71,7 +72,7 @@ class NetworkMetadataProxyHandler(object):
req.query_string, req.query_string,
req.body) req.body)
except Exception: except Exception:
LOG.exception(_("Unexpected error.")) LOG.exception(_LE("Unexpected error."))
msg = _('An unknown error has occurred. ' msg = _('An unknown error has occurred. '
'Please try your request again.') 'Please try your request again.')
return webob.exc.HTTPInternalServerError(explanation=unicode(msg)) return webob.exc.HTTPInternalServerError(explanation=unicode(msg))


@ -29,6 +29,7 @@ from neutron.agent.linux import ip_lib
from neutron.agent.linux import ovs_lib from neutron.agent.linux import ovs_lib
from neutron.api.v2 import attributes from neutron.api.v2 import attributes
from neutron.common import config from neutron.common import config
from neutron.openstack.common.gettextutils import _LE
from neutron.openstack.common import importutils from neutron.openstack.common import importutils
from neutron.openstack.common import log as logging from neutron.openstack.common import log as logging
@ -113,7 +114,7 @@ def unplug_device(conf, device):
bridge = ovs_lib.OVSBridge(bridge_name, root_helper) bridge = ovs_lib.OVSBridge(bridge_name, root_helper)
bridge.delete_port(device.name) bridge.delete_port(device.name)
else: else:
LOG.debug(_('Unable to find bridge for device: %s'), device.name) LOG.debug('Unable to find bridge for device: %s', device.name)
def destroy_namespace(conf, namespace, force=False): def destroy_namespace(conf, namespace, force=False):
@ -137,7 +138,7 @@ def destroy_namespace(conf, namespace, force=False):
ip.garbage_collect_namespace() ip.garbage_collect_namespace()
except Exception: except Exception:
LOG.exception(_('Error unable to destroy namespace: %s'), namespace) LOG.exception(_LE('Error unable to destroy namespace: %s'), namespace)
def main(): def main():


@ -21,6 +21,7 @@ from neutron.agent.linux import interface
from neutron.agent.linux import ip_lib from neutron.agent.linux import ip_lib
from neutron.agent.linux import ovs_lib from neutron.agent.linux import ovs_lib
from neutron.common import config from neutron.common import config
from neutron.openstack.common.gettextutils import _LI
from neutron.openstack.common import log as logging from neutron.openstack.common import log as logging
@ -70,7 +71,7 @@ def delete_neutron_ports(ports, root_helper):
if ip_lib.device_exists(port): if ip_lib.device_exists(port):
device = ip_lib.IPDevice(port, root_helper) device = ip_lib.IPDevice(port, root_helper)
device.link.delete() device.link.delete()
LOG.info(_("Delete %s"), port) LOG.info(_LI("Deleting port: %s"), port)
def main(): def main():
@ -100,11 +101,11 @@ def main():
conf.AGENT.root_helper) conf.AGENT.root_helper)
for bridge in bridges: for bridge in bridges:
LOG.info(_("Cleaning %s"), bridge) LOG.info(_LI("Cleaning bridge: %s"), bridge)
ovs = ovs_lib.OVSBridge(bridge, conf.AGENT.root_helper) ovs = ovs_lib.OVSBridge(bridge, conf.AGENT.root_helper)
ovs.delete_ports(all_ports=conf.ovs_all_ports) ovs.delete_ports(all_ports=conf.ovs_all_ports)
# Remove remaining ports created by Neutron (usually veth pair) # Remove remaining ports created by Neutron (usually veth pair)
delete_neutron_ports(ports, conf.AGENT.root_helper) delete_neutron_ports(ports, conf.AGENT.root_helper)
LOG.info(_("OVS cleanup completed successfully")) LOG.info(_LI("OVS cleanup completed successfully"))


@ -18,7 +18,7 @@ from oslo import messaging
from neutron.common import rpc as n_rpc from neutron.common import rpc as n_rpc
from neutron.common import topics from neutron.common import topics
from neutron.openstack.common.gettextutils import _LW
from neutron.openstack.common import log as logging from neutron.openstack.common import log as logging
from neutron.openstack.common import timeutils from neutron.openstack.common import timeutils
@ -107,7 +107,7 @@ class PluginApi(n_rpc.RpcProxy):
# may not work correctly, however it can function in 'degraded' # may not work correctly, however it can function in 'degraded'
# mode, in that DVR routers may not be in the system yet, and # mode, in that DVR routers may not be in the system yet, and
# it might be not necessary to retrieve info about the host. # it might be not necessary to retrieve info about the host.
LOG.warn(_('DVR functionality requires a server upgrade.')) LOG.warn(_LW('DVR functionality requires a server upgrade.'))
res = [ res = [
self.call(context, self.call(context,
self.make_msg('get_device_details', device=device, self.make_msg('get_device_details', device=device,


@ -64,8 +64,8 @@ def _is_valid_driver_combination():
def is_firewall_enabled(): def is_firewall_enabled():
if not _is_valid_driver_combination(): if not _is_valid_driver_combination():
LOG.warn(_("Driver configuration doesn't match with " LOG.warn(_LW("Driver configuration doesn't match with "
"enable_security_group")) "enable_security_group"))
return cfg.CONF.SECURITYGROUP.enable_security_group return cfg.CONF.SECURITYGROUP.enable_security_group
@ -87,8 +87,8 @@ class SecurityGroupServerRpcApiMixin(object):
"""A mix-in that enable SecurityGroup support in plugin rpc.""" """A mix-in that enable SecurityGroup support in plugin rpc."""
def security_group_rules_for_devices(self, context, devices): def security_group_rules_for_devices(self, context, devices):
LOG.debug(_("Get security group rules " LOG.debug("Get security group rules "
"for devices via rpc %r"), devices) "for devices via rpc %r", devices)
return self.call(context, return self.call(context,
self.make_msg('security_group_rules_for_devices', self.make_msg('security_group_rules_for_devices',
devices=devices), devices=devices),
@ -111,9 +111,9 @@ class SecurityGroupAgentRpcCallbackMixin(object):
sg_agent = None sg_agent = None
def _security_groups_agent_not_set(self): def _security_groups_agent_not_set(self):
LOG.warning(_("Security group agent binding currently not set. " LOG.warning(_LW("Security group agent binding currently not set. "
"This should be set by the end of the init " "This should be set by the end of the init "
"process.")) "process."))
def security_groups_rule_updated(self, context, **kwargs): def security_groups_rule_updated(self, context, **kwargs):
"""Callback for security group rule update. """Callback for security group rule update.
@ -121,8 +121,8 @@ class SecurityGroupAgentRpcCallbackMixin(object):
:param security_groups: list of updated security_groups :param security_groups: list of updated security_groups
""" """
security_groups = kwargs.get('security_groups', []) security_groups = kwargs.get('security_groups', [])
LOG.debug( LOG.debug("Security group rule updated on remote: %s",
_("Security group rule updated on remote: %s"), security_groups) security_groups)
if not self.sg_agent: if not self.sg_agent:
return self._security_groups_agent_not_set() return self._security_groups_agent_not_set()
self.sg_agent.security_groups_rule_updated(security_groups) self.sg_agent.security_groups_rule_updated(security_groups)
@ -133,15 +133,15 @@ class SecurityGroupAgentRpcCallbackMixin(object):
:param security_groups: list of updated security_groups :param security_groups: list of updated security_groups
""" """
security_groups = kwargs.get('security_groups', []) security_groups = kwargs.get('security_groups', [])
LOG.debug( LOG.debug("Security group member updated on remote: %s",
_("Security group member updated on remote: %s"), security_groups) security_groups)
if not self.sg_agent: if not self.sg_agent:
return self._security_groups_agent_not_set() return self._security_groups_agent_not_set()
self.sg_agent.security_groups_member_updated(security_groups) self.sg_agent.security_groups_member_updated(security_groups)
def security_groups_provider_updated(self, context, **kwargs): def security_groups_provider_updated(self, context, **kwargs):
"""Callback for security group provider update.""" """Callback for security group provider update."""
LOG.debug(_("Provider rule updated")) LOG.debug("Provider rule updated")
if not self.sg_agent: if not self.sg_agent:
return self._security_groups_agent_not_set() return self._security_groups_agent_not_set()
self.sg_agent.security_groups_provider_updated() self.sg_agent.security_groups_provider_updated()
@ -154,10 +154,10 @@ class SecurityGroupAgentRpcMixin(object):
def init_firewall(self, defer_refresh_firewall=False): def init_firewall(self, defer_refresh_firewall=False):
firewall_driver = cfg.CONF.SECURITYGROUP.firewall_driver firewall_driver = cfg.CONF.SECURITYGROUP.firewall_driver
LOG.debug(_("Init firewall settings (driver=%s)"), firewall_driver) LOG.debug("Init firewall settings (driver=%s)", firewall_driver)
if not _is_valid_driver_combination(): if not _is_valid_driver_combination():
LOG.warn(_("Driver configuration doesn't match " LOG.warn(_LW("Driver configuration doesn't match "
"with enable_security_group")) "with enable_security_group"))
if not firewall_driver: if not firewall_driver:
firewall_driver = 'neutron.agent.firewall.NoopFirewallDriver' firewall_driver = 'neutron.agent.firewall.NoopFirewallDriver'
self.firewall = importutils.import_object(firewall_driver) self.firewall = importutils.import_object(firewall_driver)
@ -257,8 +257,8 @@ class SecurityGroupAgentRpcMixin(object):
devices.append(device['device']) devices.append(device['device'])
if devices: if devices:
if self.defer_refresh_firewall: if self.defer_refresh_firewall:
LOG.debug(_("Adding %s devices to the list of devices " LOG.debug("Adding %s devices to the list of devices "
"for which firewall needs to be refreshed"), "for which firewall needs to be refreshed",
devices) devices)
self.devices_to_refilter |= set(devices) self.devices_to_refilter |= set(devices)
else: else:
@ -305,7 +305,7 @@ class SecurityGroupAgentRpcMixin(object):
with self.firewall.defer_apply(): with self.firewall.defer_apply():
for device in devices.values(): for device in devices.values():
LOG.debug(_("Update port filter for %s"), device['device']) LOG.debug("Update port filter for %s", device['device'])
self.firewall.update_port_filter(device) self.firewall.update_port_filter(device)
if self.use_enhanced_rpc: if self.use_enhanced_rpc:
LOG.debug("Update security group information for ports %s", LOG.debug("Update security group information for ports %s",
@ -328,7 +328,7 @@ class SecurityGroupAgentRpcMixin(object):
updated devices updated devices
""" """
if new_devices: if new_devices:
LOG.debug(_("Preparing device filters for %d new devices"), LOG.debug("Preparing device filters for %d new devices",
len(new_devices)) len(new_devices))
self.prepare_devices_filter(new_devices) self.prepare_devices_filter(new_devices)
# These data structures are cleared here in order to avoid # These data structures are cleared here in order to avoid
@ -341,7 +341,7 @@ class SecurityGroupAgentRpcMixin(object):
# refresh providing a precise list of devices for which firewall # refresh providing a precise list of devices for which firewall
# should be refreshed # should be refreshed
if global_refresh_firewall: if global_refresh_firewall:
LOG.debug(_("Refreshing firewall for all filtered devices")) LOG.debug("Refreshing firewall for all filtered devices")
self.refresh_firewall() self.refresh_firewall()
else: else:
# If a device is both in new and updated devices # If a device is both in new and updated devices
@ -349,7 +349,7 @@ class SecurityGroupAgentRpcMixin(object):
updated_devices = ((updated_devices | devices_to_refilter) - updated_devices = ((updated_devices | devices_to_refilter) -
new_devices) new_devices)
if updated_devices: if updated_devices:
LOG.debug(_("Refreshing firewall for %d devices"), LOG.debug("Refreshing firewall for %d devices",
len(updated_devices)) len(updated_devices))
self.refresh_firewall(updated_devices) self.refresh_firewall(updated_devices)


@ -35,6 +35,17 @@ log_translation = re.compile(
r"(.)*LOG\.(audit|error|info|warn|warning|critical|exception)\(\s*('|\")") r"(.)*LOG\.(audit|error|info|warn|warning|critical|exception)\(\s*('|\")")
author_tag_re = (re.compile("^\s*#\s*@?(a|A)uthor"), author_tag_re = (re.compile("^\s*#\s*@?(a|A)uthor"),
re.compile("^\.\.\s+moduleauthor::")) re.compile("^\.\.\s+moduleauthor::"))
log_translation_hint = re.compile(
r"(.)*LOG\.(audit|error|info|warn|warning|critical|exception)"
"\(\s*(_\(|'|\")")
def _directory_to_check_translation(filename):
# In order to try and speed up the integration of this we will
# do it on a directory by directory basis. The last patch of the
# series will remove this and the entire code base will be validated.
dirs = ["neutron/agent"]
return any([dir in filename for dir in dirs])
def validate_log_translations(logical_line, physical_line, filename): def validate_log_translations(logical_line, physical_line, filename):
@ -47,6 +58,11 @@ def validate_log_translations(logical_line, physical_line, filename):
if log_translation.match(logical_line): if log_translation.match(logical_line):
yield (0, msg) yield (0, msg)
if _directory_to_check_translation(filename):
msg = "N320: Log messages require translation hints!"
if log_translation_hint.match(logical_line):
yield (0, msg)
def use_jsonutils(logical_line, filename): def use_jsonutils(logical_line, filename):
msg = "N321: jsonutils.%(fun)s must be used instead of json.%(fun)s" msg = "N321: jsonutils.%(fun)s must be used instead of json.%(fun)s"
@ -79,6 +95,21 @@ def no_author_tags(physical_line):
return pos, "N322: Don't use author tags" return pos, "N322: Don't use author tags"
def no_translate_debug_logs(logical_line, filename):
"""Check for 'LOG.debug(_('
As per our translation policy,
https://wiki.openstack.org/wiki/LoggingStandards#Log_Translation
we shouldn't translate debug level logs.
* This check assumes that 'LOG' is a logger.
N319
"""
if _directory_to_check_translation(filename):
if logical_line.startswith("LOG.debug(_("):
yield(0, "N319 Don't translate debug level logs")
def check_assert_called_once(logical_line, filename): def check_assert_called_once(logical_line, filename):
msg = ("N323: assert_called_once is a no-op. please use " msg = ("N323: assert_called_once is a no-op. please use "
"assert_called_once_with to test with explicit parameters or an " "assert_called_once_with to test with explicit parameters or an "
@ -95,3 +126,4 @@ def factory(register):
register(use_jsonutils) register(use_jsonutils)
register(no_author_tags) register(no_author_tags)
register(check_assert_called_once) register(check_assert_called_once)
register(no_translate_debug_logs)
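To illustrate what the new checks catch for files under neutron/agent (per _directory_to_check_translation above), the lines below sketch log calls that would trip or pass them. The sample messages are hypothetical:

    # Flagged by N319 (debug level logs must not be translated):
    LOG.debug(_("Port is up"))

    # Flagged by N320 (non-debug logs need a _LE/_LI/_LW hint, not a bare _()):
    LOG.error(_("Unable to execute command"))

    # Passes both checks:
    LOG.debug("Port is up")
    LOG.error(_LE("Unable to execute command"))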


@ -41,6 +41,12 @@ class HackingTestCase(base.BaseTestCase):
self.assertEqual( self.assertEqual(
0, len(list(checks.validate_log_translations(ok, 0, len(list(checks.validate_log_translations(ok,
ok, 'f')))) ok, 'f'))))
filename = 'neutron/agent/f'
bad = "LOG.%s(_('BAD - by directory'))" % log
self.assertEqual(
1, len(list(checks.validate_log_translations(bad,
bad,
filename))))
def test_use_jsonutils(self): def test_use_jsonutils(self):
def __get_msg(fun): def __get_msg(fun):
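The test added above can also be mirrored interactively by calling the check generators directly. This sketch assumes the checks module is importable as neutron.hacking.checks, as the test's checks.* usage suggests; the sample lines and path are hypothetical:

    from neutron.hacking import checks

    bad_debug = "LOG.debug(_('translated debug'))"
    bad_warn = "LOG.warn(_('warning without a hint'))"
    path = 'neutron/agent/linux/example.py'  # hypothetical file under neutron/agent

    print(list(checks.no_translate_debug_logs(bad_debug, path)))
    # expected: one N319 violation

    print(list(checks.validate_log_translations(bad_warn, bad_warn, path)))
    # expected: one N320 violation (missing translation hint)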


@ -30,6 +30,7 @@ from neutron.agent.linux import interface
from neutron.common import config as base_config from neutron.common import config as base_config
from neutron.common import constants as l3_constants from neutron.common import constants as l3_constants
from neutron.common import exceptions as n_exc from neutron.common import exceptions as n_exc
from neutron.openstack.common.gettextutils import _LE
from neutron.openstack.common import processutils from neutron.openstack.common import processutils
from neutron.openstack.common import uuidutils from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants as p_const from neutron.plugins.common import constants as p_const
@ -1970,8 +1971,8 @@ vrrp_instance VR_1 {
with mock.patch.object(l3_agent, 'LOG') as log: with mock.patch.object(l3_agent, 'LOG') as log:
self.assertRaises(SystemExit, l3_agent.L3NATAgent, self.assertRaises(SystemExit, l3_agent.L3NATAgent,
HOSTNAME, self.conf) HOSTNAME, self.conf)
msg = "Error importing interface driver 'wrong_driver'" msg = _LE("Error importing interface driver '%s'")
log.error.assert_called_once_with(msg) log.error.assert_called_once_with(msg, 'wrong_driver')
def test_metadata_filter_rules(self): def test_metadata_filter_rules(self):
self.conf.set_override('enable_metadata_proxy', False) self.conf.set_override('enable_metadata_proxy', False)