author    Johannes Erdfelt <johannes.erdfelt@rackspace.com>  2012-03-04 19:06:31 +0000
committer Johannes Erdfelt <johannes.erdfelt@rackspace.com>  2012-03-04 22:10:59 +0000
commit    534a894ad18c180791aaa685e97cc5264acce922 (patch)
tree      8cd858c0c3d0dc4ebac4871e3024db40d3ab418f
parent    8813ab185d0b6ad1c111e7f9e346e2ce91c8113b (diff)
Only raw string literals should be used with _()
Fix a number of places where formatted strings were used with _() (causing gettext to not match the string) or variables were used with _() (causing xgettext to not extract a string).

Also, there's no value in internationalizing an empty string.

Change-Id: Iac7dbe46eeaa8ddf03c2a357ecd52f69aa8678aa
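For context, a minimal sketch of the pattern this commit enforces, using examples drawn from the diff below (here _() is the gettext translation function as nova installs it): pass only a raw string literal to _(), and apply % formatting to its result rather than its argument.

    # Wrong: _() receives the already-formatted string, so the message
    # catalog lookup never matches and xgettext has no literal to extract.
    raise exception.NotFound(_("No ComputeNode for %(host)s" % locals()))

    # Wrong: _() receives a variable, so xgettext extracts nothing.
    err = "Not enough parameters, need group_name or group_id"
    raise exception.EC2APIError(_(err))

    # Right: _() receives the raw literal; formatting is applied to the
    # translated result.
    raise exception.NotFound(_("No ComputeNode for %(host)s") % locals())
    err = _("Not enough parameters, need group_name or group_id")
    raise exception.EC2APIError(err)

    # Likewise, an empty string gains nothing from translation:
    raise exception.NoValidHost(reason="")   # rather than reason=_("")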
-rw-r--r--  nova/api/ec2/cloud.py  32
-rw-r--r--  nova/api/openstack/compute/contrib/disk_config.py  2
-rw-r--r--  nova/api/openstack/compute/contrib/networks.py  2
-rw-r--r--  nova/api/openstack/compute/limits.py  6
-rw-r--r--  nova/api/openstack/wsgi.py  2
-rw-r--r--  nova/api/validator.py  8
-rw-r--r--  nova/compute/manager.py  8
-rw-r--r--  nova/db/sqlalchemy/api.py  8
-rw-r--r--  nova/network/manager.py  6
-rw-r--r--  nova/network/quantum/client.py  16
-rw-r--r--  nova/network/quantum/manager.py  4
-rw-r--r--  nova/network/quantum/melange_connection.py  2
-rw-r--r--  nova/network/quantum/nova_ipam_lib.py  6
-rw-r--r--  nova/network/quantum/quantum_connection.py  4
-rw-r--r--  nova/notifier/api.py  6
-rw-r--r--  nova/rpc/impl_carrot.py  2
-rw-r--r--  nova/rpc/impl_kombu.py  2
-rw-r--r--  nova/rpc/impl_qpid.py  4
-rw-r--r--  nova/scheduler/distributed_scheduler.py  6
-rw-r--r--  nova/scheduler/scheduler_options.py  2
-rw-r--r--  nova/scheduler/vsa.py  2
-rw-r--r--  nova/tests/fake_utils.py  2
-rw-r--r--  nova/tests/test_xenapi.py  8
-rw-r--r--  nova/utils.py  34
-rw-r--r--  nova/virt/baremetal/dom.py  2
-rw-r--r--  nova/virt/baremetal/proxy.py  6
-rw-r--r--  nova/virt/baremetal/tilera.py  15
-rw-r--r--  nova/virt/disk/api.py  2
-rw-r--r--  nova/virt/libvirt/connection.py  6
-rw-r--r--  nova/virt/xenapi/vm_utils.py  4
-rw-r--r--  nova/virt/xenapi/vmops.py  4
-rw-r--r--  nova/volume/san.py  14
32 files changed, 114 insertions, 113 deletions
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 4254a9873..b4cf50fd6 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -611,8 +611,8 @@ class CloudController(object):
def revoke_security_group_ingress(self, context, group_name=None,
group_id=None, **kwargs):
if not group_name and not group_id:
- err = "Not enough parameters, need group_name or group_id"
- raise exception.EC2APIError(_(err))
+ err = _("Not enough parameters, need group_name or group_id")
+ raise exception.EC2APIError(err)
self.compute_api.ensure_default_security_group(context)
notfound = exception.SecurityGroupNotFound
if group_name:
@@ -626,8 +626,8 @@ class CloudController(object):
if not security_group:
raise notfound(security_group_id=group_id)
- msg = "Revoke security group ingress %s"
- LOG.audit(_(msg), security_group['name'], context=context)
+ msg = _("Revoke security group ingress %s")
+ LOG.audit(msg, security_group['name'], context=context)
prevalues = []
try:
prevalues = kwargs['ip_permissions']
@@ -638,8 +638,8 @@ class CloudController(object):
for values in prevalues:
rulesvalues = self._rule_args_to_dict(context, values)
if not rulesvalues:
- err = "%s Not enough parameters to build a valid rule"
- raise exception.EC2APIError(_(err % rulesvalues))
+ err = _("%s Not enough parameters to build a valid rule")
+ raise exception.EC2APIError(err % rulesvalues)
for values_for_rule in rulesvalues:
values_for_rule['parent_group_id'] = security_group.id
@@ -665,8 +665,8 @@ class CloudController(object):
def authorize_security_group_ingress(self, context, group_name=None,
group_id=None, **kwargs):
if not group_name and not group_id:
- err = "Not enough parameters, need group_name or group_id"
- raise exception.EC2APIError(_(err))
+ err = _("Not enough parameters, need group_name or group_id")
+ raise exception.EC2APIError(err)
self.compute_api.ensure_default_security_group(context)
notfound = exception.SecurityGroupNotFound
if group_name:
@@ -680,8 +680,8 @@ class CloudController(object):
if not security_group:
raise notfound(security_group_id=group_id)
- msg = "Authorize security group ingress %s"
- LOG.audit(_(msg), security_group['name'], context=context)
+ msg = _("Authorize security group ingress %s")
+ LOG.audit(msg, security_group['name'], context=context)
prevalues = []
try:
prevalues = kwargs['ip_permissions']
@@ -691,14 +691,14 @@ class CloudController(object):
for values in prevalues:
rulesvalues = self._rule_args_to_dict(context, values)
if not rulesvalues:
- err = "%s Not enough parameters to build a valid rule"
- raise exception.EC2APIError(_(err % rulesvalues))
+ err = _("%s Not enough parameters to build a valid rule")
+ raise exception.EC2APIError(err % rulesvalues)
for values_for_rule in rulesvalues:
values_for_rule['parent_group_id'] = security_group.id
if self._security_group_rule_exists(security_group,
values_for_rule):
- err = '%s - This rule already exists in group'
- raise exception.EC2APIError(_(err) % values_for_rule)
+ err = _('%s - This rule already exists in group')
+ raise exception.EC2APIError(err % values_for_rule)
postvalues.append(values_for_rule)
rule_ids = []
@@ -772,8 +772,8 @@ class CloudController(object):
def delete_security_group(self, context, group_name=None, group_id=None,
**kwargs):
if not group_name and not group_id:
- err = "Not enough parameters, need group_name or group_id"
- raise exception.EC2APIError(_(err))
+ err = _("Not enough parameters, need group_name or group_id")
+ raise exception.EC2APIError(err)
notfound = exception.SecurityGroupNotFound
if group_name:
security_group = db.security_group_get_by_name(context,
diff --git a/nova/api/openstack/compute/contrib/disk_config.py b/nova/api/openstack/compute/contrib/disk_config.py
index 648fa389b..041686a57 100644
--- a/nova/api/openstack/compute/contrib/disk_config.py
+++ b/nova/api/openstack/compute/contrib/disk_config.py
@@ -41,7 +41,7 @@ def disk_config_from_api(value):
elif value == 'MANUAL':
return False
else:
- msg = _("%s must be either 'MANUAL' or 'AUTO'." % API_DISK_CONFIG)
+ msg = _("%s must be either 'MANUAL' or 'AUTO'.") % API_DISK_CONFIG
raise exc.HTTPBadRequest(explanation=msg)
diff --git a/nova/api/openstack/compute/contrib/networks.py b/nova/api/openstack/compute/contrib/networks.py
index fc44d1b36..32a4af595 100644
--- a/nova/api/openstack/compute/contrib/networks.py
+++ b/nova/api/openstack/compute/contrib/networks.py
@@ -67,7 +67,7 @@ class NetworkController(object):
def _disassociate(self, request, network_id, body):
context = request.environ['nova.context']
authorize(context)
- LOG.debug(_("Disassociating network with id %s" % network_id))
+ LOG.debug(_("Disassociating network with id %s") % network_id)
try:
self.network_api.disassociate(context, network_id)
except exception.NetworkNotFound:
diff --git a/nova/api/openstack/compute/limits.py b/nova/api/openstack/compute/limits.py
index a20b75bc9..3245d49d2 100644
--- a/nova/api/openstack/compute/limits.py
+++ b/nova/api/openstack/compute/limits.py
@@ -137,9 +137,9 @@ class Limit(object):
self.water_level = 0
self.capacity = self.unit
self.request_value = float(self.capacity) / float(self.value)
- self.error_message = _("Only %(value)s %(verb)s request(s) can be "
- "made to %(uri)s every %(unit_string)s." %
- self.__dict__)
+ msg = _("Only %(value)s %(verb)s request(s) can be "
+ "made to %(uri)s every %(unit_string)s.")
+ self.error_message = msg % self.__dict__
def __call__(self, verb, url):
"""
diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py
index 02bf0900c..665ee2f26 100644
--- a/nova/api/openstack/wsgi.py
+++ b/nova/api/openstack/wsgi.py
@@ -886,7 +886,7 @@ class Resource(wsgi.Application):
msg = _("%(url)s returned with HTTP %(status)d") % msg_dict
except AttributeError, e:
msg_dict = dict(url=request.url, e=e)
- msg = _("%(url)s returned a fault: %(e)s" % msg_dict)
+ msg = _("%(url)s returned a fault: %(e)s") % msg_dict
LOG.info(msg)
diff --git a/nova/api/validator.py b/nova/api/validator.py
index e90918599..f3824075e 100644
--- a/nova/api/validator.py
+++ b/nova/api/validator.py
@@ -137,8 +137,10 @@ def validate(args, validator):
assert callable(f)
if not f(args[key]):
- msg = "%s with value %s failed validator %s" % (
- key, args[key], f.__name__)
- LOG.debug(_(msg))
+ value = args[key]
+ validator = f.__name__
+ msg = _("%(key)s with value %(value)s failed validator"
+ " %(validator)s")
+ LOG.debug(msg % locals())
return False
return True
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index eee4d303c..9f78fec27 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -989,17 +989,17 @@ class ComputeManager(manager.SchedulerDependentManager):
images = fetch_images()
num_images = len(images)
- LOG.debug(_("Found %(num_images)d images (rotation: %(rotation)d)"
- % locals()))
+ LOG.debug(_("Found %(num_images)d images (rotation: %(rotation)d)")
+ % locals())
if num_images > rotation:
# NOTE(sirp): this deletes all backups that exceed the rotation
# limit
excess = len(images) - rotation
- LOG.debug(_("Rotating out %d backups" % excess))
+ LOG.debug(_("Rotating out %d backups") % excess)
for i in xrange(excess):
image = images.pop()
image_id = image['id']
- LOG.debug(_("Deleting image %s" % image_id))
+ LOG.debug(_("Deleting image %s") % image_id)
image_service.delete(context, image_id)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index b824146a0..635071b29 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -562,8 +562,8 @@ def compute_node_utilization_update(context, host, free_ram_mb_delta=0,
with_lockmode('update').\
first()
if compute_node is None:
- raise exception.NotFound(_("No ComputeNode for %(host)s" %
- locals()))
+ raise exception.NotFound(_("No ComputeNode for %(host)s") %
+ locals())
# This table thingy is how we get atomic UPDATE x = x + 1
# semantics.
@@ -597,8 +597,8 @@ def compute_node_utilization_set(context, host, free_ram_mb=None,
with_lockmode('update').\
first()
if compute_node is None:
- raise exception.NotFound(_("No ComputeNode for %(host)s" %
- locals()))
+ raise exception.NotFound(_("No ComputeNode for %(host)s") %
+ locals())
if free_ram_mb != None:
compute_node.free_ram_mb = free_ram_mb
diff --git a/nova/network/manager.py b/nova/network/manager.py
index 7cc12a4f1..9dd75b03b 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -270,7 +270,7 @@ class FloatingIP(object):
self.l3driver.add_floating_ip(floating_ip['address'],
fixed_address, floating_ip['interface'])
except exception.ProcessExecutionError:
- msg = _('Interface %(interface)s not found' % locals())
+ msg = _('Interface %(interface)s not found') % locals()
LOG.debug(msg)
raise exception.NoFloatingIpInterface(interface=interface)
@@ -468,7 +468,7 @@ class FloatingIP(object):
fixed_address = self.db.floating_ip_disassociate(context,
floating_address)
if "Cannot find device" in str(e):
- msg = _('Interface %(interface)s not found' % locals())
+ msg = _('Interface %(interface)s not found') % locals()
LOG.error(msg)
raise exception.NoFloatingIpInterface(interface=interface)
@@ -1384,7 +1384,7 @@ class NetworkManager(manager.SchedulerDependentManager):
if require_disassociated and network.project_id is not None:
raise ValueError(_('Network must be disassociated from project %s'
- ' before delete' % network.project_id))
+ ' before delete') % network.project_id)
db.network_delete_safe(context, network.id)
@property
diff --git a/nova/network/quantum/client.py b/nova/network/quantum/client.py
index 1f7a70c66..92ec3702c 100644
--- a/nova/network/quantum/client.py
+++ b/nova/network/quantum/client.py
@@ -177,8 +177,8 @@ class Client(object):
if self.logger:
self.logger.debug(
- _("Quantum Client Request: %(method)s %(action)s" %
- locals()))
+ _("Quantum Client Request: %(method)s %(action)s") %
+ locals())
if body:
self.logger.debug(body)
@@ -193,7 +193,7 @@ class Client(object):
if status_code in NOT_FOUND_CODES:
raise QuantumNotFoundException(
- _("Quantum entity not found: %s" % data))
+ _("Quantum entity not found: %s") % data)
if status_code in (httplib.OK,
httplib.CREATED,
@@ -203,12 +203,12 @@ class Client(object):
return self.deserialize(data, status_code)
else:
raise QuantumServerException(
- _("Server %(status_code)s error: %(data)s"
- % locals()))
+ _("Server %(status_code)s error: %(data)s")
+ % locals())
except (socket.error, IOError), e:
raise QuantumIOException(_("Unable to connect to "
- "server. Got error: %s" % e))
+ "server. Got error: %s") % e)
def get_status_code(self, response):
"""Returns the integer status code from the response, which
@@ -225,8 +225,8 @@ class Client(object):
elif isinstance(data, dict):
return JSONSerializer().serialize(data, self.content_type())
else:
- raise Exception(_("unable to deserialize object of type = '%s'" %
- type(data)))
+ raise Exception(_("unable to deserialize object of type = '%s'") %
+ type(data))
def deserialize(self, data, status_code):
return JSONSerializer().deserialize(data, self.content_type())
diff --git a/nova/network/quantum/manager.py b/nova/network/quantum/manager.py
index f08b44d98..fad8ecc33 100644
--- a/nova/network/quantum/manager.py
+++ b/nova/network/quantum/manager.py
@@ -203,8 +203,8 @@ class QuantumManager(manager.FloatingIP, manager.FlatManager):
if not self.q_conn.network_exists(q_tenant_id, quantum_net_id):
raise Exception(_("Unable to find existing quantum "
"network for tenant '%(q_tenant_id)s' "
- "with net-id '%(quantum_net_id)s'" %
- locals()))
+ "with net-id '%(quantum_net_id)s'") %
+ locals())
else:
nova_id = self._get_nova_id()
quantum_net_id = self.q_conn.create_network(q_tenant_id, label,
diff --git a/nova/network/quantum/melange_connection.py b/nova/network/quantum/melange_connection.py
index b5393e310..1d7efb37b 100644
--- a/nova/network/quantum/melange_connection.py
+++ b/nova/network/quantum/melange_connection.py
@@ -92,7 +92,7 @@ class MelangeConnection(object):
response_str = response.read()
if response.status < 400:
return response_str
- raise Exception(_("Server returned error: %s" % response_str))
+ raise Exception(_("Server returned error: %s") % response_str)
except (socket.error, IOError), e:
LOG.exception(_('Connection error contacting melange'
' service, retrying'))
diff --git a/nova/network/quantum/nova_ipam_lib.py b/nova/network/quantum/nova_ipam_lib.py
index e49efc2ec..b594913f2 100644
--- a/nova/network/quantum/nova_ipam_lib.py
+++ b/nova/network/quantum/nova_ipam_lib.py
@@ -87,7 +87,7 @@ class QuantumNovaIPAMLib(object):
admin_context = context.elevated()
network = db.network_get_by_uuid(admin_context, net_id)
if not network:
- raise Exception(_("No network with net_id = %s" % net_id))
+ raise Exception(_("No network with net_id = %s") % net_id)
manager.FlatManager.delete_network(self.net_manager,
admin_context, None,
network['uuid'],
@@ -218,8 +218,8 @@ class QuantumNovaIPAMLib(object):
{'allocated': False,
'virtual_interface_id': None})
if len(fixed_ips) == 0:
- LOG.error(_('No fixed IPs to deallocate for vif %s' %
- vif_ref['id']))
+ LOG.error(_('No fixed IPs to deallocate for vif %s') %
+ vif_ref['id'])
def get_allocated_ips(self, context, subnet_id, project_id):
"""Returns a list of (ip, vif_id) pairs"""
diff --git a/nova/network/quantum/quantum_connection.py b/nova/network/quantum/quantum_connection.py
index 487a53af2..38931a39b 100644
--- a/nova/network/quantum/quantum_connection.py
+++ b/nova/network/quantum/quantum_connection.py
@@ -97,7 +97,7 @@ class QuantumClientConnection(object):
vNIC with the specified interface-id.
"""
LOG.debug(_("Connecting interface %(interface_id)s to "
- "net %(net_id)s for %(tenant_id)s" % locals()))
+ "net %(net_id)s for %(tenant_id)s") % locals())
port_data = {'port': {'state': 'ACTIVE'}}
for kw in kwargs:
port_data['port'][kw] = kwargs[kw]
@@ -111,7 +111,7 @@ class QuantumClientConnection(object):
def detach_and_delete_port(self, tenant_id, net_id, port_id):
"""Detach and delete the specified Quantum port."""
LOG.debug(_("Deleting port %(port_id)s on net %(net_id)s"
- " for %(tenant_id)s" % locals()))
+ " for %(tenant_id)s") % locals())
self.client.detach_resource(net_id, port_id, tenant=tenant_id)
self.client.delete_port(net_id, port_id, tenant=tenant_id)
diff --git a/nova/notifier/api.py b/nova/notifier/api.py
index 83ca4aa7a..f4532e828 100644
--- a/nova/notifier/api.py
+++ b/nova/notifier/api.py
@@ -112,7 +112,7 @@ def notify(publisher_id, event_type, priority, payload):
"""
if priority not in log_levels:
raise BadPriorityException(
- _('%s not in valid priorities' % priority))
+ _('%s not in valid priorities') % priority)
# Ensure everything is JSON serializable.
payload = utils.to_primitive(payload, convert_instances=True)
@@ -128,5 +128,5 @@ def notify(publisher_id, event_type, priority, payload):
driver.notify(msg)
except Exception, e:
LOG.exception(_("Problem '%(e)s' attempting to "
- "send to notification system. Payload=%(payload)s" %
- locals()))
+ "send to notification system. Payload=%(payload)s") %
+ locals())
diff --git a/nova/rpc/impl_carrot.py b/nova/rpc/impl_carrot.py
index 26f4e4f07..806b4451d 100644
--- a/nova/rpc/impl_carrot.py
+++ b/nova/rpc/impl_carrot.py
@@ -223,7 +223,7 @@ class Consumer(messaging.Consumer):
# persistent failure occurs.
except Exception, e: # pylint: disable=W0703
if not self.failed_connection:
- LOG.exception(_('Failed to fetch message from queue: %s' % e))
+ LOG.exception(_('Failed to fetch message from queue: %s') % e)
self.failed_connection = True
diff --git a/nova/rpc/impl_kombu.py b/nova/rpc/impl_kombu.py
index 82103b38a..8f527467f 100644
--- a/nova/rpc/impl_kombu.py
+++ b/nova/rpc/impl_kombu.py
@@ -425,7 +425,7 @@ class Connection(object):
for consumer in self.consumers:
consumer.reconnect(self.channel)
LOG.info(_('Connected to AMQP server on '
- '%(hostname)s:%(port)d' % self.params))
+ '%(hostname)s:%(port)d') % self.params)
def reconnect(self):
"""Handles reconnecting and re-establishing queues.
diff --git a/nova/rpc/impl_qpid.py b/nova/rpc/impl_qpid.py
index 2e71d470d..c229f3d8e 100644
--- a/nova/rpc/impl_qpid.py
+++ b/nova/rpc/impl_qpid.py
@@ -337,12 +337,12 @@ class Connection(object):
try:
self.connection.open()
except qpid.messaging.exceptions.ConnectionError, e:
- LOG.error(_('Unable to connect to AMQP server: %s ' % str(e)))
+ LOG.error(_('Unable to connect to AMQP server: %s ') % e)
time.sleep(FLAGS.qpid_reconnect_interval or 1)
else:
break
- LOG.info(_('Connected to AMQP server on %s' % self.broker))
+ LOG.info(_('Connected to AMQP server on %s') % self.broker)
self.session = self.connection.session()
diff --git a/nova/scheduler/distributed_scheduler.py b/nova/scheduler/distributed_scheduler.py
index 0a9436045..e841eb5fd 100644
--- a/nova/scheduler/distributed_scheduler.py
+++ b/nova/scheduler/distributed_scheduler.py
@@ -48,7 +48,7 @@ class DistributedScheduler(driver.Scheduler):
NOTE: We're only focused on compute instances right now,
so this method will always raise NoValidHost()."""
- msg = _("No host selection for %s defined." % topic)
+ msg = _("No host selection for %s defined.") % topic
raise exception.NoValidHost(reason=msg)
def schedule_run_instance(self, context, request_spec, *args, **kwargs):
@@ -72,7 +72,7 @@ class DistributedScheduler(driver.Scheduler):
*args, **kwargs)
if not weighted_hosts:
- raise exception.NoValidHost(reason=_(""))
+ raise exception.NoValidHost(reason="")
# NOTE(comstud): Make sure we do not pass this through. It
# contains an instance of RpcContext that cannot be serialized.
@@ -106,7 +106,7 @@ class DistributedScheduler(driver.Scheduler):
hosts = self._schedule(context, 'compute', request_spec,
*args, **kwargs)
if not hosts:
- raise exception.NoValidHost(reason=_(""))
+ raise exception.NoValidHost(reason="")
host = hosts.pop(0)
# NOTE(comstud): Make sure we do not pass this through. It
diff --git a/nova/scheduler/scheduler_options.py b/nova/scheduler/scheduler_options.py
index c4b7bdbfd..b328cc2ab 100644
--- a/nova/scheduler/scheduler_options.py
+++ b/nova/scheduler/scheduler_options.py
@@ -64,7 +64,7 @@ class SchedulerOptions(object):
return os.path.getmtime(filename)
except os.error, e:
LOG.exception(_("Could not stat scheduler options file "
- "%(filename)s: '%(e)s'", locals()))
+ "%(filename)s: '%(e)s'"), locals())
raise
def _load_file(self, handle):
diff --git a/nova/scheduler/vsa.py b/nova/scheduler/vsa.py
index fc888dc11..f47c19c96 100644
--- a/nova/scheduler/vsa.py
+++ b/nova/scheduler/vsa.py
@@ -181,7 +181,7 @@ class VsaScheduler(simple.SimpleScheduler):
selected_hosts,
unique)
if host is None:
- raise exception.NoValidHost(reason=_(""))
+ raise exception.NoValidHost(reason="")
return (host, qos_cap)
diff --git a/nova/tests/fake_utils.py b/nova/tests/fake_utils.py
index 38e0b2077..802823827 100644
--- a/nova/tests/fake_utils.py
+++ b/nova/tests/fake_utils.py
@@ -93,7 +93,7 @@ def fake_execute(*cmd_parts, **kwargs):
run_as_root=run_as_root,
check_exit_code=check_exit_code)
except exception.ProcessExecutionError as e:
- LOG.debug(_('Faked command raised an exception %s' % str(e)))
+ LOG.debug(_('Faked command raised an exception %s') % e)
raise
stdout = reply[0]
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index fb932d4a4..5c5a9d657 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -633,8 +633,8 @@ class XenAPIVMTestCase(test.TestCase):
# mount point will be the last item of the command list
self._tmpdir = cmd[len(cmd) - 1]
- LOG.debug(_('Creating files in %s to simulate guest agent' %
- self._tmpdir))
+ LOG.debug(_('Creating files in %s to simulate guest agent') %
+ self._tmpdir)
os.makedirs(os.path.join(self._tmpdir, 'usr', 'sbin'))
# Touch the file using open
open(os.path.join(self._tmpdir, 'usr', 'sbin',
@@ -644,8 +644,8 @@ class XenAPIVMTestCase(test.TestCase):
def _umount_handler(cmd, *ignore_args, **ignore_kwargs):
# Umount would normally make files in the mounted filesystem
# disappear, so do that here
- LOG.debug(_('Removing simulated guest agent files in %s' %
- self._tmpdir))
+ LOG.debug(_('Removing simulated guest agent files in %s') %
+ self._tmpdir)
os.remove(os.path.join(self._tmpdir, 'usr', 'sbin',
'xe-update-networking'))
os.rmdir(os.path.join(self._tmpdir, 'usr', 'sbin'))
diff --git a/nova/utils.py b/nova/utils.py
index 0ea6ee93b..10a8c304b 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -830,23 +830,23 @@ def synchronized(name, external=False):
_semaphores[name] = semaphore.Semaphore()
sem = _semaphores[name]
LOG.debug(_('Attempting to grab semaphore "%(lock)s" for method '
- '"%(method)s"...' % {'lock': name,
- 'method': f.__name__}))
+ '"%(method)s"...') % {'lock': name,
+ 'method': f.__name__})
with sem:
LOG.debug(_('Got semaphore "%(lock)s" for method '
- '"%(method)s"...' % {'lock': name,
- 'method': f.__name__}))
+ '"%(method)s"...') % {'lock': name,
+ 'method': f.__name__})
if external and not FLAGS.disable_process_locking:
LOG.debug(_('Attempting to grab file lock "%(lock)s" for '
- 'method "%(method)s"...' %
- {'lock': name, 'method': f.__name__}))
+ 'method "%(method)s"...') %
+ {'lock': name, 'method': f.__name__})
lock_file_path = os.path.join(FLAGS.lock_path,
'nova-%s' % name)
lock = lockfile.FileLock(lock_file_path)
with lock:
LOG.debug(_('Got file lock "%(lock)s" for '
- 'method "%(method)s"...' %
- {'lock': name, 'method': f.__name__}))
+ 'method "%(method)s"...') %
+ {'lock': name, 'method': f.__name__})
retval = f(*args, **kwargs)
else:
retval = f(*args, **kwargs)
@@ -903,15 +903,15 @@ def cleanup_file_locks():
if match is None:
continue
pid = match.group(1)
- LOG.debug(_('Found sentinel %(filename)s for pid %(pid)s' %
- {'filename': filename, 'pid': pid}))
+ LOG.debug(_('Found sentinel %(filename)s for pid %(pid)s') %
+ {'filename': filename, 'pid': pid})
try:
os.kill(int(pid), 0)
except OSError, e:
# PID wasn't found
delete_if_exists(os.path.join(FLAGS.lock_path, filename))
- LOG.debug(_('Cleaned sentinel %(filename)s for pid %(pid)s' %
- {'filename': filename, 'pid': pid}))
+ LOG.debug(_('Cleaned sentinel %(filename)s for pid %(pid)s') %
+ {'filename': filename, 'pid': pid})
# cleanup lock files
for filename in files:
@@ -925,13 +925,13 @@ def cleanup_file_locks():
continue
else:
raise
- msg = _('Found lockfile %(file)s with link count %(count)d' %
- {'file': filename, 'count': stat_info.st_nlink})
+ msg = (_('Found lockfile %(file)s with link count %(count)d') %
+ {'file': filename, 'count': stat_info.st_nlink})
LOG.debug(msg)
if stat_info.st_nlink == 1:
delete_if_exists(os.path.join(FLAGS.lock_path, filename))
- msg = _('Cleaned lockfile %(file)s with link count %(count)d' %
- {'file': filename, 'count': stat_info.st_nlink})
+ msg = (_('Cleaned lockfile %(file)s with link count %(count)d') %
+ {'file': filename, 'count': stat_info.st_nlink})
LOG.debug(msg)
@@ -1071,7 +1071,7 @@ def parse_server_string(server_str):
return (address, port)
except Exception:
- LOG.debug(_('Invalid server_string: %s' % server_str))
+ LOG.debug(_('Invalid server_string: %s') % server_str)
return ('', '')
diff --git a/nova/virt/baremetal/dom.py b/nova/virt/baremetal/dom.py
index 423d273e7..04c889277 100644
--- a/nova/virt/baremetal/dom.py
+++ b/nova/virt/baremetal/dom.py
@@ -112,7 +112,7 @@ class BareMetalDom(object):
self.domains.remove(dom)
continue
- LOG.debug(_(self.domains))
+ LOG.debug(self.domains)
self.store_domain()
def reboot_domain(self, name):
diff --git a/nova/virt/baremetal/proxy.py b/nova/virt/baremetal/proxy.py
index cd2427a6d..788386edd 100644
--- a/nova/virt/baremetal/proxy.py
+++ b/nova/virt/baremetal/proxy.py
@@ -253,7 +253,7 @@ class ProxyConnection(driver.ComputeDriver):
network_info=network_info,
block_device_info=block_device_info)
LOG.debug(_("instance %s: is building"), instance['name'])
- LOG.debug(_(xml_dict))
+ LOG.debug(xml_dict)
def _wait_for_boot():
try:
@@ -471,8 +471,8 @@ class ProxyConnection(driver.ComputeDriver):
for injection in ('metadata', 'key', 'net'):
if locals()[injection]:
LOG.info(_('instance %(inst_name)s: injecting '
- '%(injection)s into image %(img_id)s'
- % locals()))
+ '%(injection)s into image %(img_id)s')
+ % locals())
try:
disk.inject_data(injection_path, key, net, metadata,
partition=target_partition,
diff --git a/nova/virt/baremetal/tilera.py b/nova/virt/baremetal/tilera.py
index d79ce153d..59d4edf61 100644
--- a/nova/virt/baremetal/tilera.py
+++ b/nova/virt/baremetal/tilera.py
@@ -272,14 +272,13 @@ class BareMetalNodes(object):
out_msg = file.readline().find("Unreachable")
utils.execute('sudo', 'rm', tile_output)
if out_msg == -1:
- cmd = ("TILERA_BOARD_#" + str(node_id) + " " + node_ip +
- " is ready")
- LOG.debug(_(cmd))
+ cmd = _("TILERA_BOARD_#%(node_id)s %(node_ip)s is ready")
+ LOG.debug(cmd % locals())
return True
else:
- cmd = ("TILERA_BOARD_#" + str(node_id) + " " +
- node_ip + " is not ready, out_msg=" + out_msg)
- LOG.debug(_(cmd))
+ cmd = _("TILERA_BOARD_#%(node_id)s %(node_ip)s is not ready,"
+ " out_msg=%(out_msg)s")
+ LOG.debug(cmd % locals())
self.power_mgr(node_id, 2)
return False
@@ -290,8 +289,8 @@ class BareMetalNodes(object):
From basepath to /tftpboot, kernel is set based on the given mode
such as 0-NoSet, 1-SetVmlinux, or 9-RemoveVmlinux.
"""
- cmd = "Noting to do for tilera nodes: vmlinux is in CF"
- LOG.debug(_(cmd))
+ cmd = _("Noting to do for tilera nodes: vmlinux is in CF")
+ LOG.debug(cmd)
def sleep_mgr(self, time_in_seconds):
"""
diff --git a/nova/virt/disk/api.py b/nova/virt/disk/api.py
index 8fdc59b80..a9b1067c2 100644
--- a/nova/virt/disk/api.py
+++ b/nova/virt/disk/api.py
@@ -162,7 +162,7 @@ class _DiskImage(object):
for cls in (loop.Mount, nbd.Mount, guestfs.Mount):
if cls.mode == mode:
return cls
- raise exception.Error(_("unknown disk image handler: %s" % mode))
+ raise exception.Error(_("unknown disk image handler: %s") % mode)
def mount(self):
"""Mount a disk image, using the object attributes.
diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py
index c9f42511c..bcb58b6fb 100644
--- a/nova/virt/libvirt/connection.py
+++ b/nova/virt/libvirt/connection.py
@@ -1232,8 +1232,8 @@ class LibvirtConnection(driver.ComputeDriver):
for injection in ('metadata', 'key', 'net', 'admin_password'):
if locals()[injection]:
- LOG.info(_('Injecting %(injection)s into image %(img_id)s'
- % locals()), instance=instance)
+ LOG.info(_('Injecting %(injection)s into image %(img_id)s')
+ % locals(), instance=instance)
try:
disk.inject_data(injection_path,
key, net, metadata, admin_password,
@@ -1810,7 +1810,7 @@ class LibvirtConnection(driver.ComputeDriver):
LOG.info(_('Instance launched has CPU info:\n%s') % cpu_info)
dic = utils.loads(cpu_info)
xml = str(Template(self.cpuinfo_xml, searchList=dic))
- LOG.info(_('to xml...\n:%s ' % xml))
+ LOG.info(_('to xml...\n:%s ') % xml)
u = "http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult"
m = _("CPU doesn't have compatibility.\n\n%(ret)s\n\nRefer to %(u)s")
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 83103d6f2..6da78f7e5 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -728,7 +728,7 @@ class VMHelper(HelperBase):
vdis = json.loads(result)
for vdi in vdis:
LOG.debug(_("xapi 'download_vhd' returned VDI of "
- "type '%(vdi_type)s' with UUID '%(vdi_uuid)s'" % vdi))
+ "type '%(vdi_type)s' with UUID '%(vdi_uuid)s'") % vdi)
cls.scan_sr(session, instance, sr_ref)
@@ -756,7 +756,7 @@ class VMHelper(HelperBase):
cur_vdi_uuid = vdi_rec['uuid']
vdi_size_bytes = int(vdi_rec['physical_utilisation'])
LOG.debug(_('vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes='
- '%(vdi_size_bytes)d' % locals()))
+ '%(vdi_size_bytes)d') % locals())
size_bytes += vdi_size_bytes
return size_bytes
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 306e45891..e0900cc53 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -1269,8 +1269,8 @@ class VMOps(object):
rescue_vm_ref = VMHelper.lookup(self._session,
"%s-rescue" % instance.name)
if rescue_vm_ref:
- raise RuntimeError(_(
- "Instance is already in Rescue Mode: %s" % instance.name))
+ raise RuntimeError(_("Instance is already in Rescue Mode: %s")
+ % instance.name)
vm_ref = VMHelper.lookup(self._session, instance.name)
self._shutdown(instance, vm_ref)
diff --git a/nova/volume/san.py b/nova/volume/san.py
index 80ca88385..5d39ccc2e 100644
--- a/nova/volume/san.py
+++ b/nova/volume/san.py
@@ -694,7 +694,7 @@ class SolidFireSanISCSIDriver(SanISCSIDriver):
cluster_password))[:-1]
header['Authorization'] = 'Basic %s' % auth_key
- LOG.debug(_("Payload for SolidFire API call: %s" % payload))
+ LOG.debug(_("Payload for SolidFire API call: %s") % payload)
connection = httplib.HTTPSConnection(host, port)
connection.request('POST', '/json-rpc/1.0', payload, header)
response = connection.getresponse()
@@ -711,12 +711,12 @@ class SolidFireSanISCSIDriver(SanISCSIDriver):
except (TypeError, ValueError), exc:
connection.close()
- msg = _("Call to json.loads() raised an exception: %s" % exc)
+ msg = _("Call to json.loads() raised an exception: %s") % exc
raise exception.SfJsonEncodeFailure(msg)
connection.close()
- LOG.debug(_("Results of SolidFire API call: %s" % data))
+ LOG.debug(_("Results of SolidFire API call: %s") % data)
return data
def _get_volumes_by_sfaccount(self, account_id):
@@ -730,7 +730,7 @@ class SolidFireSanISCSIDriver(SanISCSIDriver):
params = {'username': sf_account_name}
data = self._issue_api_request('GetAccountByName', params)
if 'result' in data and 'account' in data['result']:
- LOG.debug(_('Found solidfire account: %s' % sf_account_name))
+ LOG.debug(_('Found solidfire account: %s') % sf_account_name)
sfaccount = data['result']['account']
return sfaccount
@@ -744,8 +744,8 @@ class SolidFireSanISCSIDriver(SanISCSIDriver):
sf_account_name = socket.gethostname() + '-' + nova_project_id
sfaccount = self._get_sfaccount_by_name(sf_account_name)
if sfaccount is None:
- LOG.debug(_('solidfire account: %s does not exist, create it...'
- % sf_account_name))
+ LOG.debug(_('solidfire account: %s does not exist, create it...')
+ % sf_account_name)
chap_secret = self._generate_random_string(12)
params = {'username': sf_account_name,
'initiatorSecret': chap_secret,
@@ -878,7 +878,7 @@ class SolidFireSanISCSIDriver(SanISCSIDriver):
volid = v['volumeID']
if found_count != 1:
- LOG.debug(_("Deleting volumeID: %s " % volid))
+ LOG.debug(_("Deleting volumeID: %s ") % volid)
raise exception.DuplicateSfVolumeNames(vol_name=volume['name'])
params = {'volumeID': volid}