summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorjaypipes@gmail.com <>2010-12-11 15:23:40 -0500
committerjaypipes@gmail.com <>2010-12-11 15:23:40 -0500
commit12802a76c775a35e9d5a651bf896cfa25bec547f (patch)
tree8c4411a54ef3a8cabc57a51dd930bd8d6683967a
parenta6645d8a431ed933eef4ea6c42c0224ead6f2272 (diff)
downloadnova-12802a76c775a35e9d5a651bf896cfa25bec547f.tar.gz
nova-12802a76c775a35e9d5a651bf896cfa25bec547f.tar.xz
nova-12802a76c775a35e9d5a651bf896cfa25bec547f.zip
First round of i18n-ifying strings in Nova
-rw-r--r--nova/api/cloudpipe/__init__.py4
-rw-r--r--nova/api/ec2/__init__.py6
-rw-r--r--nova/api/ec2/apirequest.py4
-rw-r--r--nova/api/ec2/cloud.py51
-rw-r--r--nova/api/ec2/metadatarequesthandler.py2
-rw-r--r--nova/api/openstack/__init__.py6
-rw-r--r--nova/auth/dbdriver.py20
-rw-r--r--nova/auth/fakeldap.py2
-rw-r--r--nova/auth/ldapdriver.py69
-rw-r--r--nova/auth/manager.py30
-rw-r--r--nova/cloudpipe/pipelib.py2
-rw-r--r--nova/compute/api.py12
-rw-r--r--nova/compute/disk.py16
-rw-r--r--nova/compute/instance_types.py2
-rw-r--r--nova/compute/manager.py32
-rw-r--r--nova/compute/monitor.py12
-rw-r--r--nova/crypto.py18
-rw-r--r--nova/db/sqlalchemy/api.py48
-rw-r--r--nova/exception.py8
-rw-r--r--nova/fakerabbit.py12
-rw-r--r--nova/image/glance.py8
-rw-r--r--nova/image/s3.py3
-rw-r--r--nova/network/linux_net.py10
-rw-r--r--nova/network/manager.py17
-rw-r--r--nova/objectstore/handler.py20
-rw-r--r--nova/process.py2
-rw-r--r--nova/rpc.py36
-rw-r--r--nova/scheduler/chance.py2
-rw-r--r--nova/scheduler/driver.py2
-rw-r--r--nova/scheduler/manager.py2
-rw-r--r--nova/scheduler/simple.py13
-rw-r--r--nova/server.py4
-rw-r--r--nova/service.py12
-rw-r--r--nova/twistd.py6
-rw-r--r--nova/utils.py14
-rw-r--r--nova/validate.py12
36 files changed, 267 insertions, 252 deletions
diff --git a/nova/api/cloudpipe/__init__.py b/nova/api/cloudpipe/__init__.py
index 6d40990a8..00ad38913 100644
--- a/nova/api/cloudpipe/__init__.py
+++ b/nova/api/cloudpipe/__init__.py
@@ -45,7 +45,7 @@ class API(wsgi.Application):
def __call__(self, req):
if req.method == 'POST':
return self.sign_csr(req)
- _log.debug("Cloudpipe path is %s" % req.path_info)
+ _log.debug(_("Cloudpipe path is %s") % req.path_info)
if req.path_info.endswith("/getca/"):
return self.send_root_ca(req)
return webob.exc.HTTPNotFound()
@@ -56,7 +56,7 @@ class API(wsgi.Application):
return instance['project_id']
def send_root_ca(self, req):
- _log.debug("Getting root ca")
+ _log.debug(_("Getting root ca"))
project_id = self.get_project_id_from_ip(req.remote_addr)
res = webob.Response()
res.headers["Content-Type"] = "text/plain"
diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py
index a6ee16c33..dd87d1f71 100644
--- a/nova/api/ec2/__init__.py
+++ b/nova/api/ec2/__init__.py
@@ -77,7 +77,7 @@ class Authenticate(wsgi.Middleware):
req.host,
req.path)
except exception.Error, ex:
- logging.debug("Authentication Failure: %s" % ex)
+ logging.debug(_("Authentication Failure: %s") % ex)
raise webob.exc.HTTPForbidden()
# Authenticated!
@@ -120,9 +120,9 @@ class Router(wsgi.Middleware):
except:
raise webob.exc.HTTPBadRequest()
- _log.debug('action: %s' % action)
+ _log.debug(_('action: %s') % action)
for key, value in args.items():
- _log.debug('arg: %s\t\tval: %s' % (key, value))
+ _log.debug(_('arg: %s\t\tval: %s') % (key, value))
# Success!
req.environ['ec2.controller'] = controller
diff --git a/nova/api/ec2/apirequest.py b/nova/api/ec2/apirequest.py
index 5758781b6..a90fbeb0c 100644
--- a/nova/api/ec2/apirequest.py
+++ b/nova/api/ec2/apirequest.py
@@ -92,8 +92,8 @@ class APIRequest(object):
method = getattr(self.controller,
_camelcase_to_underscore(self.action))
except AttributeError:
- _error = ('Unsupported API request: controller = %s,'
- 'action = %s') % (self.controller, self.action)
+ _error = _('Unsupported API request: controller = %s,'
+ 'action = %s') % (self.controller, self.action)
_log.warning(_error)
# TODO: Raise custom exception, trap in apiserver,
# and reraise as 400 error.
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 05f8c3d0b..896e6c223 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -114,7 +114,7 @@ class CloudController(object):
start = os.getcwd()
os.chdir(FLAGS.ca_path)
# TODO(vish): Do this with M2Crypto instead
- utils.runthis("Generating root CA: %s", "sh genrootca.sh")
+ utils.runthis(_("Generating root CA: %s"), "sh genrootca.sh")
os.chdir(start)
def _get_mpi_data(self, context, project_id):
@@ -318,11 +318,11 @@ class CloudController(object):
ip_protocol = str(ip_protocol)
if ip_protocol.upper() not in ['TCP', 'UDP', 'ICMP']:
- raise InvalidInputException('%s is not a valid ipProtocol' %
+ raise InvalidInputException(_('%s is not a valid ipProtocol') %
(ip_protocol,))
if ((min(from_port, to_port) < -1) or
(max(from_port, to_port) > 65535)):
- raise InvalidInputException('Invalid port range')
+ raise InvalidInputException(_('Invalid port range'))
values['protocol'] = ip_protocol
values['from_port'] = from_port
@@ -360,7 +360,7 @@ class CloudController(object):
criteria = self._revoke_rule_args_to_dict(context, **kwargs)
if criteria == None:
- raise exception.ApiError("No rule for the specified parameters.")
+ raise exception.ApiError(_("No rule for the specified parameters."))
for rule in security_group.rules:
match = True
@@ -371,7 +371,7 @@ class CloudController(object):
db.security_group_rule_destroy(context, rule['id'])
self._trigger_refresh_security_group(context, security_group)
return True
- raise exception.ApiError("No rule for the specified parameters.")
+ raise exception.ApiError(_("No rule for the specified parameters."))
# TODO(soren): This has only been tested with Boto as the client.
# Unfortunately, it seems Boto is using an old API
@@ -387,8 +387,8 @@ class CloudController(object):
values['parent_group_id'] = security_group.id
if self._security_group_rule_exists(security_group, values):
- raise exception.ApiError('This rule already exists in group %s' %
- group_name)
+ raise exception.ApiError(_('This rule already exists in group %s')
+ % group_name)
security_group_rule = db.security_group_rule_create(context, values)
@@ -416,7 +416,7 @@ class CloudController(object):
def create_security_group(self, context, group_name, group_description):
self.compute_api.ensure_default_security_group(context)
if db.security_group_exists(context, context.project_id, group_name):
- raise exception.ApiError('group %s already exists' % group_name)
+ raise exception.ApiError(_('group %s already exists') % group_name)
group = {'user_id': context.user.id,
'project_id': context.project_id,
@@ -527,13 +527,13 @@ class CloudController(object):
def attach_volume(self, context, volume_id, instance_id, device, **kwargs):
volume_ref = db.volume_get_by_ec2_id(context, volume_id)
if not re.match("^/dev/[a-z]d[a-z]+$", device):
- raise exception.ApiError("Invalid device specified: %s. "
- "Example device: /dev/vdb" % device)
+ raise exception.ApiError(_("Invalid device specified: %s. "
+ "Example device: /dev/vdb") % device)
# TODO(vish): abstract status checking?
if volume_ref['status'] != "available":
- raise exception.ApiError("Volume status must be available")
+ raise exception.ApiError(_("Volume status must be available"))
if volume_ref['attach_status'] == "attached":
- raise exception.ApiError("Volume is already attached")
+ raise exception.ApiError(_("Volume is already attached"))
internal_id = ec2_id_to_internal_id(instance_id)
instance_ref = self.compute_api.get_instance(context, internal_id)
host = instance_ref['host']
@@ -555,10 +555,10 @@ class CloudController(object):
instance_ref = db.volume_get_instance(context.elevated(),
volume_ref['id'])
if not instance_ref:
- raise exception.ApiError("Volume isn't attached to anything!")
+ raise exception.ApiError(_("Volume isn't attached to anything!"))
# TODO(vish): abstract status checking?
if volume_ref['status'] == "available":
- raise exception.ApiError("Volume is already detached")
+ raise exception.ApiError(_("Volume is already detached"))
try:
host = instance_ref['host']
rpc.cast(context,
@@ -687,10 +687,11 @@ class CloudController(object):
def allocate_address(self, context, **kwargs):
# check quota
if quota.allowed_floating_ips(context, 1) < 1:
- logging.warn("Quota exceeeded for %s, tried to allocate address",
+ logging.warn(_("Quota exceeded for %s, tried to allocate "
+ "address"),
context.project_id)
- raise quota.QuotaError("Address quota exceeded. You cannot "
- "allocate any more addresses")
+ raise quota.QuotaError(_("Address quota exceeded. You cannot "
+ "allocate any more addresses"))
network_topic = self._get_network_topic(context)
public_ip = rpc.call(context,
network_topic,
@@ -803,7 +804,7 @@ class CloudController(object):
# TODO: return error if not authorized
volume_ref = db.volume_get_by_ec2_id(context, volume_id)
if volume_ref['status'] != "available":
- raise exception.ApiError("Volume status must be available")
+ raise exception.ApiError(_("Volume status must be available"))
now = datetime.datetime.utcnow()
db.volume_update(context, volume_ref['id'], {'status': 'deleting',
'terminated_at': now})
@@ -834,11 +835,12 @@ class CloudController(object):
def describe_image_attribute(self, context, image_id, attribute, **kwargs):
if attribute != 'launchPermission':
- raise exception.ApiError('attribute not supported: %s' % attribute)
+ raise exception.ApiError(_('attribute not supported: %s')
+ % attribute)
try:
image = self.image_service.show(context, image_id)
except IndexError:
- raise exception.ApiError('invalid id: %s' % image_id)
+ raise exception.ApiError(_('invalid id: %s') % image_id)
result = {'image_id': image_id, 'launchPermission': []}
if image['isPublic']:
result['launchPermission'].append({'group': 'all'})
@@ -848,13 +850,14 @@ class CloudController(object):
operation_type, **kwargs):
# TODO(devcamcar): Support users and groups other than 'all'.
if attribute != 'launchPermission':
- raise exception.ApiError('attribute not supported: %s' % attribute)
+ raise exception.ApiError(_('attribute not supported: %s')
+ % attribute)
if not 'user_group' in kwargs:
- raise exception.ApiError('user or group not specified')
+ raise exception.ApiError(_('user or group not specified'))
if len(kwargs['user_group']) != 1 and kwargs['user_group'][0] != 'all':
- raise exception.ApiError('only group "all" is supported')
+ raise exception.ApiError(_('only group "all" is supported'))
if not operation_type in ['add', 'remove']:
- raise exception.ApiError('operation_type must be add or remove')
+ raise exception.ApiError(_('operation_type must be add or remove'))
return self.image_service.modify(context, image_id, operation_type)
def update_image(self, context, image_id, **kwargs):
diff --git a/nova/api/ec2/metadatarequesthandler.py b/nova/api/ec2/metadatarequesthandler.py
index 2f4f414cc..0e9e686ff 100644
--- a/nova/api/ec2/metadatarequesthandler.py
+++ b/nova/api/ec2/metadatarequesthandler.py
@@ -65,7 +65,7 @@ class MetadataRequestHandler(object):
cc = cloud.CloudController()
meta_data = cc.get_metadata(req.remote_addr)
if meta_data is None:
- logging.error('Failed to get metadata for ip: %s' %
+ logging.error(_('Failed to get metadata for ip: %s') %
req.remote_addr)
raise webob.exc.HTTPNotFound()
data = self.lookup(req.path_info, meta_data)
diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py
index c9efe5222..45a2549c0 100644
--- a/nova/api/openstack/__init__.py
+++ b/nova/api/openstack/__init__.py
@@ -65,7 +65,7 @@ class API(wsgi.Middleware):
try:
return req.get_response(self.application)
except Exception as ex:
- logging.warn("Caught error: %s" % str(ex))
+ logging.warn(_("Caught error: %s") % str(ex))
logging.debug(traceback.format_exc())
exc = webob.exc.HTTPInternalServerError(explanation=str(ex))
return faults.Fault(exc)
@@ -134,7 +134,7 @@ class RateLimitingMiddleware(wsgi.Middleware):
if delay:
# TODO(gundlach): Get the retry-after format correct.
exc = webob.exc.HTTPRequestEntityTooLarge(
- explanation='Too many requests.',
+ explanation=_('Too many requests.'),
headers={'Retry-After': time.time() + delay})
raise faults.Fault(exc)
return self.application
@@ -188,7 +188,7 @@ class APIRouter(wsgi.Router):
controller=sharedipgroups.Controller())
if FLAGS.allow_admin_api:
- logging.debug("Including admin operations in API.")
+ logging.debug(_("Including admin operations in API."))
# TODO: Place routes for admin operations here.
super(APIRouter, self).__init__(mapper)
diff --git a/nova/auth/dbdriver.py b/nova/auth/dbdriver.py
index a1584322b..47e435cb6 100644
--- a/nova/auth/dbdriver.py
+++ b/nova/auth/dbdriver.py
@@ -37,7 +37,6 @@ class DbDriver(object):
def __init__(self):
"""Imports the LDAP module"""
pass
- db
def __enter__(self):
return self
@@ -83,7 +82,7 @@ class DbDriver(object):
user_ref = db.user_create(context.get_admin_context(), values)
return self._db_user_to_auth_user(user_ref)
except exception.Duplicate, e:
- raise exception.Duplicate('User %s already exists' % name)
+ raise exception.Duplicate(_('User %s already exists') % name)
def _db_user_to_auth_user(self, user_ref):
return {'id': user_ref['id'],
@@ -105,8 +104,9 @@ class DbDriver(object):
"""Create a project"""
manager = db.user_get(context.get_admin_context(), manager_uid)
if not manager:
- raise exception.NotFound("Project can't be created because "
- "manager %s doesn't exist" % manager_uid)
+ raise exception.NotFound(_("Project can't be created because "
+ "manager %s doesn't exist")
+ % manager_uid)
# description is a required attribute
if description is None:
@@ -133,8 +133,8 @@ class DbDriver(object):
try:
project = db.project_create(context.get_admin_context(), values)
except exception.Duplicate:
- raise exception.Duplicate("Project can't be created because "
- "project %s already exists" % name)
+ raise exception.Duplicate(_("Project can't be created because "
+ "project %s already exists") % name)
for member in members:
db.project_add_member(context.get_admin_context(),
@@ -155,8 +155,8 @@ class DbDriver(object):
if manager_uid:
manager = db.user_get(context.get_admin_context(), manager_uid)
if not manager:
- raise exception.NotFound("Project can't be modified because "
- "manager %s doesn't exist" %
+ raise exception.NotFound(_("Project can't be modified because "
+ "manager %s doesn't exist") %
manager_uid)
values['project_manager'] = manager['id']
if description:
@@ -243,8 +243,8 @@ class DbDriver(object):
def _validate_user_and_project(self, user_id, project_id):
user = db.user_get(context.get_admin_context(), user_id)
if not user:
- raise exception.NotFound('User "%s" not found' % user_id)
+ raise exception.NotFound(_('User "%s" not found') % user_id)
project = db.project_get(context.get_admin_context(), project_id)
if not project:
- raise exception.NotFound('Project "%s" not found' % project_id)
+ raise exception.NotFound(_('Project "%s" not found') % project_id)
return user, project
diff --git a/nova/auth/fakeldap.py b/nova/auth/fakeldap.py
index 46e0135b4..cdab96b79 100644
--- a/nova/auth/fakeldap.py
+++ b/nova/auth/fakeldap.py
@@ -39,7 +39,7 @@ flags.DEFINE_integer('redis_db', 0, 'Multiple DB keeps tests away')
class Redis(object):
def __init__(self):
if hasattr(self.__class__, '_instance'):
- raise Exception('Attempted to instantiate singleton')
+ raise Exception(_('Attempted to instantiate singleton'))
@classmethod
def instance(cls):
diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py
index c10939d74..e289ea5a2 100644
--- a/nova/auth/ldapdriver.py
+++ b/nova/auth/ldapdriver.py
@@ -159,7 +159,7 @@ class LdapDriver(object):
self.conn.modify_s(self.__uid_to_dn(name), attr)
return self.get_user(name)
else:
- raise exception.NotFound("LDAP object for %s doesn't exist"
+ raise exception.NotFound(_("LDAP object for %s doesn't exist")
% name)
else:
attr = [
@@ -182,11 +182,12 @@ class LdapDriver(object):
description=None, member_uids=None):
"""Create a project"""
if self.__project_exists(name):
- raise exception.Duplicate("Project can't be created because "
- "project %s already exists" % name)
+ raise exception.Duplicate(_("Project can't be created because "
+ "project %s already exists") % name)
if not self.__user_exists(manager_uid):
- raise exception.NotFound("Project can't be created because "
- "manager %s doesn't exist" % manager_uid)
+ raise exception.NotFound(_("Project can't be created because "
+ "manager %s doesn't exist")
+ % manager_uid)
manager_dn = self.__uid_to_dn(manager_uid)
# description is a required attribute
if description is None:
@@ -195,8 +196,8 @@ class LdapDriver(object):
if member_uids is not None:
for member_uid in member_uids:
if not self.__user_exists(member_uid):
- raise exception.NotFound("Project can't be created "
- "because user %s doesn't exist"
+ raise exception.NotFound(_("Project can't be created "
+ "because user %s doesn't exist")
% member_uid)
members.append(self.__uid_to_dn(member_uid))
# always add the manager as a member because members is required
@@ -218,9 +219,9 @@ class LdapDriver(object):
attr = []
if manager_uid:
if not self.__user_exists(manager_uid):
- raise exception.NotFound("Project can't be modified because "
- "manager %s doesn't exist" %
- manager_uid)
+ raise exception.NotFound(_("Project can't be modified because "
+ "manager %s doesn't exist")
+ % manager_uid)
manager_dn = self.__uid_to_dn(manager_uid)
attr.append((self.ldap.MOD_REPLACE, 'projectManager', manager_dn))
if description:
@@ -416,8 +417,9 @@ class LdapDriver(object):
if member_uids is not None:
for member_uid in member_uids:
if not self.__user_exists(member_uid):
- raise exception.NotFound("Group can't be created "
- "because user %s doesn't exist" % member_uid)
+ raise exception.NotFound(_("Group can't be created "
+ "because user %s doesn't exist")
+ % member_uid)
members.append(self.__uid_to_dn(member_uid))
dn = self.__uid_to_dn(uid)
if not dn in members:
@@ -432,8 +434,9 @@ class LdapDriver(object):
def __is_in_group(self, uid, group_dn):
"""Check if user is in group"""
if not self.__user_exists(uid):
- raise exception.NotFound("User %s can't be searched in group "
- "becuase the user doesn't exist" % (uid,))
+ raise exception.NotFound(_("User %s can't be searched in group "
+ "because the user doesn't exist")
+ % uid)
if not self.__group_exists(group_dn):
return False
res = self.__find_object(group_dn,
@@ -444,28 +447,30 @@ class LdapDriver(object):
def __add_to_group(self, uid, group_dn):
"""Add user to group"""
if not self.__user_exists(uid):
- raise exception.NotFound("User %s can't be added to the group "
- "becuase the user doesn't exist" % (uid,))
+ raise exception.NotFound(_("User %s can't be added to the group "
+ "because the user doesn't exist")
+ % uid)
if not self.__group_exists(group_dn):
- raise exception.NotFound("The group at dn %s doesn't exist" %
- (group_dn,))
+ raise exception.NotFound(_("The group at dn %s doesn't exist")
+ % group_dn)
if self.__is_in_group(uid, group_dn):
- raise exception.Duplicate("User %s is already a member of "
- "the group %s" % (uid, group_dn))
+ raise exception.Duplicate(_("User %s is already a member of "
+ "the group %s") % (uid, group_dn))
attr = [(self.ldap.MOD_ADD, 'member', self.__uid_to_dn(uid))]
self.conn.modify_s(group_dn, attr)
def __remove_from_group(self, uid, group_dn):
"""Remove user from group"""
if not self.__group_exists(group_dn):
- raise exception.NotFound("The group at dn %s doesn't exist" %
- (group_dn,))
+ raise exception.NotFound(_("The group at dn %s doesn't exist")
+ % group_dn)
if not self.__user_exists(uid):
- raise exception.NotFound("User %s can't be removed from the "
- "group because the user doesn't exist" % (uid,))
+ raise exception.NotFound(_("User %s can't be removed from the "
+ "group because the user doesn't exist")
+ % uid)
if not self.__is_in_group(uid, group_dn):
- raise exception.NotFound("User %s is not a member of the group" %
- (uid,))
+ raise exception.NotFound(_("User %s is not a member of the group")
+ % uid)
# NOTE(vish): remove user from group and any sub_groups
sub_dns = self.__find_group_dns_with_member(
group_dn, uid)
@@ -479,15 +484,16 @@ class LdapDriver(object):
try:
self.conn.modify_s(group_dn, attr)
except self.ldap.OBJECT_CLASS_VIOLATION:
- logging.debug("Attempted to remove the last member of a group. "
- "Deleting the group at %s instead.", group_dn)
+ logging.debug(_("Attempted to remove the last member of a group. "
+ "Deleting the group at %s instead."), group_dn)
self.__delete_group(group_dn)
def __remove_from_all(self, uid):
"""Remove user from all roles and projects"""
if not self.__user_exists(uid):
- raise exception.NotFound("User %s can't be removed from all "
- "because the user doesn't exist" % (uid,))
+ raise exception.NotFound(_("User %s can't be removed from all "
+ "because the user doesn't exist")
+ % uid)
role_dns = self.__find_group_dns_with_member(
FLAGS.role_project_subtree, uid)
for role_dn in role_dns:
@@ -500,7 +506,8 @@ class LdapDriver(object):
def __delete_group(self, group_dn):
"""Delete Group"""
if not self.__group_exists(group_dn):
- raise exception.NotFound("Group at dn %s doesn't exist" % group_dn)
+ raise exception.NotFound(_("Group at dn %s doesn't exist")
+ % group_dn)
self.conn.delete_s(group_dn)
def __delete_roles(self, project_dn):
diff --git a/nova/auth/manager.py b/nova/auth/manager.py
index 11c3bd6df..417f2b76d 100644
--- a/nova/auth/manager.py
+++ b/nova/auth/manager.py
@@ -257,12 +257,12 @@ class AuthManager(object):
# TODO(vish): check for valid timestamp
(access_key, _sep, project_id) = access.partition(':')
- logging.info('Looking up user: %r', access_key)
+ logging.info(_('Looking up user: %r'), access_key)
user = self.get_user_from_access_key(access_key)
logging.info('user: %r', user)
if user == None:
- raise exception.NotFound('No user found for access key %s' %
- access_key)
+ raise exception.NotFound(_('No user found for access key %s')
+ % access_key)
# NOTE(vish): if we stop using project name as id we need better
# logic to find a default project for user
@@ -271,12 +271,12 @@ class AuthManager(object):
project = self.get_project(project_id)
if project == None:
- raise exception.NotFound('No project called %s could be found' %
- project_id)
+ raise exception.NotFound(_('No project called %s could be found')
+ % project_id)
if not self.is_admin(user) and not self.is_project_member(user,
project):
- raise exception.NotFound('User %s is not a member of project %s' %
- (user.id, project.id))
+ raise exception.NotFound(_('User %s is not a member of project %s')
+ % (user.id, project.id))
if check_type == 's3':
sign = signer.Signer(user.secret.encode())
expected_signature = sign.s3_authorization(headers, verb, path)
@@ -284,7 +284,7 @@ class AuthManager(object):
logging.debug('expected_signature: %s', expected_signature)
logging.debug('signature: %s', signature)
if signature != expected_signature:
- raise exception.NotAuthorized('Signature does not match')
+ raise exception.NotAuthorized(_('Signature does not match'))
elif check_type == 'ec2':
# NOTE(vish): hmac can't handle unicode, so encode ensures that
# secret isn't unicode
@@ -294,7 +294,7 @@ class AuthManager(object):
logging.debug('expected_signature: %s', expected_signature)
logging.debug('signature: %s', signature)
if signature != expected_signature:
- raise exception.NotAuthorized('Signature does not match')
+ raise exception.NotAuthorized(_('Signature does not match'))
return (user, project)
def get_access_key(self, user, project):
@@ -364,7 +364,7 @@ class AuthManager(object):
with self.driver() as drv:
if role == 'projectmanager':
if not project:
- raise exception.Error("Must specify project")
+ raise exception.Error(_("Must specify project"))
return self.is_project_manager(user, project)
global_role = drv.has_role(User.safe_id(user),
@@ -398,9 +398,9 @@ class AuthManager(object):
@param project: Project in which to add local role.
"""
if role not in FLAGS.allowed_roles:
- raise exception.NotFound("The %s role can not be found" % role)
+ raise exception.NotFound(_("The %s role can not be found") % role)
if project is not None and role in FLAGS.global_roles:
- raise exception.NotFound("The %s role is global only" % role)
+ raise exception.NotFound(_("The %s role is global only") % role)
with self.driver() as drv:
drv.add_role(User.safe_id(user), role, Project.safe_id(project))
@@ -546,7 +546,8 @@ class AuthManager(object):
Project.safe_id(project))
if not network_ref['vpn_public_port']:
- raise exception.NotFound('project network data has not been set')
+ raise exception.NotFound(_('project network data has not '
+ 'been set'))
return (network_ref['vpn_public_address'],
network_ref['vpn_public_port'])
@@ -659,8 +660,7 @@ class AuthManager(object):
port=vpn_port)
zippy.writestr(FLAGS.credential_vpn_file, config)
else:
- logging.warn("No vpn data for project %s" %
- pid)
+ logging.warn(_("No vpn data for project %s"), pid)
zippy.writestr(FLAGS.ca_file, crypto.fetch_ca(user.id))
zippy.close()
diff --git a/nova/cloudpipe/pipelib.py b/nova/cloudpipe/pipelib.py
index 3472201cd..bbe91a70c 100644
--- a/nova/cloudpipe/pipelib.py
+++ b/nova/cloudpipe/pipelib.py
@@ -49,7 +49,7 @@ class CloudPipe(object):
self.manager = manager.AuthManager()
def launch_vpn_instance(self, project_id):
- logging.debug("Launching VPN for %s" % (project_id))
+ logging.debug(_("Launching VPN for %s") % (project_id))
project = self.manager.get_project(project_id)
# Make a payload.zip
tmpfolder = tempfile.mkdtemp()
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 8e0efa4cc..e701e540e 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -120,7 +120,7 @@ class ComputeAPI(base.Base):
elevated = context.elevated()
instances = []
- logging.debug("Going to run %s instances...", num_instances)
+ logging.debug(_("Going to run %s instances..."), num_instances)
for num in range(num_instances):
instance = dict(mac_address=utils.generate_mac(),
launch_index=num,
@@ -157,7 +157,7 @@ class ComputeAPI(base.Base):
{"method": "setup_fixed_ip",
"args": {"address": address}})
- logging.debug("Casting to scheduler for %s/%s's instance %s",
+ logging.debug(_("Casting to scheduler for %s/%s's instance %s"),
context.project_id, context.user_id, instance_id)
rpc.cast(context,
FLAGS.scheduler_topic,
@@ -204,12 +204,12 @@ class ComputeAPI(base.Base):
instance = self.db.instance_get_by_internal_id(context,
instance_id)
except exception.NotFound as e:
- logging.warning("Instance %d was not found during terminate",
+ logging.warning(_("Instance %d was not found during terminate"),
instance_id)
raise e
if (instance['state_description'] == 'terminating'):
- logging.warning("Instance %d is already being terminated",
+ logging.warning(_("Instance %d is already being terminated"),
instance_id)
return
@@ -223,7 +223,7 @@ class ComputeAPI(base.Base):
address = self.db.instance_get_floating_address(context,
instance['id'])
if address:
- logging.debug("Disassociating address %s" % address)
+ logging.debug(_("Disassociating address %s") % address)
# NOTE(vish): Right now we don't really care if the ip is
# disassociated. We may need to worry about
# checking this later. Perhaps in the scheduler?
@@ -234,7 +234,7 @@ class ComputeAPI(base.Base):
address = self.db.instance_get_fixed_address(context, instance['id'])
if address:
- logging.debug("Deallocating address %s" % address)
+ logging.debug(_("Deallocating address %s") % address)
# NOTE(vish): Currently, nothing needs to be done on the
# network node until release. If this changes,
# we will need to cast here.
diff --git a/nova/compute/disk.py b/nova/compute/disk.py
index 4338d39f0..8701c3968 100644
--- a/nova/compute/disk.py
+++ b/nova/compute/disk.py
@@ -70,12 +70,12 @@ def partition(infile, outfile, local_bytes=0, resize=True,
yield execute('resize2fs %s' % infile)
file_size = FLAGS.minimum_root_size
elif file_size % sector_size != 0:
- logging.warn("Input partition size not evenly divisible by"
- " sector size: %d / %d", file_size, sector_size)
+ logging.warn(_("Input partition size not evenly divisible by"
+ " sector size: %d / %d"), file_size, sector_size)
primary_sectors = file_size / sector_size
if local_bytes % sector_size != 0:
- logging.warn("Bytes for local storage not evenly divisible"
- " by sector size: %d / %d", local_bytes, sector_size)
+ logging.warn(_("Bytes for local storage not evenly divisible"
+ " by sector size: %d / %d"), local_bytes, sector_size)
local_sectors = local_bytes / sector_size
mbr_last = 62 # a
@@ -121,14 +121,15 @@ def inject_data(image, key=None, net=None, partition=None, execute=None):
"""
out, err = yield execute('sudo losetup -f --show %s' % image)
if err:
- raise exception.Error('Could not attach image to loopback: %s' % err)
+ raise exception.Error(_('Could not attach image to loopback: %s')
+ % err)
device = out.strip()
try:
if not partition is None:
# create partition
out, err = yield execute('sudo kpartx -a %s' % device)
if err:
- raise exception.Error('Failed to load partition: %s' % err)
+ raise exception.Error(_('Failed to load partition: %s') % err)
mapped_device = '/dev/mapper/%sp%s' % (device.split('/')[-1],
partition)
else:
@@ -141,7 +142,8 @@ def inject_data(image, key=None, net=None, partition=None, execute=None):
out, err = yield execute(
'sudo mount %s %s' % (mapped_device, tmpdir))
if err:
- raise exception.Error('Failed to mount filesystem: %s' % err)
+ raise exception.Error(_('Failed to mount filesystem: %s')
+ % err)
try:
if key:
diff --git a/nova/compute/instance_types.py b/nova/compute/instance_types.py
index a2679e0fc..000b3a6d9 100644
--- a/nova/compute/instance_types.py
+++ b/nova/compute/instance_types.py
@@ -37,7 +37,7 @@ def get_by_type(instance_type):
if instance_type is None:
return FLAGS.default_instance_type
if instance_type not in INSTANCE_TYPES:
- raise exception.ApiError("Unknown instance type: %s",
+ raise exception.ApiError(_("Unknown instance type: %s"),
instance_type)
return instance_type
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index dd8d41129..a63ad5e1b 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -91,8 +91,8 @@ class ComputeManager(manager.Manager):
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
if instance_ref['name'] in self.driver.list_instances():
- raise exception.Error("Instance has already been created")
- logging.debug("instance %s: starting...", instance_id)
+ raise exception.Error(_("Instance has already been created"))
+ logging.debug(_("instance %s: starting..."), instance_id)
self.network_manager.setup_compute_network(context, instance_id)
self.db.instance_update(context,
instance_id,
@@ -111,7 +111,7 @@ class ComputeManager(manager.Manager):
instance_id,
{'launched_at': now})
except Exception: # pylint: disable-msg=W0702
- logging.exception("instance %s: Failed to spawn",
+ logging.exception(_("instance %s: Failed to spawn"),
instance_ref['name'])
self.db.instance_set_state(context,
instance_id,
@@ -124,7 +124,7 @@ class ComputeManager(manager.Manager):
def terminate_instance(self, context, instance_id):
"""Terminate an instance on this machine."""
context = context.elevated()
- logging.debug("instance %s: terminating", instance_id)
+ logging.debug(_("instance %s: terminating"), instance_id)
instance_ref = self.db.instance_get(context, instance_id)
volumes = instance_ref.get('volumes', []) or []
@@ -132,8 +132,8 @@ class ComputeManager(manager.Manager):
self.detach_volume(context, instance_id, volume['id'])
if instance_ref['state'] == power_state.SHUTOFF:
self.db.instance_destroy(context, instance_id)
- raise exception.Error('trying to destroy already destroyed'
- ' instance: %s' % instance_id)
+ raise exception.Error(_('trying to destroy already destroyed'
+ ' instance: %s') % instance_id)
yield self.driver.destroy(instance_ref)
# TODO(ja): should we keep it in a terminated state for a bit?
@@ -148,13 +148,13 @@ class ComputeManager(manager.Manager):
self._update_state(context, instance_id)
if instance_ref['state'] != power_state.RUNNING:
- logging.warn('trying to reboot a non-running '
- 'instance: %s (state: %s excepted: %s)',
+ logging.warn(_('trying to reboot a non-running '
+ 'instance: %s (state: %s expected: %s)'),
instance_ref['internal_id'],
instance_ref['state'],
power_state.RUNNING)
- logging.debug('instance %s: rebooting', instance_ref['name'])
+ logging.debug(_('instance %s: rebooting'), instance_ref['name'])
self.db.instance_set_state(context,
instance_id,
power_state.NOSTATE,
@@ -169,7 +169,7 @@ class ComputeManager(manager.Manager):
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
- logging.debug('instance %s: rescuing',
+ logging.debug(_('instance %s: rescuing'),
instance_ref['internal_id'])
self.db.instance_set_state(context,
instance_id,
@@ -185,7 +185,7 @@ class ComputeManager(manager.Manager):
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
- logging.debug('instance %s: unrescuing',
+ logging.debug(_('instance %s: unrescuing'),
instance_ref['internal_id'])
self.db.instance_set_state(context,
instance_id,
@@ -198,7 +198,7 @@ class ComputeManager(manager.Manager):
def get_console_output(self, context, instance_id):
"""Send the console output for an instance."""
context = context.elevated()
- logging.debug("instance %s: getting console output", instance_id)
+ logging.debug(_("instance %s: getting console output"), instance_id)
instance_ref = self.db.instance_get(context, instance_id)
return self.driver.get_console_output(instance_ref)
@@ -208,7 +208,7 @@ class ComputeManager(manager.Manager):
def attach_volume(self, context, instance_id, volume_id, mountpoint):
"""Attach a volume to an instance."""
context = context.elevated()
- logging.debug("instance %s: attaching volume %s to %s", instance_id,
+ logging.debug(_("instance %s: attaching volume %s to %s"), instance_id,
volume_id, mountpoint)
instance_ref = self.db.instance_get(context, instance_id)
dev_path = yield self.volume_manager.setup_compute_volume(context,
@@ -225,7 +225,7 @@ class ComputeManager(manager.Manager):
# NOTE(vish): The inline callback eats the exception info so we
# log the traceback here and reraise the same
# ecxception below.
- logging.exception("instance %s: attach failed %s, removing",
+ logging.exception(_("instance %s: attach failed %s, removing"),
instance_id, mountpoint)
yield self.volume_manager.remove_compute_volume(context,
volume_id)
@@ -237,13 +237,13 @@ class ComputeManager(manager.Manager):
def detach_volume(self, context, instance_id, volume_id):
"""Detach a volume from an instance."""
context = context.elevated()
- logging.debug("instance %s: detaching volume %s",
+ logging.debug(_("instance %s: detaching volume %s"),
instance_id,
volume_id)
instance_ref = self.db.instance_get(context, instance_id)
volume_ref = self.db.volume_get(context, volume_id)
if instance_ref['name'] not in self.driver.list_instances():
- logging.warn("Detaching volume from unknown instance %s",
+ logging.warn(_("Detaching volume from unknown instance %s"),
instance_ref['name'])
else:
yield self.driver.detach_volume(instance_ref['name'],
diff --git a/nova/compute/monitor.py b/nova/compute/monitor.py
index 22653113a..60c347a5e 100644
--- a/nova/compute/monitor.py
+++ b/nova/compute/monitor.py
@@ -255,7 +255,7 @@ class Instance(object):
Updates the instances statistics and stores the resulting graphs
in the internal object store on the cloud controller.
"""
- logging.debug('updating %s...', self.instance_id)
+ logging.debug(_('updating %s...'), self.instance_id)
try:
data = self.fetch_cpu_stats()
@@ -285,7 +285,7 @@ class Instance(object):
graph_disk(self, '1w')
graph_disk(self, '1m')
except Exception:
- logging.exception('unexpected error during update')
+ logging.exception(_('unexpected error during update'))
self.last_updated = utcnow()
@@ -351,7 +351,7 @@ class Instance(object):
rd += rd_bytes
wr += wr_bytes
except TypeError:
- logging.error('Cannot get blockstats for "%s" on "%s"',
+ logging.error(_('Cannot get blockstats for "%s" on "%s"'),
disk, self.instance_id)
raise
@@ -373,7 +373,7 @@ class Instance(object):
rx += stats[0]
tx += stats[4]
except TypeError:
- logging.error('Cannot get ifstats for "%s" on "%s"',
+ logging.error(_('Cannot get ifstats for "%s" on "%s"'),
interface, self.instance_id)
raise
@@ -408,7 +408,7 @@ class InstanceMonitor(object, service.Service):
try:
conn = virt_connection.get_connection(read_only=True)
except Exception, exn:
- logging.exception('unexpected exception getting connection')
+ logging.exception(_('unexpected exception getting connection'))
time.sleep(FLAGS.monitoring_instances_delay)
return
@@ -423,7 +423,7 @@ class InstanceMonitor(object, service.Service):
if not domain_id in self._instances:
instance = Instance(conn, domain_id)
self._instances[domain_id] = instance
- logging.debug('Found instance: %s', domain_id)
+ logging.debug(_('Found instance: %s'), domain_id)
for key in self._instances.keys():
instance = self._instances[key]
diff --git a/nova/crypto.py b/nova/crypto.py
index aacc50b17..af4a06a0c 100644
--- a/nova/crypto.py
+++ b/nova/crypto.py
@@ -39,13 +39,13 @@ from nova import flags
FLAGS = flags.FLAGS
-flags.DEFINE_string('ca_file', 'cacert.pem', 'Filename of root CA')
+flags.DEFINE_string('ca_file', 'cacert.pem', _('Filename of root CA'))
flags.DEFINE_string('keys_path', '$state_path/keys',
- 'Where we keep our keys')
+ _('Where we keep our keys'))
flags.DEFINE_string('ca_path', '$state_path/CA',
- 'Where we keep our root CA')
+ _('Where we keep our root CA'))
flags.DEFINE_boolean('use_intermediate_ca', False,
- 'Should we use intermediate CAs for each project?')
+ _('Should we use intermediate CAs for each project?'))
def ca_path(project_id):
@@ -111,9 +111,9 @@ def generate_x509_cert(subject, bits=1024):
keyfile = os.path.abspath(os.path.join(tmpdir, 'temp.key'))
csrfile = os.path.join(tmpdir, 'temp.csr')
logging.debug("openssl genrsa -out %s %s" % (keyfile, bits))
- utils.runthis("Generating private key: %s",
+ utils.runthis(_("Generating private key: %s"),
"openssl genrsa -out %s %s" % (keyfile, bits))
- utils.runthis("Generating CSR: %s",
+ utils.runthis(_("Generating CSR: %s"),
"openssl req -new -key %s -out %s -batch -subj %s" %
(keyfile, csrfile, subject))
private_key = open(keyfile).read()
@@ -131,7 +131,7 @@ def sign_csr(csr_text, intermediate=None):
if not os.path.exists(user_ca):
start = os.getcwd()
os.chdir(FLAGS.ca_path)
- utils.runthis("Generating intermediate CA: %s",
+ utils.runthis(_("Generating intermediate CA: %s"),
"sh geninter.sh %s" % (intermediate))
os.chdir(start)
return _sign_csr(csr_text, user_ca)
@@ -142,11 +142,11 @@ def _sign_csr(csr_text, ca_folder):
csrfile = open("%s/inbound.csr" % (tmpfolder), "w")
csrfile.write(csr_text)
csrfile.close()
- logging.debug("Flags path: %s" % ca_folder)
+ logging.debug(_("Flags path: %s") % ca_folder)
start = os.getcwd()
# Change working dir to CA
os.chdir(ca_folder)
- utils.runthis("Signing cert: %s",
+ utils.runthis(_("Signing cert: %s"),
"openssl ca -batch -out %s/outbound.crt "
"-config ./openssl.cnf -infiles %s/inbound.csr" %
(tmpfolder, tmpfolder))
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 55036d1d1..4bae170a9 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -41,7 +41,7 @@ FLAGS = flags.FLAGS
def is_admin_context(context):
"""Indicates if the request context is an administrator."""
if not context:
- warnings.warn('Use of empty request context is deprecated',
+ warnings.warn(_('Use of empty request context is deprecated'),
DeprecationWarning)
raise Exception('die')
return context.is_admin
@@ -130,7 +130,7 @@ def service_get(context, service_id, session=None):
first()
if not result:
- raise exception.NotFound('No service for id %s' % service_id)
+ raise exception.NotFound(_('No service for id %s') % service_id)
return result
@@ -227,7 +227,7 @@ def service_get_by_args(context, host, binary):
filter_by(deleted=can_read_deleted(context)).\
first()
if not result:
- raise exception.NotFound('No service for %s, %s' % (host, binary))
+ raise exception.NotFound(_('No service for %s, %s') % (host, binary))
return result
@@ -491,7 +491,7 @@ def fixed_ip_get_by_address(context, address, session=None):
options(joinedload('instance')).\
first()
if not result:
- raise exception.NotFound('No floating ip for address %s' % address)
+ raise exception.NotFound(_('No floating ip for address %s') % address)
if is_user_context(context):
authorize_project_context(context, result.instance.project_id)
@@ -591,7 +591,7 @@ def instance_get(context, instance_id, session=None):
filter_by(deleted=False).\
first()
if not result:
- raise exception.NotFound('No instance for id %s' % instance_id)
+ raise exception.NotFound(_('No instance for id %s') % instance_id)
return result
@@ -669,7 +669,7 @@ def instance_get_by_internal_id(context, internal_id):
filter_by(deleted=False).\
first()
if not result:
- raise exception.NotFound('Instance %s not found' % (internal_id))
+ raise exception.NotFound(_('Instance %s not found') % (internal_id))
return result
@@ -790,7 +790,7 @@ def key_pair_get(context, user_id, name, session=None):
filter_by(deleted=can_read_deleted(context)).\
first()
if not result:
- raise exception.NotFound('no keypair for user %s, name %s' %
+ raise exception.NotFound(_('no keypair for user %s, name %s') %
(user_id, name))
return result
@@ -905,7 +905,7 @@ def network_get(context, network_id, session=None):
filter_by(deleted=False).\
first()
if not result:
- raise exception.NotFound('No network for id %s' % network_id)
+ raise exception.NotFound(_('No network for id %s') % network_id)
return result
@@ -933,7 +933,7 @@ def network_get_by_bridge(context, bridge):
first()
if not result:
- raise exception.NotFound('No network for bridge %s' % bridge)
+ raise exception.NotFound(_('No network for bridge %s') % bridge)
return result
@@ -947,7 +947,7 @@ def network_get_by_instance(_context, instance_id):
filter_by(deleted=False).\
first()
if not rv:
- raise exception.NotFound('No network for instance %s' % instance_id)
+ raise exception.NotFound(_('No network for instance %s') % instance_id)
return rv
@@ -961,7 +961,7 @@ def network_set_host(context, network_id, host_id):
with_lockmode('update').\
first()
if not network_ref:
- raise exception.NotFound('No network for id %s' % network_id)
+ raise exception.NotFound(_('No network for id %s') % network_id)
# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
# then this has concurrency issues
@@ -1073,7 +1073,7 @@ def auth_get_token(_context, token_hash):
filter_by(token_hash=token_hash).\
first()
if not tk:
- raise exception.NotFound('Token %s does not exist' % token_hash)
+ raise exception.NotFound(_('Token %s does not exist') % token_hash)
return tk
@@ -1097,7 +1097,7 @@ def quota_get(context, project_id, session=None):
filter_by(deleted=can_read_deleted(context)).\
first()
if not result:
- raise exception.NotFound('No quota for project_id %s' % project_id)
+ raise exception.NotFound(_('No quota for project_id %s') % project_id)
return result
@@ -1252,7 +1252,7 @@ def volume_get(context, volume_id, session=None):
filter_by(deleted=False).\
first()
if not result:
- raise exception.NotFound('No volume for id %s' % volume_id)
+ raise exception.NotFound(_('No volume for id %s') % volume_id)
return result
@@ -1308,7 +1308,7 @@ def volume_get_by_ec2_id(context, ec2_id):
raise exception.NotAuthorized()
if not result:
- raise exception.NotFound('Volume %s not found' % ec2_id)
+ raise exception.NotFound(_('Volume %s not found') % ec2_id)
return result
@@ -1332,7 +1332,7 @@ def volume_get_instance(context, volume_id):
options(joinedload('instance')).\
first()
if not result:
- raise exception.NotFound('Volume %s not found' % ec2_id)
+ raise exception.NotFound(_('Volume %s not found') % ec2_id)
return result.instance
@@ -1344,7 +1344,7 @@ def volume_get_shelf_and_blade(context, volume_id):
filter_by(volume_id=volume_id).\
first()
if not result:
- raise exception.NotFound('No export device found for volume %s' %
+ raise exception.NotFound(_('No export device found for volume %s') %
volume_id)
return (result.shelf_id, result.blade_id)
@@ -1357,7 +1357,7 @@ def volume_get_iscsi_target_num(context, volume_id):
filter_by(volume_id=volume_id).\
first()
if not result:
- raise exception.NotFound('No target id found for volume %s' %
+ raise exception.NotFound(_('No target id found for volume %s') %
volume_id)
return result.target_num
@@ -1402,7 +1402,7 @@ def security_group_get(context, security_group_id, session=None):
options(joinedload_all('rules')).\
first()
if not result:
- raise exception.NotFound("No secuity group with id %s" %
+ raise exception.NotFound(_("No security group with id %s") %
security_group_id)
return result
@@ -1419,7 +1419,7 @@ def security_group_get_by_name(context, project_id, group_name):
first()
if not result:
raise exception.NotFound(
- 'No security group named %s for project: %s' \
+ _('No security group named %s for project: %s')
% (group_name, project_id))
return result
@@ -1507,7 +1507,7 @@ def security_group_rule_get(context, security_group_rule_id, session=None):
filter_by(id=security_group_rule_id).\
first()
if not result:
- raise exception.NotFound("No secuity group rule with id %s" %
+ raise exception.NotFound(_("No security group rule with id %s") %
security_group_rule_id)
return result
@@ -1543,7 +1543,7 @@ def user_get(context, id, session=None):
first()
if not result:
- raise exception.NotFound('No user for id %s' % id)
+ raise exception.NotFound(_('No user for id %s') % id)
return result
@@ -1559,7 +1559,7 @@ def user_get_by_access_key(context, access_key, session=None):
first()
if not result:
- raise exception.NotFound('No user for access key %s' % access_key)
+ raise exception.NotFound(_('No user for access key %s') % access_key)
return result
@@ -1621,7 +1621,7 @@ def project_get(context, id, session=None):
first()
if not result:
- raise exception.NotFound("No project with id %s" % id)
+ raise exception.NotFound(_("No project with id %s") % id)
return result
diff --git a/nova/exception.py b/nova/exception.py
index 6d6c37338..fd8b00368 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -30,11 +30,11 @@ class ProcessExecutionError(IOError):
def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None,
description=None):
if description is None:
- description = "Unexpected error while running command."
+ description = _("Unexpected error while running command.")
if exit_code is None:
exit_code = '-'
- message = "%s\nCommand: %s\nExit code: %s\nStdout: %r\nStderr: %r" % (
- description, cmd, exit_code, stdout, stderr)
+ message = _("%s\nCommand: %s\nExit code: %s\nStdout: %r\nStderr: %r")\
+ % (description, cmd, exit_code, stdout, stderr)
IOError.__init__(self, message)
@@ -81,7 +81,7 @@ def wrap_exception(f):
except Exception, e:
if not isinstance(e, Error):
#exc_type, exc_value, exc_traceback = sys.exc_info()
- logging.exception('Uncaught exception')
+ logging.exception(_('Uncaught exception'))
#logging.error(traceback.extract_stack(exc_traceback))
raise Error(str(e))
raise
diff --git a/nova/fakerabbit.py b/nova/fakerabbit.py
index c64617931..41e686cff 100644
--- a/nova/fakerabbit.py
+++ b/nova/fakerabbit.py
@@ -37,12 +37,12 @@ class Exchange(object):
self._routes = {}
def publish(self, message, routing_key=None):
- logging.debug('(%s) publish (key: %s) %s',
+ logging.debug(_('(%s) publish (key: %s) %s'),
self.name, routing_key, message)
routing_key = routing_key.split('.')[0]
if routing_key in self._routes:
for f in self._routes[routing_key]:
- logging.debug('Publishing to route %s', f)
+ logging.debug(_('Publishing to route %s'), f)
f(message, routing_key=routing_key)
def bind(self, callback, routing_key):
@@ -82,16 +82,16 @@ class Backend(object):
def queue_declare(self, queue, **kwargs):
if queue not in self._queues:
- logging.debug('Declaring queue %s', queue)
+ logging.debug(_('Declaring queue %s'), queue)
self._queues[queue] = Queue(queue)
def exchange_declare(self, exchange, type, *args, **kwargs):
if exchange not in self._exchanges:
- logging.debug('Declaring exchange %s', exchange)
+ logging.debug(_('Declaring exchange %s'), exchange)
self._exchanges[exchange] = Exchange(exchange, type)
def queue_bind(self, queue, exchange, routing_key, **kwargs):
- logging.debug('Binding %s to %s with key %s',
+ logging.debug(_('Binding %s to %s with key %s'),
queue, exchange, routing_key)
self._exchanges[exchange].bind(self._queues[queue].push,
routing_key)
@@ -117,7 +117,7 @@ class Backend(object):
content_type=content_type,
content_encoding=content_encoding)
message.result = True
- logging.debug('Getting from %s: %s', queue, message)
+ logging.debug(_('Getting from %s: %s'), queue, message)
return message
def prepare_message(self, message_data, delivery_mode,
diff --git a/nova/image/glance.py b/nova/image/glance.py
index 1ca6cf2eb..cb3936df1 100644
--- a/nova/image/glance.py
+++ b/nova/image/glance.py
@@ -77,8 +77,8 @@ class ParallaxClient(object):
data = json.loads(res.read())['images']
return data
else:
- logging.warn("Parallax returned HTTP error %d from "
- "request for /images", res.status_int)
+ logging.warn(_("Parallax returned HTTP error %d from "
+ "request for /images"), res.status_int)
return []
finally:
c.close()
@@ -96,8 +96,8 @@ class ParallaxClient(object):
data = json.loads(res.read())['images']
return data
else:
- logging.warn("Parallax returned HTTP error %d from "
- "request for /images/detail", res.status_int)
+ logging.warn(_("Parallax returned HTTP error %d from "
+ "request for /images/detail"), res.status_int)
return []
finally:
c.close()
diff --git a/nova/image/s3.py b/nova/image/s3.py
index 0a25161de..7b04aa072 100644
--- a/nova/image/s3.py
+++ b/nova/image/s3.py
@@ -79,7 +79,8 @@ class S3ImageService(service.BaseImageService):
result = self.index(context)
result = [i for i in result if i['imageId'] == image_id]
if not result:
- raise exception.NotFound('Image %s could not be found' % image_id)
+ raise exception.NotFound(_('Image %s could not be found')
+ % image_id)
image = result[0]
return image
diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py
index 0fefd9415..16add7689 100644
--- a/nova/network/linux_net.py
+++ b/nova/network/linux_net.py
@@ -135,7 +135,7 @@ def ensure_vlan(vlan_num):
"""Create a vlan unless it already exists"""
interface = "vlan%s" % vlan_num
if not _device_exists(interface):
- logging.debug("Starting VLAN inteface %s", interface)
+ logging.debug(_("Starting VLAN interface %s"), interface)
_execute("sudo vconfig set_name_type VLAN_PLUS_VID_NO_PAD")
_execute("sudo vconfig add %s %s" % (FLAGS.vlan_interface, vlan_num))
_execute("sudo ifconfig %s up" % interface)
@@ -145,7 +145,7 @@ def ensure_vlan(vlan_num):
def ensure_bridge(bridge, interface, net_attrs=None):
"""Create a bridge unless it already exists"""
if not _device_exists(bridge):
- logging.debug("Starting Bridge interface for %s", interface)
+ logging.debug(_("Starting Bridge interface for %s"), interface)
_execute("sudo brctl addbr %s" % bridge)
_execute("sudo brctl setfd %s 0" % bridge)
# _execute("sudo brctl setageing %s 10" % bridge)
@@ -202,9 +202,9 @@ def update_dhcp(context, network_id):
_execute('sudo kill -HUP %d' % pid)
return
except Exception as exc: # pylint: disable-msg=W0703
- logging.debug("Hupping dnsmasq threw %s", exc)
+ logging.debug(_("Hupping dnsmasq threw %s"), exc)
else:
- logging.debug("Pid %d is stale, relaunching dnsmasq", pid)
+ logging.debug(_("Pid %d is stale, relaunching dnsmasq"), pid)
# FLAGFILE and DNSMASQ_INTERFACE in env
env = {'FLAGFILE': FLAGS.dhcpbridge_flagfile,
@@ -276,7 +276,7 @@ def _stop_dnsmasq(network):
try:
_execute('sudo kill -TERM %d' % pid)
except Exception as exc: # pylint: disable-msg=W0703
- logging.debug("Killing dnsmasq threw %s", exc)
+ logging.debug(_("Killing dnsmasq threw %s"), exc)
def _dhcp_file(bridge, kind):
diff --git a/nova/network/manager.py b/nova/network/manager.py
index a7298b47f..e3677459d 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -116,7 +116,7 @@ class NetworkManager(manager.Manager):
def set_network_host(self, context, network_id):
"""Safely sets the host of the network."""
- logging.debug("setting network host")
+ logging.debug(_("setting network host"))
host = self.db.network_set_host(context,
network_id,
self.host)
@@ -175,10 +175,10 @@ class NetworkManager(manager.Manager):
fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address)
instance_ref = fixed_ip_ref['instance']
if not instance_ref:
- raise exception.Error("IP %s leased that isn't associated" %
+ raise exception.Error(_("IP %s leased that isn't associated") %
address)
if instance_ref['mac_address'] != mac:
- raise exception.Error("IP %s leased to bad mac %s vs %s" %
+ raise exception.Error(_("IP %s leased to bad mac %s vs %s") %
(address, instance_ref['mac_address'], mac))
now = datetime.datetime.utcnow()
self.db.fixed_ip_update(context,
@@ -186,7 +186,8 @@ class NetworkManager(manager.Manager):
{'leased': True,
'updated_at': now})
if not fixed_ip_ref['allocated']:
- logging.warn("IP %s leased that was already deallocated", address)
+ logging.warn(_("IP %s leased that was already deallocated"),
+ address)
def release_fixed_ip(self, context, mac, address):
"""Called by dhcp-bridge when ip is released."""
@@ -194,13 +195,13 @@ class NetworkManager(manager.Manager):
fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address)
instance_ref = fixed_ip_ref['instance']
if not instance_ref:
- raise exception.Error("IP %s released that isn't associated" %
+ raise exception.Error(_("IP %s released that isn't associated") %
address)
if instance_ref['mac_address'] != mac:
- raise exception.Error("IP %s released from bad mac %s vs %s" %
+ raise exception.Error(_("IP %s released from bad mac %s vs %s") %
(address, instance_ref['mac_address'], mac))
if not fixed_ip_ref['leased']:
- logging.warn("IP %s released that was not leased", address)
+ logging.warn(_("IP %s released that was not leased"), address)
self.db.fixed_ip_update(context,
fixed_ip_ref['address'],
{'leased': False})
@@ -410,7 +411,7 @@ class VlanManager(NetworkManager):
self.host,
time)
if num:
- logging.debug("Dissassociated %s stale fixed ip(s)", num)
+ logging.debug(_("Disassociated %s stale fixed ip(s)"), num)
def init_host(self):
"""Do any initialization that needs to be run if this is a
diff --git a/nova/objectstore/handler.py b/nova/objectstore/handler.py
index c8920b00c..0c71c3705 100644
--- a/nova/objectstore/handler.py
+++ b/nova/objectstore/handler.py
@@ -102,7 +102,7 @@ def _render_parts(value, write_cb):
_render_parts(subsubvalue, write_cb)
write_cb('</' + utils.utf8(name) + '>')
else:
- raise Exception("Unknown S3 value type %r", value)
+ raise Exception(_("Unknown S3 value type %r"), value)
def get_argument(request, key, default_value):
@@ -134,7 +134,7 @@ def get_context(request):
check_type='s3')
return context.RequestContext(user, project)
except exception.Error as ex:
- logging.debug("Authentication Failure: %s", ex)
+ logging.debug(_("Authentication Failure: %s"), ex)
raise exception.NotAuthorized()
@@ -227,7 +227,7 @@ class BucketResource(ErrorHandlingResource):
def render_PUT(self, request):
"Creates the bucket resource"""
- logging.debug("Creating bucket %s", self.name)
+ logging.debug(_("Creating bucket %s"), self.name)
logging.debug("calling bucket.Bucket.create(%r, %r)",
self.name,
request.context)
@@ -237,7 +237,7 @@ class BucketResource(ErrorHandlingResource):
def render_DELETE(self, request):
"""Deletes the bucket resource"""
- logging.debug("Deleting bucket %s", self.name)
+ logging.debug(_("Deleting bucket %s"), self.name)
bucket_object = bucket.Bucket(self.name)
if not bucket_object.is_authorized(request.context):
@@ -261,7 +261,7 @@ class ObjectResource(ErrorHandlingResource):
Raises NotAuthorized if user in request context is not
authorized to delete the object.
"""
- logging.debug("Getting object: %s / %s", self.bucket.name, self.name)
+ logging.debug(_("Getting object: %s / %s"), self.bucket.name, self.name)
if not self.bucket.is_authorized(request.context):
raise exception.NotAuthorized()
@@ -279,7 +279,7 @@ class ObjectResource(ErrorHandlingResource):
Raises NotAuthorized if user in request context is not
authorized to delete the object.
"""
- logging.debug("Putting object: %s / %s", self.bucket.name, self.name)
+ logging.debug(_("Putting object: %s / %s"), self.bucket.name, self.name)
if not self.bucket.is_authorized(request.context):
raise exception.NotAuthorized()
@@ -298,7 +298,7 @@ class ObjectResource(ErrorHandlingResource):
authorized to delete the object.
"""
- logging.debug("Deleting object: %s / %s",
+ logging.debug(_("Deleting object: %s / %s"),
self.bucket.name,
self.name)
@@ -394,17 +394,17 @@ class ImagesResource(resource.Resource):
image_id = get_argument(request, 'image_id', u'')
image_object = image.Image(image_id)
if not image_object.is_authorized(request.context):
- logging.debug("not authorized for render_POST in images")
+ logging.debug(_("not authorized for render_POST in images"))
raise exception.NotAuthorized()
operation = get_argument(request, 'operation', u'')
if operation:
# operation implies publicity toggle
- logging.debug("handling publicity toggle")
+ logging.debug(_("handling publicity toggle"))
image_object.set_public(operation == 'add')
else:
# other attributes imply update
- logging.debug("update user fields")
+ logging.debug(_("update user fields"))
clean_args = {}
for arg in request.args.keys():
clean_args[arg] = request.args[arg][0]
diff --git a/nova/process.py b/nova/process.py
index b33df048b..25b6723ec 100644
--- a/nova/process.py
+++ b/nova/process.py
@@ -131,7 +131,7 @@ def get_process_output(executable, args=None, env=None, path=None,
cmd = executable
if args:
cmd = " ".join([cmd] + args)
- logging.debug("Running cmd: %s", cmd)
+ logging.debug(_("Running cmd: %s"), cmd)
process_handler = BackRelayWithInput(
deferred,
cmd,
diff --git a/nova/rpc.py b/nova/rpc.py
index 86a29574f..cc3c7dfc8 100644
--- a/nova/rpc.py
+++ b/nova/rpc.py
@@ -91,15 +91,15 @@ class Consumer(messaging.Consumer):
self.failed_connection = False
break
except: # Catching all because carrot sucks
- logging.exception("AMQP server on %s:%d is unreachable." \
- " Trying again in %d seconds." % (
+ logging.exception(_("AMQP server on %s:%d is unreachable."
+ " Trying again in %d seconds.") % (
FLAGS.rabbit_host,
FLAGS.rabbit_port,
FLAGS.rabbit_retry_interval))
self.failed_connection = True
if self.failed_connection:
- logging.exception("Unable to connect to AMQP server" \
- " after %d tries. Shutting down." % FLAGS.rabbit_max_retries)
+ logging.exception(_("Unable to connect to AMQP server"
+ " after %d tries. Shutting down.") % FLAGS.rabbit_max_retries)
sys.exit(1)
def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False):
@@ -116,14 +116,14 @@ class Consumer(messaging.Consumer):
self.declare()
super(Consumer, self).fetch(no_ack, auto_ack, enable_callbacks)
if self.failed_connection:
- logging.error("Reconnected to queue")
+ logging.error(_("Reconnected to queue"))
self.failed_connection = False
# NOTE(vish): This is catching all errors because we really don't
# exceptions to be logged 10 times a second if some
# persistent failure occurs.
except Exception: # pylint: disable-msg=W0703
if not self.failed_connection:
- logging.exception("Failed to fetch message from queue")
+ logging.exception(_("Failed to fetch message from queue"))
self.failed_connection = True
def attach_to_eventlet(self):
@@ -161,7 +161,7 @@ class TopicConsumer(Consumer):
class AdapterConsumer(TopicConsumer):
"""Calls methods on a proxy object based on method and args"""
def __init__(self, connection=None, topic="broadcast", proxy=None):
- LOG.debug('Initing the Adapter Consumer for %s' % (topic))
+ LOG.debug(_('Initializing the Adapter Consumer for %s') % (topic))
self.proxy = proxy
super(AdapterConsumer, self).__init__(connection=connection,
topic=topic)
@@ -176,7 +176,7 @@ class AdapterConsumer(TopicConsumer):
Example: {'method': 'echo', 'args': {'value': 42}}
"""
- LOG.debug('received %s' % (message_data))
+ LOG.debug(_('received %s') % (message_data))
msg_id = message_data.pop('_msg_id', None)
ctxt = _unpack_context(message_data)
@@ -189,8 +189,8 @@ class AdapterConsumer(TopicConsumer):
# messages stay in the queue indefinitely, so for now
# we just log the message and send an error string
# back to the caller
- LOG.warn('no method for message: %s' % (message_data))
- msg_reply(msg_id, 'No method for message: %s' % message_data)
+ LOG.warn(_('no method for message: %s') % (message_data))
+ msg_reply(msg_id, _('No method for message: %s') % message_data)
return
node_func = getattr(self.proxy, str(method))
@@ -246,7 +246,7 @@ def msg_reply(msg_id, reply=None, failure=None):
if failure:
message = failure.getErrorMessage()
traceback = failure.getTraceback()
- logging.error("Returning exception %s to caller", message)
+ logging.error(_("Returning exception %s to caller"), message)
logging.error(traceback)
failure = (failure.type.__name__, str(failure.value), traceback)
conn = Connection.instance()
@@ -287,7 +287,7 @@ def _unpack_context(msg):
if key.startswith('_context_'):
value = msg.pop(key)
context_dict[key[9:]] = value
- LOG.debug('unpacked context: %s', context_dict)
+ LOG.debug(_('unpacked context: %s'), context_dict)
return context.RequestContext.from_dict(context_dict)
@@ -306,10 +306,10 @@ def _pack_context(msg, context):
def call(context, topic, msg):
"""Sends a message on a topic and wait for a response"""
- LOG.debug("Making asynchronous call...")
+ LOG.debug(_("Making asynchronous call..."))
msg_id = uuid.uuid4().hex
msg.update({'_msg_id': msg_id})
- LOG.debug("MSG_ID is %s" % (msg_id))
+ LOG.debug(_("MSG_ID is %s") % (msg_id))
_pack_context(msg, context)
class WaitMessage(object):
@@ -345,7 +345,7 @@ def call_twisted(context, topic, msg):
LOG.debug("Making asynchronous call...")
msg_id = uuid.uuid4().hex
msg.update({'_msg_id': msg_id})
- LOG.debug("MSG_ID is %s" % (msg_id))
+ LOG.debug(_("MSG_ID is %s") % (msg_id))
_pack_context(msg, context)
conn = Connection.instance()
@@ -384,7 +384,7 @@ def cast(context, topic, msg):
def generic_response(message_data, message):
"""Logs a result and exits"""
- LOG.debug('response %s', message_data)
+ LOG.debug(_('response %s'), message_data)
message.ack()
sys.exit(0)
@@ -393,8 +393,8 @@ def send_message(topic, message, wait=True):
"""Sends a message for testing"""
msg_id = uuid.uuid4().hex
message.update({'_msg_id': msg_id})
- LOG.debug('topic is %s', topic)
- LOG.debug('message %s', message)
+ LOG.debug(_('topic is %s'), topic)
+ LOG.debug(_('message %s'), message)
if wait:
consumer = messaging.Consumer(connection=Connection.instance(),
diff --git a/nova/scheduler/chance.py b/nova/scheduler/chance.py
index 7fd09b053..9deaa2777 100644
--- a/nova/scheduler/chance.py
+++ b/nova/scheduler/chance.py
@@ -34,5 +34,5 @@ class ChanceScheduler(driver.Scheduler):
hosts = self.hosts_up(context, topic)
if not hosts:
- raise driver.NoValidHost("No hosts found")
+ raise driver.NoValidHost(_("No hosts found"))
return hosts[int(random.random() * len(hosts))]
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index f271d573f..08d7033f5 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -58,4 +58,4 @@ class Scheduler(object):
def schedule(self, context, topic, *_args, **_kwargs):
"""Must override at least this method for scheduler to work."""
- raise NotImplementedError("Must implement a fallback schedule")
+ raise NotImplementedError(_("Must implement a fallback schedule"))
diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py
index 60a3d2b4b..44e21f2fd 100644
--- a/nova/scheduler/manager.py
+++ b/nova/scheduler/manager.py
@@ -65,4 +65,4 @@ class SchedulerManager(manager.Manager):
db.queue_get_for(context, topic, host),
{"method": method,
"args": kwargs})
- logging.debug("Casting to %s %s for %s", topic, host, method)
+ logging.debug(_("Casting to %s %s for %s"), topic, host, method)
diff --git a/nova/scheduler/simple.py b/nova/scheduler/simple.py
index 7f5093656..f9171ab35 100644
--- a/nova/scheduler/simple.py
+++ b/nova/scheduler/simple.py
@@ -47,7 +47,7 @@ class SimpleScheduler(chance.ChanceScheduler):
for result in results:
(service, instance_cores) = result
if instance_cores + instance_ref['vcpus'] > FLAGS.max_cores:
- raise driver.NoValidHost("All hosts have too many cores")
+ raise driver.NoValidHost(_("All hosts have too many cores"))
if self.service_is_up(service):
# NOTE(vish): this probably belongs in the manager, if we
# can generalize this somehow
@@ -57,7 +57,7 @@ class SimpleScheduler(chance.ChanceScheduler):
{'host': service['host'],
'scheduled_at': now})
return service['host']
- raise driver.NoValidHost("No hosts found")
+ raise driver.NoValidHost(_("No hosts found"))
def schedule_create_volume(self, context, volume_id, *_args, **_kwargs):
"""Picks a host that is up and has the fewest volumes."""
@@ -66,7 +66,8 @@ class SimpleScheduler(chance.ChanceScheduler):
for result in results:
(service, volume_gigabytes) = result
if volume_gigabytes + volume_ref['size'] > FLAGS.max_gigabytes:
- raise driver.NoValidHost("All hosts have too many gigabytes")
+ raise driver.NoValidHost(_("All hosts have too many "
+ "gigabytes"))
if self.service_is_up(service):
# NOTE(vish): this probably belongs in the manager, if we
# can generalize this somehow
@@ -76,7 +77,7 @@ class SimpleScheduler(chance.ChanceScheduler):
{'host': service['host'],
'scheduled_at': now})
return service['host']
- raise driver.NoValidHost("No hosts found")
+ raise driver.NoValidHost(_("No hosts found"))
def schedule_set_network_host(self, context, *_args, **_kwargs):
"""Picks a host that is up and has the fewest networks."""
@@ -85,7 +86,7 @@ class SimpleScheduler(chance.ChanceScheduler):
for result in results:
(service, instance_count) = result
if instance_count >= FLAGS.max_networks:
- raise driver.NoValidHost("All hosts have too many networks")
+ raise driver.NoValidHost(_("All hosts have too many networks"))
if self.service_is_up(service):
return service['host']
- raise driver.NoValidHost("No hosts found")
+ raise driver.NoValidHost(_("No hosts found"))
diff --git a/nova/server.py b/nova/server.py
index a0ee54681..e5ce4475a 100644
--- a/nova/server.py
+++ b/nova/server.py
@@ -58,7 +58,7 @@ def stop(pidfile):
try:
pid = int(open(pidfile, 'r').read().strip())
except IOError:
- message = "pidfile %s does not exist. Daemon not running?\n"
+ message = _("pidfile %s does not exist. Daemon not running?\n")
sys.stderr.write(message % pidfile)
return
@@ -84,7 +84,7 @@ def serve(name, main):
if not FLAGS.pidfile:
FLAGS.pidfile = '%s.pid' % name
- logging.debug("Full set of FLAGS: \n\n\n")
+ logging.debug(_("Full set of FLAGS: \n\n\n"))
for flag in FLAGS:
logging.debug("%s : %s", flag, FLAGS.get(flag, None))
diff --git a/nova/service.py b/nova/service.py
index 9454d4049..348b1d192 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -143,7 +143,7 @@ class Service(object, service.Service):
report_interval = FLAGS.report_interval
if not periodic_interval:
periodic_interval = FLAGS.periodic_interval
- logging.warn("Starting %s node", topic)
+ logging.warn(_("Starting %s node"), topic)
service_obj = cls(host, binary, topic, manager,
report_interval, periodic_interval)
@@ -158,7 +158,7 @@ class Service(object, service.Service):
try:
db.service_destroy(context.get_admin_context(), self.service_id)
except exception.NotFound:
- logging.warn("Service killed that has no database entry")
+ logging.warn(_("Service killed that has no database entry"))
@defer.inlineCallbacks
def periodic_tasks(self):
@@ -173,8 +173,8 @@ class Service(object, service.Service):
try:
service_ref = db.service_get(ctxt, self.service_id)
except exception.NotFound:
- logging.debug("The service database object disappeared, "
- "Recreating it.")
+ logging.debug(_("The service database object disappeared, "
+ "Recreating it."))
self._create_service_ref(ctxt)
service_ref = db.service_get(ctxt, self.service_id)
@@ -185,11 +185,11 @@ class Service(object, service.Service):
# TODO(termie): make this pattern be more elegant.
if getattr(self, "model_disconnected", False):
self.model_disconnected = False
- logging.error("Recovered model server connection!")
+ logging.error(_("Recovered model server connection!"))
# TODO(vish): this should probably only catch connection errors
except Exception: # pylint: disable-msg=W0702
if not getattr(self, "model_disconnected", False):
self.model_disconnected = True
- logging.exception("model server went away")
+ logging.exception(_("model server went away"))
yield
diff --git a/nova/twistd.py b/nova/twistd.py
index cb5648ce6..c5b7fed8c 100644
--- a/nova/twistd.py
+++ b/nova/twistd.py
@@ -208,7 +208,7 @@ def stop(pidfile):
pid = None
if not pid:
- message = "pidfile %s does not exist. Daemon not running?\n"
+ message = _("pidfile %s does not exist. Daemon not running?\n")
sys.stderr.write(message % pidfile)
# Not an error in a restart
return
@@ -229,7 +229,7 @@ def stop(pidfile):
def serve(filename):
- logging.debug("Serving %s" % filename)
+ logging.debug(_("Serving %s") % filename)
name = os.path.basename(filename)
OptionsClass = WrapTwistedOptions(TwistdServerOptions)
options = OptionsClass()
@@ -281,7 +281,7 @@ def serve(filename):
else:
logging.getLogger().setLevel(logging.WARNING)
- logging.debug("Full set of FLAGS:")
+ logging.debug(_("Full set of FLAGS:"))
for flag in FLAGS:
logging.debug("%s : %s" % (flag, FLAGS.get(flag, None)))
diff --git a/nova/utils.py b/nova/utils.py
index 142584df8..f6f03b555 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -49,7 +49,7 @@ def import_class(import_str):
__import__(mod_str)
return getattr(sys.modules[mod_str], class_str)
except (ImportError, ValueError, AttributeError):
- raise exception.NotFound('Class %s cannot be found' % class_str)
+ raise exception.NotFound(_('Class %s cannot be found') % class_str)
def import_object(import_str):
@@ -63,7 +63,7 @@ def import_object(import_str):
def fetchfile(url, target):
- logging.debug("Fetching %s" % url)
+ logging.debug(_("Fetching %s") % url)
# c = pycurl.Curl()
# fp = open(target, "wb")
# c.setopt(c.URL, url)
@@ -75,7 +75,7 @@ def fetchfile(url, target):
def execute(cmd, process_input=None, addl_env=None, check_exit_code=True):
- logging.debug("Running cmd: %s", cmd)
+ logging.debug(_("Running cmd: %s"), cmd)
env = os.environ.copy()
if addl_env:
env.update(addl_env)
@@ -88,7 +88,7 @@ def execute(cmd, process_input=None, addl_env=None, check_exit_code=True):
result = obj.communicate()
obj.stdin.close()
if obj.returncode:
- logging.debug("Result was %s" % (obj.returncode))
+ logging.debug(_("Result was %s") % (obj.returncode))
if check_exit_code and obj.returncode != 0:
(stdout, stderr) = result
raise ProcessExecutionError(exit_code=obj.returncode,
@@ -122,7 +122,7 @@ def debug(arg):
def runthis(prompt, cmd, check_exit_code=True):
- logging.debug("Running %s" % (cmd))
+ logging.debug(_("Running %s") % (cmd))
exit_code = subprocess.call(cmd.split(" "))
logging.debug(prompt % (exit_code))
if check_exit_code and exit_code != 0:
@@ -161,7 +161,7 @@ def get_my_ip():
csock.close()
return addr
except socket.gaierror as ex:
- logging.warn("Couldn't get IP, using 127.0.0.1 %s", ex)
+ logging.warn(_("Couldn't get IP, using 127.0.0.1 %s"), ex)
return "127.0.0.1"
@@ -205,7 +205,7 @@ class LazyPluggable(object):
if not self.__backend:
backend_name = self.__pivot.value
if backend_name not in self.__backends:
- raise exception.Error('Invalid backend: %s' % backend_name)
+ raise exception.Error(_('Invalid backend: %s') % backend_name)
backend = self.__backends[backend_name]
if type(backend) == type(tuple()):
diff --git a/nova/validate.py b/nova/validate.py
index 7ea27daa6..49578a24d 100644
--- a/nova/validate.py
+++ b/nova/validate.py
@@ -42,7 +42,7 @@ def rangetest(**argchecks):
# was passed by name
if float(kargs[argname]) < low or \
float(kargs[argname]) > high:
- errmsg = '{0} argument "{1}" not in {2}..{3}'
+ errmsg = _('{0} argument "{1}" not in {2}..{3}')
errmsg = errmsg.format(funcname, argname, low, high)
raise TypeError(errmsg)
@@ -51,8 +51,8 @@ def rangetest(**argchecks):
position = positionals.index(argname)
if float(pargs[position]) < low or \
float(pargs[position]) > high:
- errmsg = '{0} argument "{1}" with value of {4} ' \
- 'not in {2}..{3}'
+ errmsg = _('{0} argument "{1}" with value of {4} '
+ 'not in {2}..{3}')
errmsg = errmsg.format(funcname, argname, low, high,
pargs[position])
raise TypeError(errmsg)
@@ -76,14 +76,14 @@ def typetest(**argchecks):
for (argname, typeof) in argchecks.items():
if argname in kargs:
if not isinstance(kargs[argname], typeof):
- errmsg = '{0} argument "{1}" not of type {2}'
+ errmsg = _('{0} argument "{1}" not of type {2}')
errmsg = errmsg.format(funcname, argname, typeof)
raise TypeError(errmsg)
elif argname in positionals:
position = positionals.index(argname)
if not isinstance(pargs[position], typeof):
- errmsg = '{0} argument "{1}" with value of {2} ' \
- 'not of type {3}'
+ errmsg = _('{0} argument "{1}" with value of {2} '
+ 'not of type {3}')
errmsg = errmsg.format(funcname, argname,
pargs[position], typeof)
raise TypeError(errmsg)