author     jaypipes@gmail.com <>    2010-12-11 15:23:40 -0500
committer  jaypipes@gmail.com <>    2010-12-11 15:23:40 -0500
commit     12802a76c775a35e9d5a651bf896cfa25bec547f (patch)
tree       8c4411a54ef3a8cabc57a51dd930bd8d6683967a /nova/compute
parent     a6645d8a431ed933eef4ea6c42c0224ead6f2272 (diff)
First round of i18n-ifying strings in Nova
Diffstat (limited to 'nova/compute')
-rw-r--r--  nova/compute/api.py            | 12
-rw-r--r--  nova/compute/disk.py           | 16
-rw-r--r--  nova/compute/instance_types.py |  2
-rw-r--r--  nova/compute/manager.py        | 32
-rw-r--r--  nova/compute/monitor.py        | 12
5 files changed, 38 insertions(+), 36 deletions(-)
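
Every change below follows one pattern: a logged or user-facing string literal is wrapped in _() so it can be extracted into a translation catalog, while the format arguments stay outside the wrapped string so the untranslated literal remains the message id. A minimal sketch of the convention, assuming _ is the usual gettext lookup (the stand-in catalog and instance id below are illustrative, not part of this commit):

    import gettext
    import logging

    _ = gettext.gettext          # stand-in; Nova installs a real catalog (see note at end)
    logging.basicConfig(level=logging.DEBUG)
    instance_id = 42             # illustrative value

    # Before: a plain literal, invisible to string-extraction tools.
    logging.debug("instance %s: starting...", instance_id)

    # After: the literal is wrapped for extraction (xgettext/pybabel);
    # the argument is still passed separately, so logging interpolates it
    # lazily and translators only ever see the format string.
    logging.debug(_("instance %s: starting..."), instance_id)

A few call sites in this diff interpolate eagerly instead, e.g. logging.debug(_("Disassociating address %s") % address); both forms translate correctly, but passing the argument separately defers the %-formatting to the logging framework.
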
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 8e0efa4cc..e701e540e 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -120,7 +120,7 @@ class ComputeAPI(base.Base):
elevated = context.elevated()
instances = []
- logging.debug("Going to run %s instances...", num_instances)
+ logging.debug(_("Going to run %s instances..."), num_instances)
for num in range(num_instances):
instance = dict(mac_address=utils.generate_mac(),
launch_index=num,
@@ -157,7 +157,7 @@ class ComputeAPI(base.Base):
{"method": "setup_fixed_ip",
"args": {"address": address}})
- logging.debug("Casting to scheduler for %s/%s's instance %s",
+ logging.debug(_("Casting to scheduler for %s/%s's instance %s"),
context.project_id, context.user_id, instance_id)
rpc.cast(context,
FLAGS.scheduler_topic,
@@ -204,12 +204,12 @@ class ComputeAPI(base.Base):
instance = self.db.instance_get_by_internal_id(context,
instance_id)
except exception.NotFound as e:
- logging.warning("Instance %d was not found during terminate",
+ logging.warning(_("Instance %d was not found during terminate"),
instance_id)
raise e
if (instance['state_description'] == 'terminating'):
- logging.warning("Instance %d is already being terminated",
+ logging.warning(_("Instance %d is already being terminated"),
instance_id)
return
@@ -223,7 +223,7 @@ class ComputeAPI(base.Base):
address = self.db.instance_get_floating_address(context,
instance['id'])
if address:
- logging.debug("Disassociating address %s" % address)
+ logging.debug(_("Disassociating address %s") % address)
# NOTE(vish): Right now we don't really care if the ip is
# disassociated. We may need to worry about
# checking this later. Perhaps in the scheduler?
@@ -234,7 +234,7 @@ class ComputeAPI(base.Base):
address = self.db.instance_get_fixed_address(context, instance['id'])
if address:
- logging.debug("Deallocating address %s" % address)
+ logging.debug(_("Deallocating address %s") % address)
# NOTE(vish): Currently, nothing needs to be done on the
# network node until release. If this changes,
# we will need to cast here.
diff --git a/nova/compute/disk.py b/nova/compute/disk.py
index 4338d39f0..8701c3968 100644
--- a/nova/compute/disk.py
+++ b/nova/compute/disk.py
@@ -70,12 +70,12 @@ def partition(infile, outfile, local_bytes=0, resize=True,
yield execute('resize2fs %s' % infile)
file_size = FLAGS.minimum_root_size
elif file_size % sector_size != 0:
- logging.warn("Input partition size not evenly divisible by"
- " sector size: %d / %d", file_size, sector_size)
+ logging.warn(_("Input partition size not evenly divisible by"
+ " sector size: %d / %d"), file_size, sector_size)
primary_sectors = file_size / sector_size
if local_bytes % sector_size != 0:
- logging.warn("Bytes for local storage not evenly divisible"
- " by sector size: %d / %d", local_bytes, sector_size)
+ logging.warn(_("Bytes for local storage not evenly divisible"
+ " by sector size: %d / %d"), local_bytes, sector_size)
local_sectors = local_bytes / sector_size
mbr_last = 62 # a
@@ -121,14 +121,15 @@ def inject_data(image, key=None, net=None, partition=None, execute=None):
"""
out, err = yield execute('sudo losetup -f --show %s' % image)
if err:
- raise exception.Error('Could not attach image to loopback: %s' % err)
+ raise exception.Error(_('Could not attach image to loopback: %s')
+ % err)
device = out.strip()
try:
if not partition is None:
# create partition
out, err = yield execute('sudo kpartx -a %s' % device)
if err:
- raise exception.Error('Failed to load partition: %s' % err)
+ raise exception.Error(_('Failed to load partition: %s') % err)
mapped_device = '/dev/mapper/%sp%s' % (device.split('/')[-1],
partition)
else:
@@ -141,7 +142,8 @@ def inject_data(image, key=None, net=None, partition=None, execute=None):
out, err = yield execute(
'sudo mount %s %s' % (mapped_device, tmpdir))
if err:
- raise exception.Error('Failed to mount filesystem: %s' % err)
+ raise exception.Error(_('Failed to mount filesystem: %s')
+ % err)
try:
if key:
diff --git a/nova/compute/instance_types.py b/nova/compute/instance_types.py
index a2679e0fc..000b3a6d9 100644
--- a/nova/compute/instance_types.py
+++ b/nova/compute/instance_types.py
@@ -37,7 +37,7 @@ def get_by_type(instance_type):
if instance_type is None:
return FLAGS.default_instance_type
if instance_type not in INSTANCE_TYPES:
- raise exception.ApiError("Unknown instance type: %s",
+ raise exception.ApiError(_("Unknown instance type: %s"),
instance_type)
return instance_type
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index dd8d41129..a63ad5e1b 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -91,8 +91,8 @@ class ComputeManager(manager.Manager):
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
if instance_ref['name'] in self.driver.list_instances():
- raise exception.Error("Instance has already been created")
- logging.debug("instance %s: starting...", instance_id)
+ raise exception.Error(_("Instance has already been created"))
+ logging.debug(_("instance %s: starting..."), instance_id)
self.network_manager.setup_compute_network(context, instance_id)
self.db.instance_update(context,
instance_id,
@@ -111,7 +111,7 @@ class ComputeManager(manager.Manager):
instance_id,
{'launched_at': now})
except Exception: # pylint: disable-msg=W0702
- logging.exception("instance %s: Failed to spawn",
+ logging.exception(_("instance %s: Failed to spawn"),
instance_ref['name'])
self.db.instance_set_state(context,
instance_id,
@@ -124,7 +124,7 @@ class ComputeManager(manager.Manager):
def terminate_instance(self, context, instance_id):
"""Terminate an instance on this machine."""
context = context.elevated()
- logging.debug("instance %s: terminating", instance_id)
+ logging.debug(_("instance %s: terminating"), instance_id)
instance_ref = self.db.instance_get(context, instance_id)
volumes = instance_ref.get('volumes', []) or []
@@ -132,8 +132,8 @@ class ComputeManager(manager.Manager):
self.detach_volume(context, instance_id, volume['id'])
if instance_ref['state'] == power_state.SHUTOFF:
self.db.instance_destroy(context, instance_id)
- raise exception.Error('trying to destroy already destroyed'
- ' instance: %s' % instance_id)
+ raise exception.Error(_('trying to destroy already destroyed'
+ ' instance: %s') % instance_id)
yield self.driver.destroy(instance_ref)
# TODO(ja): should we keep it in a terminated state for a bit?
@@ -148,13 +148,13 @@ class ComputeManager(manager.Manager):
self._update_state(context, instance_id)
if instance_ref['state'] != power_state.RUNNING:
- logging.warn('trying to reboot a non-running '
- 'instance: %s (state: %s excepted: %s)',
+ logging.warn(_('trying to reboot a non-running '
+ 'instance: %s (state: %s excepted: %s)'),
instance_ref['internal_id'],
instance_ref['state'],
power_state.RUNNING)
- logging.debug('instance %s: rebooting', instance_ref['name'])
+ logging.debug(_('instance %s: rebooting'), instance_ref['name'])
self.db.instance_set_state(context,
instance_id,
power_state.NOSTATE,
@@ -169,7 +169,7 @@ class ComputeManager(manager.Manager):
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
- logging.debug('instance %s: rescuing',
+ logging.debug(_('instance %s: rescuing'),
instance_ref['internal_id'])
self.db.instance_set_state(context,
instance_id,
@@ -185,7 +185,7 @@ class ComputeManager(manager.Manager):
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
- logging.debug('instance %s: unrescuing',
+ logging.debug(_('instance %s: unrescuing'),
instance_ref['internal_id'])
self.db.instance_set_state(context,
instance_id,
@@ -198,7 +198,7 @@ class ComputeManager(manager.Manager):
def get_console_output(self, context, instance_id):
"""Send the console output for an instance."""
context = context.elevated()
- logging.debug("instance %s: getting console output", instance_id)
+ logging.debug(_("instance %s: getting console output"), instance_id)
instance_ref = self.db.instance_get(context, instance_id)
return self.driver.get_console_output(instance_ref)
@@ -208,7 +208,7 @@ class ComputeManager(manager.Manager):
def attach_volume(self, context, instance_id, volume_id, mountpoint):
"""Attach a volume to an instance."""
context = context.elevated()
- logging.debug("instance %s: attaching volume %s to %s", instance_id,
+ logging.debug(_("instance %s: attaching volume %s to %s"), instance_id,
volume_id, mountpoint)
instance_ref = self.db.instance_get(context, instance_id)
dev_path = yield self.volume_manager.setup_compute_volume(context,
@@ -225,7 +225,7 @@ class ComputeManager(manager.Manager):
# NOTE(vish): The inline callback eats the exception info so we
# log the traceback here and reraise the same
# ecxception below.
- logging.exception("instance %s: attach failed %s, removing",
+ logging.exception(_("instance %s: attach failed %s, removing"),
instance_id, mountpoint)
yield self.volume_manager.remove_compute_volume(context,
volume_id)
@@ -237,13 +237,13 @@ class ComputeManager(manager.Manager):
def detach_volume(self, context, instance_id, volume_id):
"""Detach a volume from an instance."""
context = context.elevated()
- logging.debug("instance %s: detaching volume %s",
+ logging.debug(_("instance %s: detaching volume %s"),
instance_id,
volume_id)
instance_ref = self.db.instance_get(context, instance_id)
volume_ref = self.db.volume_get(context, volume_id)
if instance_ref['name'] not in self.driver.list_instances():
- logging.warn("Detaching volume from unknown instance %s",
+ logging.warn(_("Detaching volume from unknown instance %s"),
instance_ref['name'])
else:
yield self.driver.detach_volume(instance_ref['name'],
diff --git a/nova/compute/monitor.py b/nova/compute/monitor.py
index 22653113a..60c347a5e 100644
--- a/nova/compute/monitor.py
+++ b/nova/compute/monitor.py
@@ -255,7 +255,7 @@ class Instance(object):
Updates the instances statistics and stores the resulting graphs
in the internal object store on the cloud controller.
"""
- logging.debug('updating %s...', self.instance_id)
+ logging.debug(_('updating %s...'), self.instance_id)
try:
data = self.fetch_cpu_stats()
@@ -285,7 +285,7 @@ class Instance(object):
graph_disk(self, '1w')
graph_disk(self, '1m')
except Exception:
- logging.exception('unexpected error during update')
+ logging.exception(_('unexpected error during update'))
self.last_updated = utcnow()
@@ -351,7 +351,7 @@ class Instance(object):
rd += rd_bytes
wr += wr_bytes
except TypeError:
- logging.error('Cannot get blockstats for "%s" on "%s"',
+ logging.error(_('Cannot get blockstats for "%s" on "%s"'),
disk, self.instance_id)
raise
@@ -373,7 +373,7 @@ class Instance(object):
rx += stats[0]
tx += stats[4]
except TypeError:
- logging.error('Cannot get ifstats for "%s" on "%s"',
+ logging.error(_('Cannot get ifstats for "%s" on "%s"'),
interface, self.instance_id)
raise
@@ -408,7 +408,7 @@ class InstanceMonitor(object, service.Service):
try:
conn = virt_connection.get_connection(read_only=True)
except Exception, exn:
- logging.exception('unexpected exception getting connection')
+ logging.exception(_('unexpected exception getting connection'))
time.sleep(FLAGS.monitoring_instances_delay)
return
@@ -423,7 +423,7 @@ class InstanceMonitor(object, service.Service):
if not domain_id in self._instances:
instance = Instance(conn, domain_id)
self._instances[domain_id] = instance
- logging.debug('Found instance: %s', domain_id)
+ logging.debug(_('Found instance: %s'), domain_id)
for key in self._instances.keys():
instance = self._instances[key]
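
The wrapped calls above use a module-level _ without importing it in any of the changed files; presumably it is installed as a builtin at package import time via the standard library's gettext module. A minimal sketch of that setup, assuming Python 2 and an illustrative translation domain:

    # Hypothetical package-level setup making _() available everywhere
    # without per-module imports (Python 2 gettext.install signature).
    import gettext

    # Installs _ into __builtin__; 'nova' is the translation domain and
    # unicode=True makes _() return unicode strings.
    gettext.install('nova', unicode=True)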