-rw-r--r--  Authors                                                            2
-rwxr-xr-x  bin/nova-manage                                                  107
-rw-r--r--  nova/compute/api.py                                               11
-rw-r--r--  nova/compute/manager.py                                           22
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/069_block_migration.py   50
-rw-r--r--  nova/db/sqlalchemy/models.py                                       1
-rw-r--r--  nova/exception.py                                                  4
-rw-r--r--  nova/scheduler/driver.py                                         111
-rw-r--r--  nova/scheduler/manager.py                                          3
-rw-r--r--  nova/scheduler/multi.py                                            2
-rw-r--r--  nova/tests/fake_libvirt_utils.py                                   2
-rw-r--r--  nova/tests/scheduler/test_scheduler.py                            54
-rw-r--r--  nova/tests/test_compute.py                                         4
-rw-r--r--  nova/tests/test_instance_types.py                                  2
-rw-r--r--  nova/tests/test_libvirt.py                                        94
-rw-r--r--  nova/tests/test_virt_drivers.py                                    5
-rw-r--r--  nova/tests/test_xenapi.py                                         50
-rw-r--r--  nova/virt/fake.py                                                  6
-rw-r--r--  nova/virt/libvirt/connection.py                                   77
-rw-r--r--  nova/virt/xenapi/vm_utils.py                                      47
-rw-r--r--  po/bs.po                                                        2801
-rw-r--r--  po/es.po                                                         332
22 files changed, 3362 insertions, 425 deletions
diff --git a/Authors b/Authors
index e6eda9240..c7b1e894f 100644
--- a/Authors
+++ b/Authors
@@ -88,6 +88,7 @@ Justin Shepherd <jshepher@rackspace.com>
Kei Masumoto <masumotok@nttdata.co.jp>
Keisuke Tagami <tagami.keisuke@lab.ntt.co.jp>
masumoto<masumotok@nttdata.co.jp>
+masukotm<masukotm@nttdata.co.jp>
Ken Pepple <ken.pepple@gmail.com>
Kevin Bringard <kbringard@attinteractive.com>
Kevin L. Mitchell <kevin.mitchell@rackspace.com>
@@ -138,6 +139,7 @@ Stephanie Reese <reese.sm@gmail.com>
Thierry Carrez <thierry@openstack.org>
Tim Simpson <tim.simpson@rackspace.com>
Todd Willey <todd@ansolabs.com>
+Tomoya Masuko<masukotm@nttdata.co.jp>
Trey Morris <trey.morris@rackspace.com>
Troy Toman <troy.toman@rackspace.com>
Tushar Patil <tushar.vitthal.patil@gmail.com>
diff --git a/bin/nova-manage b/bin/nova-manage
index 6147b1202..3636d99ee 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -980,7 +980,8 @@ class VmCommands(object):
instance['availability_zone'],
instance['launch_index'])
- def _migration(self, ec2_id, dest, block_migration=False):
+ def _migration(self, ec2_id, dest, block_migration=False,
+ disk_over_commit=False):
"""Migrates a running instance to a new machine.
:param ec2_id: instance id which comes from euca-describe-instance.
:param dest: destination host name.
@@ -1007,7 +1008,8 @@ class VmCommands(object):
"args": {"instance_id": instance_id,
"dest": dest,
"topic": FLAGS.compute_topic,
- "block_migration": block_migration}})
+ "block_migration": block_migration,
+ "disk_over_commit": disk_over_commit}})
print _('Migration of %s initiated.'
'Check its progress using euca-describe-instances.') % ec2_id
@@ -1022,11 +1024,14 @@ class VmCommands(object):
@args('--ec2_id', dest='ec2_id', metavar='<ec2 id>', help='EC2 ID')
@args('--dest', dest='dest', metavar='<Destanation>',
- help='destanation node')
- def block_migration(self, ec2_id, dest):
+ help='destination node')
+ @args('--disk_over_commit', dest='disk_over_commit',
+ metavar='<overcommit flag>',
+ help='Allow overcommit (default False)')
+ def block_migration(self, ec2_id, dest, disk_over_commit=False):
"""Migrates a running instance to a new machine with storage data."""
- self._migration(ec2_id, dest, True)
+ self._migration(ec2_id, dest, True, disk_over_commit)
class ServiceCommands(object):
@@ -1091,8 +1096,11 @@ class ServiceCommands(object):
@args('--host', dest='host', metavar='<host>', help='Host')
def describe_resource(self, host):
- """Describes cpu/memory/hdd info for host."""
+ """Describes cpu/memory/hdd info for host.
+
+ :param host: hostname.
+ """
result = rpc.call(context.get_admin_context(),
FLAGS.scheduler_topic,
{"method": "show_host_resources",
@@ -1102,49 +1110,66 @@ class ServiceCommands(object):
print _('An unexpected error has occurred.')
print _('[Result]'), result
else:
- cpu = result['resource']['vcpus']
- mem = result['resource']['memory_mb']
- hdd = result['resource']['local_gb']
- cpu_u = result['resource']['vcpus_used']
- mem_u = result['resource']['memory_mb_used']
- hdd_u = result['resource']['local_gb_used']
-
+ # Print the total and used_now rows
+ # (NOTE) The host name column is 16 characters wide
+ print '%(a)-25s%(b)16s%(c)8s%(d)8s%(e)8s' % {"a": _('HOST'),
+ "b": _('PROJECT'),
+ "c": _('cpu'),
+ "d": _('mem(mb)'),
+ "e": _('hdd')}
+ print '%(a)-16s(total)%(b)26s%(c)8s%(d)8s' %\
+ {"a": host,
+ "b": result['resource']['vcpus'],
+ "c": result['resource']['memory_mb'],
+ "d": result['resource']['local_gb']}
+
+ print '%(a)-16s(used_now)%(b)23s%(c)8s%(d)8s' %\
+ {"a": host,
+ "b": result['resource']['vcpus_used'],
+ "c": result['resource']['memory_mb_used'],
+ "d": result['resource']['local_gb_used']}
+
+ # Print the used_max rows
cpu_sum = 0
mem_sum = 0
hdd_sum = 0
- print 'HOST\t\t\tPROJECT\t\tcpu\tmem(mb)\tdisk(gb)'
- print '%s(total)\t\t\t%s\t%s\t%s' % (host, cpu, mem, hdd)
- print '%s(used_now)\t\t\t%s\t%s\t%s' % (host, cpu_u, mem_u, hdd_u)
- for p_id, val in result['usage'].items():
- cpu_sum += val['vcpus']
- mem_sum += val['memory_mb']
- hdd_sum += val['local_gb']
- print '%s(used_max)\t\t\t%s\t%s\t%s' % (host, cpu_sum,
- mem_sum, hdd_sum)
+ ctxt = context.get_admin_context()
+ instance_refs = db.instance_get_all_by_host(ctxt, host)
- for p_id, val in result['usage'].items():
- print '%s\t\t%s\t\t%s\t%s\t%s' % (host,
- p_id,
- val['vcpus'],
- val['memory_mb'],
- val['local_gb'])
+ project_ids = [i['project_id'] for i in instance_refs]
+ project_ids = list(set(project_ids))
+ usage = dict()
+ for project_id in project_ids:
+ vcpus = [i['vcpus'] for i in instance_refs \
+ if i['project_id'] == project_id]
- @args('--host', dest='host', metavar='<host>', help='Host')
- def update_resource(self, host):
- """Updates available vcpu/memory/disk info for host."""
+ mem = [i['memory_mb'] for i in instance_refs \
+ if i['project_id'] == project_id]
- ctxt = context.get_admin_context()
- service_refs = db.service_get_all_by_host(ctxt, host)
- if len(service_refs) <= 0:
- raise exception.Invalid(_('%s does not exist.') % host)
+ disk = [i['local_gb'] for i in instance_refs \
+ if i['project_id'] == project_id]
- service_refs = [s for s in service_refs if s['topic'] == 'compute']
- if len(service_refs) <= 0:
- raise exception.Invalid(_('%s is not compute node.') % host)
+ usage[project_id] = {
+ 'vcpus': reduce(lambda x, y: x + y, vcpus),
+ 'memory_mb': reduce(lambda x, y: x + y, mem),
+ 'local_gb': reduce(lambda x, y: x + y, disk)}
- rpc.call(ctxt,
- db.queue_get_for(ctxt, FLAGS.compute_topic, host),
- {"method": "update_available_resource"})
+ for p_id, val in usage.items():
+ cpu_sum += val['vcpus']
+ mem_sum += val['memory_mb']
+ hdd_sum += val['local_gb']
+ print '%(a)-16s(used_max)%(b)23s%(c)8s%(d)8s' % {"a": host,
+ "b": cpu_sum,
+ "c": mem_sum,
+ "d": hdd_sum}
+
+ for p_id, val in usage.items():
+ print '%(a)-25s%(b)16s%(c)8s%(d)8s%(e)8s' %\
+ {"a": host,
+ "b": p_id,
+ "c": val['vcpus'],
+ "d": val['memory_mb'],
+ "e": val['local_gb']}
class HostCommands(object):
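With the rework above, describe_resource prints fixed-width columns (host, project, cpu, memory, disk) instead of tab-separated fields, and computes the per-project used_max figures directly from the instances table. The output looks roughly like this (host name and values hypothetical):

    HOST                              PROJECT     cpu mem(mb)     hdd
    host1           (total)                           16   32232     878
    host1           (used_now)                         3    2048      62
    host1           (used_max)                         3    1536      60
    host1                           project-a          1     512      20
    host1                           project-b          2    1024      40
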
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 3630a3ec2..0ed562a1b 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -1199,14 +1199,12 @@ class API(base.Base):
'image_type': image_type,
}
+ sent_meta = {'name': name, 'is_public': False}
+
if image_type == 'backup':
properties['backup_type'] = backup_type
- properties.update(extra_properties or {})
- sent_meta = {'name': name, 'is_public': False,
- 'status': 'creating', 'properties': properties}
-
- if image_type == 'snapshot':
+ elif image_type == 'snapshot':
min_ram, min_disk = self._get_minram_mindisk_params(context,
instance)
if min_ram is not None:
@@ -1214,6 +1212,9 @@ class API(base.Base):
if min_disk is not None:
sent_meta['min_disk'] = min_disk
+ properties.update(extra_properties or {})
+ sent_meta['properties'] = properties
+
recv_meta = self.image_service.create(context, sent_meta)
params = {'image_id': recv_meta['id'], 'image_type': image_type,
'backup_type': backup_type, 'rotation': rotation}
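The reordering above builds sent_meta incrementally: the base metadata first, min_ram/min_disk only for snapshots, and the merged properties dict for both image types. A sketch of the result for a snapshot (all values hypothetical):

    sent_meta = {'name': 'my-snap',
                 'is_public': False,
                 'min_ram': 512,    # only when the source image defines it
                 'min_disk': 10,    # likewise
                 'properties': {'image_type': 'snapshot'}}  # merged with extra_properties
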
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 58b179464..2e24ffbae 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -1663,6 +1663,17 @@ class ComputeManager(manager.SchedulerDependentManager):
"""
return self.driver.update_available_resource(context, self.host)
+ def get_instance_disk_info(self, context, instance_name):
+ """Getting infomation of instance's current disk.
+
+ Implementation nova.virt.libvirt.connection.
+
+ :param context: security context
+ :param instance_name: instance name
+
+ """
+ return self.driver.get_instance_disk_info(instance_name)
+
def pre_live_migration(self, context, instance_id, time=None,
block_migration=False, disk=None):
"""Preparations for live migration at dest host.
@@ -1735,7 +1746,7 @@ class ComputeManager(manager.SchedulerDependentManager):
:param context: security context
:param instance_id: nova.db.sqlalchemy.models.Instance.Id
:param dest: destination host
- :param block_migration: if true, do block migration
+ :param block_migration: if true, prepare for block migration
"""
# Get instance for error handling.
@@ -1751,8 +1762,7 @@ class ComputeManager(manager.SchedulerDependentManager):
"args": {'instance_id': instance_id}})
if block_migration:
- disk = self.driver.get_instance_disk_info(context,
- instance_ref)
+ disk = self.driver.get_instance_disk_info(instance_ref.name)
else:
disk = None
@@ -1790,7 +1800,7 @@ class ComputeManager(manager.SchedulerDependentManager):
:param ctxt: security context
:param instance_id: nova.db.sqlalchemy.models.Instance.Id
:param dest: destination host
- :param block_migration: if true, do block migration
+ :param block_migration: if true, prepare for block migration
"""
@@ -1879,7 +1889,7 @@ class ComputeManager(manager.SchedulerDependentManager):
:param context: security context
:param instance_id: nova.db.sqlalchemy.models.Instance.Id
- :param block_migration: block_migration
+ :param block_migration: if true, prepare for block migration
"""
instance_ref = self.db.instance_get(context, instance_id)
@@ -1900,6 +1910,8 @@ class ComputeManager(manager.SchedulerDependentManager):
:param dest:
This method is called from live migration src host.
This param specifies destination host.
+ :param block_migration: if true, prepare for block migration
+
"""
host = instance_ref['host']
self._instance_update(context,
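The new get_instance_disk_info endpoint is what the scheduler calls over RPC before a block migration (see nova/scheduler/driver.py below). A minimal caller-side sketch, with a hypothetical instance name:

    topic = db.queue_get_for(context, FLAGS.compute_topic, instance_ref['host'])
    ret = rpc.call(context, topic,
                   {"method": "get_instance_disk_info",
                    "args": {"instance_name": "instance-00000001"}})
    disk_infos = utils.loads(ret)  # JSON-decoded list of per-disk dicts
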
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/069_block_migration.py b/nova/db/sqlalchemy/migrate_repo/versions/069_block_migration.py
new file mode 100644
index 000000000..a16cd4dc8
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/069_block_migration.py
@@ -0,0 +1,50 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Boolean, Column, DateTime, Integer, MetaData
+from sqlalchemy import Table, Text
+from nova import log as logging
+
+meta = MetaData()
+
+# Add disk_available_least column to compute_nodes table.
+# With qcow2 image support in mind, both the compressed and the virtual
+# disk size have to be considered.
+# disk_available stores "total disk size - used disk (compressed disk size)",
+# while disk_available_least stores
+# "total disk size - used disk (virtual disk size)".
+# The virtual disk size is what matters for KVM block migration.
+
+compute_nodes = Table('compute_nodes', meta,
+ Column('id', Integer(), primary_key=True, nullable=False))
+
+disk_available_least = Column('disk_available_least', Integer(), default=0)
+
+
+def upgrade(migrate_engine):
+ meta.bind = migrate_engine
+ try:
+ compute_nodes.create_column(disk_available_least)
+ except Exception:
+ logging.error(_("disk_available_least column not added to "
+ "compute_nodes table"))
+ raise
+
+
+def downgrade(migrate_engine):
+ meta.bind = migrate_engine
+ compute_nodes.drop_column(disk_available_least)
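A worked example of the two quantities (numbers hypothetical): a host with 100 GB of local disk runs one qcow2-backed instance whose image currently occupies 10 GB but may grow to a 40 GB virtual size:

    total_gb = 100
    real_used_gb = 10   # compressed/actual usage on disk
    virt_used_gb = 40   # maximum size the image may grow to
    disk_available = total_gb - real_used_gb        # 90 GB
    disk_available_least = total_gb - virt_used_gb  # 60 GB
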
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index cdcef7179..92c9bb27f 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -147,6 +147,7 @@ class ComputeNode(BASE, NovaBase):
# above, since it is copied from <cpu> tag of getCapabilities()
# (See libvirt.virtConnection).
cpu_info = Column(Text, nullable=True)
+ disk_available_least = Column(Integer)
class Certificate(BASE, NovaBase):
diff --git a/nova/exception.py b/nova/exception.py
index 870ae80e4..716f86dc1 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -299,6 +299,10 @@ class UnableToMigrateToSelf(Invalid):
"to current host (%(host)s).")
+class DestinationHostUnavailable(Invalid):
+ message = _("Destination compute host is unavailable at this time.")
+
+
class SourceHostUnavailable(Invalid):
message = _("Original compute host is unavailable at this time.")
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index 85acf6a09..b3b598de9 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -169,12 +169,16 @@ class Scheduler(object):
raise NotImplementedError(_("Must implement a fallback schedule"))
def schedule_live_migration(self, context, instance_id, dest,
- block_migration=False):
+ block_migration=False,
+ disk_over_commit=False):
"""Live migration scheduling method.
-
:param context:
:param instance_id:
:param dest: destination host
+ :param block_migration: if True, do block migration.
+ :param disk_over_commit: if True, consider real (not virtual)
+ disk size.
+
:return:
The host where instance is running currently.
Then scheduler send request that host.
@@ -187,10 +191,12 @@ class Scheduler(object):
# Checking destination host.
self._live_migration_dest_check(context, instance_ref,
- dest, block_migration)
+ dest, block_migration,
+ disk_over_commit)
# Common checking.
self._live_migration_common_check(context, instance_ref,
- dest, block_migration)
+ dest, block_migration,
+ disk_over_commit)
# Changing instance_state.
values = {"vm_state": vm_states.MIGRATING}
@@ -238,13 +244,15 @@ class Scheduler(object):
raise exception.ComputeServiceUnavailable(host=src)
def _live_migration_dest_check(self, context, instance_ref, dest,
- block_migration):
+ block_migration, disk_over_commit):
"""Live migration check routine (for destination host).
:param context: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance object
:param dest: destination host
-
+ :param block_migration: if True, do block migration.
+ :param disk_over_commit: if True, consider real (not virtual)
+ disk size.
"""
# Checking dest exists and compute node.
@@ -267,10 +275,11 @@ class Scheduler(object):
self.assert_compute_node_has_enough_resources(context,
instance_ref,
dest,
- block_migration)
+ block_migration,
+ disk_over_commit)
def _live_migration_common_check(self, context, instance_ref, dest,
- block_migration):
+ block_migration, disk_over_commit):
"""Live migration common check routine.
Below checkings are followed by
@@ -279,7 +288,9 @@ class Scheduler(object):
:param context: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance object
:param dest: destination host
- :param block_migration if True, check for block_migration.
+ :param block_migration: if True, do block migration.
+ :param disk_over_commit: if True, consider real (not virtual)
+ disk size.
"""
@@ -300,7 +311,7 @@ class Scheduler(object):
"and %(dest)s.") % locals())
raise
- # Checking dest exists.
+ # Checking destination host exists.
dservice_refs = db.service_get_all_compute_by_host(context, dest)
dservice_ref = dservice_refs[0]['compute_node'][0]
@@ -338,20 +349,26 @@ class Scheduler(object):
raise
def assert_compute_node_has_enough_resources(self, context, instance_ref,
- dest, block_migration):
+ dest, block_migration,
+ disk_over_commit):
"""Checks if destination host has enough resource for live migration.
:param context: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance object
:param dest: destination host
- :param block_migration: if True, disk checking has been done
+ :param block_migration: if True, do block migration.
+ :param disk_over_commit: if True, consider real (not virtual)
+ disk size.
"""
- self.assert_compute_node_has_enough_memory(context, instance_ref, dest)
+ self.assert_compute_node_has_enough_memory(context,
+ instance_ref, dest)
if not block_migration:
return
- self.assert_compute_node_has_enough_disk(context, instance_ref, dest)
+ self.assert_compute_node_has_enough_disk(context,
+ instance_ref, dest,
+ disk_over_commit)
def assert_compute_node_has_enough_memory(self, context,
instance_ref, dest):
@@ -364,7 +381,7 @@ class Scheduler(object):
"""
- # Getting total available memory and disk of host
+ # Getting total available memory of host
avail = self._get_compute_info(context, dest, 'memory_mb')
# Getting total used memory and disk of host
@@ -385,35 +402,65 @@ class Scheduler(object):
"instance:%(mem_inst)s)")
raise exception.MigrationError(reason=reason % locals())
- def assert_compute_node_has_enough_disk(self, context,
- instance_ref, dest):
+ def assert_compute_node_has_enough_disk(self, context, instance_ref, dest,
+ disk_over_commit):
"""Checks if destination host has enough disk for block migration.
:param context: security context
:param instance_ref: nova.db.sqlalchemy.models.Instance object
:param dest: destination host
+ :param disk_over_commit: if True, consider real (not virtual)
+ disk size.
"""
- # Getting total available memory and disk of host
- avail = self._get_compute_info(context, dest, 'local_gb')
+ # Libvirt supports the qcow2 disk format, which is usually compressed
+ # on compute nodes.
+ # The real (compressed) disk image may be enlarged up to its
+ # "virtual disk size", which is the maximum disk size specified
+ # at creation time. (See qemu-img info path-to-disk.)
+ # The scheduler considers the destination host to still have enough
+ # disk space if real disk size < available disk size when
+ # disk_over_commit is True, otherwise if
+ # virtual disk size < available disk size.
+
+ # Refresh compute_nodes table
+ topic = db.queue_get_for(context, FLAGS.compute_topic, dest)
+ rpc.call(context, topic,
+ {"method": "update_available_resource"})
+
+ # Getting total available disk of host
+ available_gb = self._get_compute_info(context,
+ dest, 'disk_available_least')
+ available = available_gb * (1024 ** 3)
+
+ # Getting necessary disk size
+ try:
+ topic = db.queue_get_for(context, FLAGS.compute_topic,
+ instance_ref['host'])
+ ret = rpc.call(context, topic,
+ {"method": 'get_instance_disk_info',
+ "args": {'instance_name': instance_ref.name}})
+ disk_infos = utils.loads(ret)
+ except rpc.RemoteError:
+ LOG.exception(_("host %(dest)s is not compatible with "
+ "original host %(src)s.") % locals())
+ raise
- # Getting total used memory and disk of host
- # It should be sum of disks that are assigned as max value
- # because overcommiting is risky.
- used = 0
- instance_refs = db.instance_get_all_by_host(context, dest)
- used_list = [i['local_gb'] for i in instance_refs]
- if used_list:
- used = reduce(lambda x, y: x + y, used_list)
+ necessary = 0
+ if disk_over_commit:
+ for info in disk_infos:
+ necessary += int(info['disk_size'])
+ else:
+ for info in disk_infos:
+ necessary += int(info['virt_disk_size'])
- disk_inst = instance_ref['local_gb']
- avail = avail - used
- if avail <= disk_inst:
+ # Check that available disk > necessary disk
+ if (available - necessary) < 0:
instance_id = ec2utils.id_to_ec2_id(instance_ref['id'])
reason = _("Unable to migrate %(instance_id)s to %(dest)s: "
- "Lack of disk(host:%(avail)s "
- "<= instance:%(disk_inst)s)")
+ "Lack of disk(host:%(available)s "
+ "<= instance:%(necessary)s)")
raise exception.MigrationError(reason=reason % locals())
def _get_compute_info(self, context, host, key):
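The disk check above reduces to a single comparison once the disk info has been fetched from the source host. A minimal sketch with hypothetical disk_infos and a destination reporting 25 GB of disk_available_least:

    GB = 1024 ** 3
    disk_infos = [{'disk_size': 3 * GB, 'virt_disk_size': 20 * GB},
                  {'disk_size': 1 * GB, 'virt_disk_size': 20 * GB}]
    available = 25 * GB

    for disk_over_commit in (True, False):
        key = 'disk_size' if disk_over_commit else 'virt_disk_size'
        necessary = sum(int(info[key]) for info in disk_infos)
        print disk_over_commit, necessary <= available
    # True:  necessary =  4 GB <= 25 GB -> migration allowed
    # False: necessary = 40 GB >  25 GB -> MigrationError
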
diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py
index 7edc4e4f2..c74988cda 100644
--- a/nova/scheduler/manager.py
+++ b/nova/scheduler/manager.py
@@ -148,6 +148,9 @@ class SchedulerManager(manager.Manager):
'local_gb_used': 64}
"""
+ # Update latest compute_node table
+ topic = db.queue_get_for(context, FLAGS.compute_topic, host)
+ rpc.call(context, topic, {"method": "update_available_resource"})
# Getting compute node info and related instances info
compute_ref = db.service_get_all_compute_by_host(context, host)
diff --git a/nova/scheduler/multi.py b/nova/scheduler/multi.py
index 511a8fa47..83ecd8e63 100644
--- a/nova/scheduler/multi.py
+++ b/nova/scheduler/multi.py
@@ -20,7 +20,6 @@
"""
Scheduler that allows routing some calls to one driver and others to another.
"""
-
from nova import flags
from nova import utils
from nova.scheduler import driver
@@ -39,6 +38,7 @@ flags.DEFINE_string('volume_scheduler_driver',
_METHOD_MAP = {'run_instance': 'compute',
'start_instance': 'compute',
'prep_resize': 'compute',
+ 'live_migration': 'compute',
'create_volume': 'volume',
'create_volumes': 'volume'}
diff --git a/nova/tests/fake_libvirt_utils.py b/nova/tests/fake_libvirt_utils.py
index 085a61bc9..b05f11178 100644
--- a/nova/tests/fake_libvirt_utils.py
+++ b/nova/tests/fake_libvirt_utils.py
@@ -33,7 +33,7 @@ def get_disk_size(path):
return disk_sizes.get(path, 1024 * 1024 * 20)
-def get_backing_file(path):
+def get_disk_backing_file(path):
return disk_backing_files.get(path, None)
diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py
index 48f678d56..550964844 100644
--- a/nova/tests/scheduler/test_scheduler.py
+++ b/nova/tests/scheduler/test_scheduler.py
@@ -36,6 +36,7 @@ from nova import rpc
from nova import utils
from nova.scheduler import api
from nova.scheduler import driver
+from nova.scheduler import zone_manager
from nova.scheduler import manager
from nova.scheduler.simple import SimpleScheduler
from nova.compute import power_state
@@ -999,9 +1000,9 @@ class SimpleDriverTestCase(test.TestCase):
self.mox.StubOutWithMock(driver_i, '_live_migration_common_check')
driver_i._live_migration_src_check(nocare, nocare)
driver_i._live_migration_dest_check(nocare, nocare,
- i_ref['host'], False)
+ i_ref['host'], False, False)
driver_i._live_migration_common_check(nocare, nocare,
- i_ref['host'], False)
+ i_ref['host'], False, False)
self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True)
kwargs = {'instance_id': instance_id, 'dest': i_ref['host'],
'block_migration': False}
@@ -1013,7 +1014,8 @@ class SimpleDriverTestCase(test.TestCase):
self.scheduler.live_migration(self.context, FLAGS.compute_topic,
instance_id=instance_id,
dest=i_ref['host'],
- block_migration=False)
+ block_migration=False,
+ disk_over_commit=False)
i_ref = db.instance_get(self.context, instance_id)
self.assertTrue(i_ref['vm_state'] == vm_states.MIGRATING)
@@ -1095,7 +1097,22 @@ class SimpleDriverTestCase(test.TestCase):
self.assertRaises(exception.ComputeServiceUnavailable,
self.scheduler.driver._live_migration_dest_check,
- self.context, i_ref, i_ref['host'], False)
+ self.context, i_ref, i_ref['host'], False, False)
+
+ db.instance_destroy(self.context, instance_id)
+ db.service_destroy(self.context, s_ref['id'])
+
+ def test_live_migration_dest_check_not_alive(self):
+ """Confirms exception raises in case dest host does not exist."""
+ instance_id = _create_instance()['id']
+ i_ref = db.instance_get(self.context, instance_id)
+ t = utils.utcnow() - datetime.timedelta(10)
+ s_ref = self._create_compute_service(created_at=t, updated_at=t,
+ host=i_ref['host'])
+
+ self.assertRaises(exception.ComputeServiceUnavailable,
+ self.scheduler.driver._live_migration_dest_check,
+ self.context, i_ref, i_ref['host'], False, False)
db.instance_destroy(self.context, instance_id)
db.service_destroy(self.context, s_ref['id'])
@@ -1108,7 +1125,7 @@ class SimpleDriverTestCase(test.TestCase):
self.assertRaises(exception.UnableToMigrateToSelf,
self.scheduler.driver._live_migration_dest_check,
- self.context, i_ref, i_ref['host'], False)
+ self.context, i_ref, i_ref['host'], False, False)
db.instance_destroy(self.context, instance_id)
db.service_destroy(self.context, s_ref['id'])
@@ -1123,7 +1140,7 @@ class SimpleDriverTestCase(test.TestCase):
self.assertRaises(exception.MigrationError,
self.scheduler.driver._live_migration_dest_check,
- self.context, i_ref, 'somewhere', False)
+ self.context, i_ref, 'somewhere', False, False)
db.instance_destroy(self.context, instance_id)
db.instance_destroy(self.context, instance_id2)
@@ -1139,7 +1156,7 @@ class SimpleDriverTestCase(test.TestCase):
self.assertRaises(exception.MigrationError,
self.scheduler.driver._live_migration_dest_check,
- self.context, i_ref, 'somewhere', True)
+ self.context, i_ref, 'somewhere', True, False)
db.instance_destroy(self.context, instance_id)
db.instance_destroy(self.context, instance_id2)
@@ -1155,7 +1172,7 @@ class SimpleDriverTestCase(test.TestCase):
ret = self.scheduler.driver._live_migration_dest_check(self.context,
i_ref,
'somewhere',
- False)
+ False, False)
self.assertTrue(ret is None)
db.instance_destroy(self.context, instance_id)
db.service_destroy(self.context, s_ref['id'])
@@ -1191,7 +1208,7 @@ class SimpleDriverTestCase(test.TestCase):
#self.assertRaises(exception.SourceHostUnavailable,
self.assertRaises(exception.FileNotFound,
self.scheduler.driver._live_migration_common_check,
- self.context, i_ref, dest, False)
+ self.context, i_ref, dest, False, False)
db.instance_destroy(self.context, instance_id)
db.service_destroy(self.context, s_ref['id'])
@@ -1215,7 +1232,7 @@ class SimpleDriverTestCase(test.TestCase):
self.mox.ReplayAll()
self.assertRaises(exception.InvalidHypervisorType,
self.scheduler.driver._live_migration_common_check,
- self.context, i_ref, dest, False)
+ self.context, i_ref, dest, False, False)
db.instance_destroy(self.context, instance_id)
db.service_destroy(self.context, s_ref['id'])
@@ -1241,7 +1258,7 @@ class SimpleDriverTestCase(test.TestCase):
self.mox.ReplayAll()
self.assertRaises(exception.DestinationHypervisorTooOld,
self.scheduler.driver._live_migration_common_check,
- self.context, i_ref, dest, False)
+ self.context, i_ref, dest, False, False)
db.instance_destroy(self.context, instance_id)
db.service_destroy(self.context, s_ref['id'])
@@ -1275,6 +1292,7 @@ class SimpleDriverTestCase(test.TestCase):
driver._live_migration_common_check(self.context,
i_ref,
dest,
+ False,
False)
except rpc.RemoteError, e:
c = (e.exc_type == exception.InvalidCPUInfo)
@@ -1284,6 +1302,20 @@ class SimpleDriverTestCase(test.TestCase):
db.service_destroy(self.context, s_ref['id'])
db.service_destroy(self.context, s_ref2['id'])
+ def test_exception_puts_instance_in_error_state(self):
+ """Test that an exception from the scheduler puts an instance
+ in the ERROR state."""
+
+ scheduler = manager.SchedulerManager()
+ ctxt = context.get_admin_context()
+ inst = _create_instance()
+ self.assertRaises(Exception, scheduler._schedule,
+ 'failing_method', ctxt, 'scheduler',
+ instance_id=inst['uuid'])
+
+ # Refresh the instance
+ inst = db.instance_get(ctxt, inst['id'])
+
class MultiDriverTestCase(SimpleDriverTestCase):
"""Test case for multi driver."""
diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index 56906b81f..bf36e7431 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -1194,6 +1194,10 @@ class ComputeTestCase(BaseTestCase):
self.mox.StubOutWithMock(rpc, 'call')
rpc.call(c, FLAGS.volume_topic, {"method": "check_for_export",
"args": {'instance_id': instance_id}})
+
+ self.mox.StubOutWithMock(self.compute.driver, 'get_instance_disk_info')
+ self.compute.driver.get_instance_disk_info(inst_ref.name)
+
rpc.call(c, topic, {"method": "pre_live_migration",
"args": {'instance_id': instance_id,
'block_migration': True,
diff --git a/nova/tests/test_instance_types.py b/nova/tests/test_instance_types.py
index 6c7b9d76e..aa277206f 100644
--- a/nova/tests/test_instance_types.py
+++ b/nova/tests/test_instance_types.py
@@ -144,7 +144,7 @@ class InstanceTypeTestCase(test.TestCase):
instance_types.create(name, 256, 1, 120, 'flavor1')
self.assertRaises(exception.ApiError,
instance_types.create,
- name, 256, 1, 120, 'flavor2')
+ name, "256", 1, 120, 'flavor2')
def test_duplicate_flavorids_fail(self):
"""Ensures that flavorid duplicates raise ApiError"""
diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py
index 15b7f7259..782e01563 100644
--- a/nova/tests/test_libvirt.py
+++ b/nova/tests/test_libvirt.py
@@ -757,67 +757,6 @@ class LibvirtConnTestCase(test.TestCase):
self.assertEquals(conn.uri, testuri)
db.instance_destroy(user_context, instance_ref['id'])
- def test_update_available_resource_works_correctly(self):
- """Confirm compute_node table is updated successfully."""
- self.flags(instances_path='.')
-
- # Prepare mocks
- def getVersion():
- return 12003
-
- def getType():
- return 'qemu'
-
- def listDomainsID():
- return []
-
- service_ref = self.create_service(host='dummy')
- self.create_fake_libvirt_mock(getVersion=getVersion,
- getType=getType,
- listDomainsID=listDomainsID)
- self.mox.StubOutWithMock(connection.LibvirtConnection,
- 'get_cpu_info')
- connection.LibvirtConnection.get_cpu_info().AndReturn('cpuinfo')
-
- # Start test
- self.mox.ReplayAll()
- conn = connection.LibvirtConnection(False)
- conn.update_available_resource(self.context, 'dummy')
- service_ref = db.service_get(self.context, service_ref['id'])
- compute_node = service_ref['compute_node'][0]
-
- if sys.platform.upper() == 'LINUX2':
- self.assertTrue(compute_node['vcpus'] >= 0)
- self.assertTrue(compute_node['memory_mb'] > 0)
- self.assertTrue(compute_node['local_gb'] > 0)
- self.assertTrue(compute_node['vcpus_used'] == 0)
- self.assertTrue(compute_node['memory_mb_used'] > 0)
- self.assertTrue(compute_node['local_gb_used'] > 0)
- self.assertTrue(len(compute_node['hypervisor_type']) > 0)
- self.assertTrue(compute_node['hypervisor_version'] > 0)
- else:
- self.assertTrue(compute_node['vcpus'] >= 0)
- self.assertTrue(compute_node['memory_mb'] == 0)
- self.assertTrue(compute_node['local_gb'] > 0)
- self.assertTrue(compute_node['vcpus_used'] == 0)
- self.assertTrue(compute_node['memory_mb_used'] == 0)
- self.assertTrue(compute_node['local_gb_used'] > 0)
- self.assertTrue(len(compute_node['hypervisor_type']) > 0)
- self.assertTrue(compute_node['hypervisor_version'] > 0)
-
- db.service_destroy(self.context, service_ref['id'])
-
- def test_update_resource_info_no_compute_record_found(self):
- """Raise exception if no recorde found on services table."""
- self.flags(instances_path='.')
- self.create_fake_libvirt_mock()
-
- self.mox.ReplayAll()
- conn = connection.LibvirtConnection(False)
- self.assertRaises(exception.ComputeServiceUnavailable,
- conn.update_available_resource,
- self.context, 'dummy')
-
@test.skip_if(missing_libvirt(), "Test requires libvirt")
def test_ensure_filtering_rules_for_instance_timeout(self):
"""ensure_filtering_fules_for_instance() finishes with timeout."""
@@ -950,7 +889,7 @@ class LibvirtConnTestCase(test.TestCase):
# Test data
instance_ref = db.instance_create(self.context, self.test_instance)
- dummyjson = ('[{"path": "%s/disk", "local_gb": "10G",'
+ dummyjson = ('[{"path": "%s/disk", "disk_size": "10737418240",'
' "type": "raw", "backing_file": ""}]')
# Preparing mocks
@@ -984,6 +923,13 @@ class LibvirtConnTestCase(test.TestCase):
"<target dev='vdb' bus='virtio'/></disk>"
"</devices></domain>")
+ ret = ("image: /test/disk\n"
+ "file format: raw\n"
+ "virtual size: 20G (21474836480 bytes)\n"
+ "disk size: 3.1G\n"
+ "cluster_size: 2097152\n"
+ "backing file: /test/dummy (actual path: /backing/file)\n")
+
# Preparing mocks
vdmock = self.mox.CreateMock(libvirt.virDomain)
self.mox.StubOutWithMock(vdmock, "XMLDesc")
@@ -998,18 +944,27 @@ class LibvirtConnTestCase(test.TestCase):
fake_libvirt_utils.disk_sizes['/test/disk'] = 10 * GB
fake_libvirt_utils.disk_sizes['/test/disk.local'] = 20 * GB
fake_libvirt_utils.disk_backing_files['/test/disk.local'] = 'file'
+
+ self.mox.StubOutWithMock(os.path, "getsize")
+ os.path.getsize('/test/disk').AndReturn((10737418240))
+
+ self.mox.StubOutWithMock(utils, "execute")
+ utils.execute('qemu-img', 'info', '/test/disk.local').\
+ AndReturn((ret, ''))
+
+ os.path.getsize('/test/disk.local').AndReturn((21474836480))
+
self.mox.ReplayAll()
conn = connection.LibvirtConnection(False)
- info = conn.get_instance_disk_info(self.context, instance_ref)
+ info = conn.get_instance_disk_info(instance_ref.name)
info = utils.loads(info)
-
self.assertEquals(info[0]['type'], 'raw')
- self.assertEquals(info[1]['type'], 'qcow2')
self.assertEquals(info[0]['path'], '/test/disk')
- self.assertEquals(info[1]['path'], '/test/disk.local')
- self.assertEquals(info[0]['local_gb'], '10G')
- self.assertEquals(info[1]['local_gb'], '20G')
+ self.assertEquals(info[0]['disk_size'], 10737418240)
self.assertEquals(info[0]['backing_file'], "")
+ self.assertEquals(info[1]['type'], 'qcow2')
+ self.assertEquals(info[1]['path'], '/test/disk.local')
+ self.assertEquals(info[1]['virt_disk_size'], 21474836480)
self.assertEquals(info[1]['backing_file'], "file")
db.instance_destroy(self.context, instance_ref['id'])
@@ -1188,6 +1143,9 @@ class HostStateTestCase(test.TestCase):
def get_hypervisor_version(self):
return 13091
+ def get_disk_available_least(self):
+ return 13091
+
def test_update_status(self):
self.mox.StubOutWithMock(connection, 'get_connection')
connection.get_connection(True).AndReturn(self.FakeConnection())
diff --git a/nova/tests/test_virt_drivers.py b/nova/tests/test_virt_drivers.py
index 8c5cda6f0..9c17b3b0a 100644
--- a/nova/tests/test_virt_drivers.py
+++ b/nova/tests/test_virt_drivers.py
@@ -324,11 +324,6 @@ class _VirtDriverTestCase(test.TestCase):
self.connection.refresh_provider_fw_rules()
@catch_notimplementederror
- def test_update_available_resource(self):
- self.compute = self.start_service('compute', host='dummy')
- self.connection.update_available_resource(self.ctxt, 'dummy')
-
- @catch_notimplementederror
def test_compare_cpu(self):
cpu_info = '''{ "topology": {
"sockets": 1,
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index 12e15c991..77cde2a02 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -1042,45 +1042,21 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase):
self.fake_instance.os_type = 'linux'
self.fake_instance.architecture = 'x86-64'
- def assert_disk_type(self, disk_type):
- ctx = context.RequestContext('fake', 'fake')
- fake_glance = glance_stubs.FakeGlance('')
- image_meta = fake_glance.get_image_meta(self.fake_instance.image_ref)
- dt = vm_utils.VMHelper.determine_disk_image_type(image_meta)
- self.assertEqual(disk_type, dt)
-
- def test_instance_disk(self):
- """If a kernel is specified, the image type is DISK (aka machine)."""
- self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_MACHINE
- self.fake_instance.kernel_id = glance_stubs.FakeGlance.IMAGE_KERNEL
- self.assert_disk_type(vm_utils.ImageType.DISK)
-
- def test_instance_disk_raw(self):
- """
- If the kernel isn't specified, and we're not using Glance, then
- DISK_RAW is assumed.
- """
- self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_RAW
- self.fake_instance.kernel_id = None
- self.assert_disk_type(vm_utils.ImageType.DISK_RAW)
+ def assert_disk_type(self, image_meta, expected_disk_type):
+ actual = vm_utils.VMHelper.determine_disk_image_type(image_meta)
+ self.assertEqual(expected_disk_type, actual)
- def test_glance_disk_raw(self):
- """
- If we're using Glance, then defer to the image_type field, which in
- this case will be 'raw'.
- """
- self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_RAW
- self.fake_instance.kernel_id = None
- self.assert_disk_type(vm_utils.ImageType.DISK_RAW)
+ def test_machine(self):
+ image_meta = {'id': 'a', 'disk_format': 'ami'}
+ self.assert_disk_type(image_meta, vm_utils.ImageType.DISK)
- def test_glance_disk_vhd(self):
- """
- If we're using Glance, then defer to the image_type field, which in
- this case will be 'vhd'.
- """
- self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_VHD
- self.fake_instance.kernel_id = None
- self.assert_disk_type(vm_utils.ImageType.DISK_VHD)
+ def test_raw(self):
+ image_meta = {'id': 'a', 'disk_format': 'raw'}
+ self.assert_disk_type(image_meta, vm_utils.ImageType.DISK_RAW)
+
+ def test_vhd(self):
+ image_meta = {'id': 'a', 'disk_format': 'vhd'}
+ self.assert_disk_type(image_meta, vm_utils.ImageType.DISK_VHD)
class CompareVersionTestCase(test.TestCase):
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index b9fd8f30c..3311834c2 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -283,7 +283,7 @@ class FakeConnection(driver.ComputeDriver):
"""This method is supported only by libvirt."""
raise NotImplementedError('This method is supported only by libvirt.')
- def get_instance_disk_info(self, ctxt, instance_ref):
+ def get_instance_disk_info(self, instance_name):
"""This method is supported only by libvirt."""
return
@@ -319,3 +319,7 @@ class FakeConnection(driver.ComputeDriver):
def set_host_enabled(self, host, enabled):
"""Sets the specified host's ability to accept new instances."""
pass
+
+ def get_disk_available_least(self):
+ """ """
+ pass
diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py
index cfcda9f84..85c48e495 100644
--- a/nova/virt/libvirt/connection.py
+++ b/nova/virt/libvirt/connection.py
@@ -1562,7 +1562,8 @@ class LibvirtConnection(driver.ComputeDriver):
'local_gb_used': self.get_local_gb_used(),
'hypervisor_type': self.get_hypervisor_type(),
'hypervisor_version': self.get_hypervisor_version(),
- 'cpu_info': self.get_cpu_info()}
+ 'cpu_info': self.get_cpu_info(),
+ 'disk_available_least': self.get_disk_available_least()}
compute_node_ref = service_ref['compute_node']
if not compute_node_ref:
@@ -1773,7 +1774,7 @@ class LibvirtConnection(driver.ComputeDriver):
instance_disk = os.path.join(instance_dir, base)
if not info['backing_file']:
libvirt_utils.create_image(info['type'], instance_disk,
- info['local_gb'])
+ info['disk_size'])
else:
# Creating backing file follows same way as spawning instances.
backing_file = os.path.join(FLAGS.instances_path,
@@ -1842,7 +1843,7 @@ class LibvirtConnection(driver.ComputeDriver):
dom = self._lookup_by_name(instance_ref.name)
self._conn.defineXML(dom.XMLDesc(0))
- def get_instance_disk_info(self, ctxt, instance_ref):
+ def get_instance_disk_info(self, instance_name):
"""Preparation block migration.
:params ctxt: security context
@@ -1851,12 +1852,15 @@ class LibvirtConnection(driver.ComputeDriver):
instance object that is migrated.
:return:
json strings with below format.
- "[{'path':'disk', 'type':'raw', 'local_gb':'10G'},...]"
+ "[{'path':'disk', 'type':'raw',
+ 'virt_disk_size':'10737418240',
+ 'backing_file':'backing_file',
+ 'disk_size':'83886080'},...]"
"""
disk_info = []
- virt_dom = self._lookup_by_name(instance_ref.name)
+ virt_dom = self._lookup_by_name(instance_name)
xml = virt_dom.XMLDesc(0)
doc = ElementTree.fromstring(xml)
disk_nodes = doc.findall('.//devices/disk')
@@ -1873,31 +1877,58 @@ class LibvirtConnection(driver.ComputeDriver):
continue
disk_type = driver_nodes[cnt].get('type')
- size = libvirt_utils.get_disk_size(path)
if disk_type == 'raw':
+ dk_size = int(os.path.getsize(path))
backing_file = ""
+ virt_size = 0
else:
- backing_file = libvirt_utils.get_backing_file(path)
-
- # block migration needs same/larger size of empty image on the
- # destination host. since qemu-img creates bit smaller size image
- # depending on original image size, fixed value is necessary.
- for unit, divisor in [('G', 1024 ** 3), ('M', 1024 ** 2),
- ('K', 1024), ('', 1)]:
- if size / divisor == 0:
- continue
- if size % divisor != 0:
- size = size / divisor + 1
- else:
- size = size / divisor
- size = str(size) + unit
- break
+ out, err = utils.execute('qemu-img', 'info', path)
+
+ # Parse the byte count from the "virtual size:" line,
+ # e.g. "virtual size: 20G (21474836480 bytes)":
+ size = [i.split('(')[1].split()[0] for i in out.split('\n')
+ if i.strip().find('virtual size') >= 0]
+ virt_size = int(size[0])
- disk_info.append({'type': disk_type, 'path': path,
- 'local_gb': size, 'backing_file': backing_file})
+ # real disk size:
+ dk_size = int(os.path.getsize(path))
+ # backing file (actual path, if any):
+ backing_file = libvirt_utils.get_disk_backing_file(path)
+
+ disk_info.append({'type': disk_type,
+ 'path': path,
+ 'virt_disk_size': virt_size,
+ 'backing_file': backing_file,
+ 'disk_size': dk_size})
return utils.dumps(disk_info)
+ def get_disk_available_least(self):
+ """Return disk available least size.
+
+ The size of available disk, when block_migration command given
+ disk_over_commit param is FALSE.
+
+ The size that deducted real nstance disk size from the total size
+ of the virtual disk of all instances.
+
+ """
+ # Free disk space on the host, in GB
+ dk_sz_gb = self.get_local_gb_total() - self.get_local_gb_used()
+
+ # Total potential disk growth of all instances: virtual_size - disk_size
+ instances_name = self.list_instances()
+ instances_sz = 0
+ for i_name in instances_name:
+ disk_infos = utils.loads(self.get_instance_disk_info(i_name))
+ for info in disk_infos:
+ i_vt_sz = int(info['virt_disk_size'])
+ i_dk_sz = int(info['disk_size'])
+ instances_sz += i_vt_sz - i_dk_sz
+
+ # Disk available least size
+ available_least_size = dk_sz_gb * (1024 ** 3) - instances_sz
+ return (available_least_size / 1024 / 1024 / 1024)
+
def unfilter_instance(self, instance_ref, network_info):
"""See comments of same method in firewall_driver."""
self.firewall_driver.unfilter_instance(instance_ref,
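For reference, get_instance_disk_info now returns JSON like the following, and get_disk_available_least subtracts each disk's potential growth (virt_disk_size - disk_size) from the host's free space. Paths and sizes here are hypothetical:

    disk_infos = utils.loads(conn.get_instance_disk_info('instance-00000001'))
    # [{'type': 'qcow2', 'path': '/test/disk.local',
    #   'virt_disk_size': 21474836480,   # 20 GB maximum
    #   'disk_size': 3328599655,         # ~3.1 GB on disk
    #   'backing_file': 'file'}]
    growth = sum(int(i['virt_disk_size']) - int(i['disk_size'])
                 for i in disk_infos)
    # With 80 GB free, disk_available_least comes out around 63 GB.
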
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 6cda61204..3c03d2a59 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -705,35 +705,28 @@ class VMHelper(HelperBase):
2. If we're not using Glance, then we need to deduce this based on
whether a kernel_id is specified.
"""
- def log_disk_format(image_type):
- pretty_format = {ImageType.KERNEL: 'KERNEL',
- ImageType.RAMDISK: 'RAMDISK',
- ImageType.DISK: 'DISK',
- ImageType.DISK_RAW: 'DISK_RAW',
- ImageType.DISK_VHD: 'DISK_VHD',
- ImageType.DISK_ISO: 'DISK_ISO'}
- disk_format = pretty_format[image_type]
- image_ref = image_meta['id']
- LOG.debug(_("Detected %(disk_format)s format for image "
- "%(image_ref)s") % locals())
-
- def determine_from_image_meta():
- glance_disk_format2nova_type = {
- 'ami': ImageType.DISK,
- 'aki': ImageType.KERNEL,
- 'ari': ImageType.RAMDISK,
- 'raw': ImageType.DISK_RAW,
- 'vhd': ImageType.DISK_VHD,
- 'iso': ImageType.DISK_ISO}
- disk_format = image_meta['disk_format']
- try:
- return glance_disk_format2nova_type[disk_format]
- except KeyError:
- raise exception.InvalidDiskFormat(disk_format=disk_format)
+ disk_format = image_meta['disk_format']
+
+ disk_format_map = {
+ 'ami': 'DISK',
+ 'aki': 'KERNEL',
+ 'ari': 'RAMDISK',
+ 'raw': 'DISK_RAW',
+ 'vhd': 'DISK_VHD',
+ 'iso': 'DISK_ISO',
+ }
+
+ try:
+ image_type_str = disk_format_map[disk_format]
+ except KeyError:
+ raise exception.InvalidDiskFormat(disk_format=disk_format)
+
+ image_type = getattr(ImageType, image_type_str)
- image_type = determine_from_image_meta()
+ image_ref = image_meta['id']
+ msg = _("Detected %(image_type_str)s format for image %(image_ref)s")
+ LOG.debug(msg % locals())
- log_disk_format(image_type)
return image_type
@classmethod
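Usage of the classmethod is unchanged by the refactor; only the lookup is flattened. For example (image_meta as in the updated tests):

    image_meta = {'id': 'a', 'disk_format': 'vhd'}
    image_type = vm_utils.VMHelper.determine_disk_image_type(image_meta)
    assert image_type == vm_utils.ImageType.DISK_VHD
    # An unknown disk_format such as 'qcow2' raises InvalidDiskFormat.
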
diff --git a/po/bs.po b/po/bs.po
new file mode 100644
index 000000000..dd9f9fc12
--- /dev/null
+++ b/po/bs.po
@@ -0,0 +1,2801 @@
+# Bosnian translation for nova
+# Copyright (c) 2011 Rosetta Contributors and Canonical Ltd 2011
+# This file is distributed under the same license as the nova package.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2011.
+#
+msgid ""
+msgstr ""
+"Project-Id-Version: nova\n"
+"Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n"
+"POT-Creation-Date: 2011-02-21 10:03-0500\n"
+"PO-Revision-Date: 2011-12-27 07:56+0000\n"
+"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
+"Language-Team: Bosnian <bs@li.org>\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"X-Launchpad-Export-Date: 2011-12-28 05:43+0000\n"
+"X-Generator: Launchpad (build 14560)\n"
+
+#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55
+#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110
+#: ../nova/scheduler/simple.py:122
+msgid "No hosts found"
+msgstr ""
+
+#: ../nova/exception.py:54
+msgid "Unexpected error while running command."
+msgstr ""
+
+#: ../nova/exception.py:57
+#, python-format
+msgid ""
+"%(description)s\n"
+"Command: %(cmd)s\n"
+"Exit code: %(exit_code)s\n"
+"Stdout: %(stdout)r\n"
+"Stderr: %(stderr)r"
+msgstr ""
+
+#: ../nova/exception.py:107
+msgid "DB exception wrapped"
+msgstr ""
+
+#. exc_type, exc_value, exc_traceback = sys.exc_info()
+#: ../nova/exception.py:127
+msgid "Uncaught exception"
+msgstr ""
+
+#: ../nova/volume/api.py:45
+#, python-format
+msgid "Quota exceeeded for %(pid)s, tried to create %(size)sG volume"
+msgstr ""
+
+#: ../nova/volume/api.py:57
+#, python-format
+msgid "Volume quota exceeded. You cannot create a volume of size %sG"
+msgstr ""
+
+#: ../nova/volume/api.py:102 ../nova/volume/api.py:172
+#: ../nova/volume/api.py:230
+msgid "Volume status must be available"
+msgstr ""
+
+#: ../nova/volume/api.py:174
+msgid "Volume is already attached"
+msgstr ""
+
+#: ../nova/volume/api.py:180
+msgid "Volume is already detached"
+msgstr ""
+
+#: ../nova/api/openstack/servers.py:72
+msgid "Failed to read private ip"
+msgstr ""
+
+#: ../nova/api/openstack/servers.py:79
+msgid "Failed to read public ip(s)"
+msgstr ""
+
+#: ../nova/api/openstack/servers.py:152
+#, python-format
+msgid "%(param)s property not found for image %(_image_id)s"
+msgstr ""
+
+#: ../nova/api/openstack/servers.py:168
+msgid "No keypairs defined"
+msgstr ""
+
+#: ../nova/api/openstack/v2/contrib/admin_actions.py:165
+#, python-format
+msgid "Compute.api::lock %s"
+msgstr ""
+
+#: ../nova/api/openstack/v2/contrib/admin_actions.py:182
+#, python-format
+msgid "Compute.api::unlock %s"
+msgstr ""
+
+#: ../nova/api/openstack/servers.py:388
+#, python-format
+msgid "Compute.api::get_lock %s"
+msgstr ""
+
+#: ../nova/api/openstack/v2/contrib/admin_actions.py:131
+#, python-format
+msgid "Compute.api::reset_network %s"
+msgstr ""
+
+#: ../nova/api/openstack/v2/contrib/admin_actions.py:59
+#, python-format
+msgid "Compute.api::pause %s"
+msgstr ""
+
+#: ../nova/api/openstack/v2/contrib/admin_actions.py:74
+#, python-format
+msgid "Compute.api::unpause %s"
+msgstr ""
+
+#: ../nova/api/openstack/v2/contrib/admin_actions.py:89
+#, python-format
+msgid "compute.api::suspend %s"
+msgstr ""
+
+#: ../nova/api/openstack/v2/contrib/admin_actions.py:104
+#, python-format
+msgid "compute.api::resume %s"
+msgstr ""
+
+#: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101
+#: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741
+#: ../nova/api/ec2/__init__.py:317
+#, python-format
+msgid "Instance %s not found"
+msgstr ""
+
+#. NOTE: No Resource Pool concept so far
+#: ../nova/virt/xenapi/volumeops.py:50
+#, python-format
+msgid "Attach_volume: %(instance_name)s, %(device_path)s, %(mountpoint)s"
+msgstr ""
+
+#: ../nova/virt/xenapi/volumeops.py:174
+#, python-format
+msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s"
+msgstr ""
+
+#: ../nova/virt/xenapi/volumeops.py:187
+#, python-format
+msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s"
+msgstr ""
+
+#: ../nova/virt/xenapi/volumeops.py:195
+#, python-format
+msgid "Unable to attach volume to instance %s"
+msgstr ""
+
+#: ../nova/virt/xenapi/volumeops.py:198
+#, python-format
+msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s"
+msgstr ""
+
+#. Detach VBD from VM
+#: ../nova/virt/xenapi/volumeops.py:208
+#, python-format
+msgid "Detach_volume: %(instance_name)s, %(mountpoint)s"
+msgstr ""
+
+#: ../nova/virt/xenapi/volumeops.py:216
+#, python-format
+msgid "Unable to locate volume %s"
+msgstr ""
+
+#: ../nova/virt/xenapi/volumeops.py:224
+#, python-format
+msgid "Unable to detach volume %s"
+msgstr ""
+
+#: ../nova/virt/xenapi/volumeops.py:238
+#, python-format
+msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s"
+msgstr ""
+
+#: ../nova/compute/instance_types.py:115 ../nova/compute/instance_types.py:127
+#: ../nova/compute/instance_types.py:141 ../nova/compute/instance_types.py:152
+#, python-format
+msgid "Unknown instance type: %s"
+msgstr ""
+
+#: ../nova/crypto.py:50
+msgid "Filename of root CA"
+msgstr ""
+
+#: ../nova/crypto.py:53
+msgid "Filename of private key"
+msgstr ""
+
+#: ../nova/crypto.py:55
+msgid "Filename of root Certificate Revokation List"
+msgstr ""
+
+#: ../nova/crypto.py:57
+msgid "Where we keep our keys"
+msgstr ""
+
+#: ../nova/crypto.py:59
+msgid "Where we keep our root CA"
+msgstr ""
+
+#: ../nova/crypto.py:61
+msgid "Should we use a CA for each project?"
+msgstr ""
+
+#: ../nova/crypto.py:65
+#, python-format
+msgid "Subject for certificate for users, %s for project, user, timestamp"
+msgstr ""
+
+#: ../nova/crypto.py:70
+#, python-format
+msgid "Subject for certificate for projects, %s for project, timestamp"
+msgstr ""
+
+#: ../nova/crypto.py:75
+#, python-format
+msgid "Subject for certificate for vpns, %s for project, timestamp"
+msgstr ""
+
+#: ../nova/crypto.py:277
+#, python-format
+msgid "Flags path: %s"
+msgstr ""
+
+#: ../nova/scheduler/manager.py:69
+#, python-format
+msgid "Casting to %(topic)s %(host)s for %(method)s"
+msgstr ""
+
+#: ../nova/compute/manager.py:101
+#, python-format
+msgid "check_instance_lock: decorating: |%s|"
+msgstr ""
+
+#: ../nova/compute/manager.py:104
+#, python-format
+msgid ""
+"check_instance_lock: arguments: |%(self)s| |%(context)s| |%(instance_id)s|"
+msgstr ""
+
+#: ../nova/compute/manager.py:107
+#, python-format
+msgid "check_instance_lock: locked: |%s|"
+msgstr ""
+
+#: ../nova/compute/manager.py:109
+#, python-format
+msgid "check_instance_lock: admin: |%s|"
+msgstr ""
+
+#: ../nova/compute/manager.py:114
+#, python-format
+msgid "check_instance_lock: executing: |%s|"
+msgstr ""
+
+#: ../nova/compute/manager.py:118
+#, python-format
+msgid "check_instance_lock: not executing |%s|"
+msgstr ""
+
+#: ../nova/compute/manager.py:368
+msgid "Instance has already been created"
+msgstr ""
+
+#: ../nova/compute/manager.py:425
+#, python-format
+msgid "instance %s: starting..."
+msgstr ""
+
+#: ../nova/virt/xenapi/vmops.py:216
+#, python-format
+msgid "instance %s: Failed to spawn"
+msgstr ""
+
+#: ../nova/compute/manager.py:233 ../nova/tests/test_cloud.py:286
+#, python-format
+msgid "Terminating instance %s"
+msgstr ""
+
+#: ../nova/compute/manager.py:255
+#, python-format
+msgid "Deallocating address %s"
+msgstr ""
+
+#: ../nova/compute/manager.py:558
+#, python-format
+msgid "trying to destroy already destroyed instance: %s"
+msgstr ""
+
+#: ../nova/compute/manager.py:709
+#, python-format
+msgid "Rebooting instance %s"
+msgstr ""
+
+#: ../nova/compute/manager.py:599
+#, python-format
+msgid ""
+"trying to reboot a non-running instance: %(instance_id)s (state: %(state)s "
+"expected: %(running)s)"
+msgstr ""
+
+#: ../nova/compute/manager.py:768
+#, python-format
+msgid "instance %s: snapshotting"
+msgstr ""
+
+#: ../nova/compute/manager.py:651
+#, python-format
+msgid ""
+"trying to snapshot a non-running instance: %(instance_id)s (state: %(state)s "
+"expected: %(running)s)"
+msgstr ""
+
+#: ../nova/compute/manager.py:332
+#, python-format
+msgid ""
+"trying to reset the password on a non-running instance: %(instance_id)s "
+"(state: %(instance_state)s expected: %(expected_state)s)"
+msgstr ""
+
+#: ../nova/compute/manager.py:335
+#, python-format
+msgid "instance %s: setting admin password"
+msgstr ""
+
+#: ../nova/compute/manager.py:772
+#, python-format
+msgid ""
+"trying to inject a file into a non-running instance: %(instance_id)s (state: "
+"%(instance_state)s expected: %(expected_state)s)"
+msgstr ""
+
+#: ../nova/compute/manager.py:362
+#, python-format
+msgid "instance %(nm)s: injecting file to %(plain_path)s"
+msgstr ""
+
+#: ../nova/compute/manager.py:938
+#, python-format
+msgid "instance %s: rescuing"
+msgstr ""
+
+#: ../nova/compute/manager.py:960
+#, python-format
+msgid "instance %s: unrescuing"
+msgstr ""
+
+#: ../nova/compute/manager.py:1240
+#, python-format
+msgid "instance %s: pausing"
+msgstr ""
+
+#: ../nova/compute/manager.py:1257
+#, python-format
+msgid "instance %s: unpausing"
+msgstr ""
+
+#: ../nova/compute/manager.py:1285
+#, python-format
+msgid "instance %s: retrieving diagnostics"
+msgstr ""
+
+#: ../nova/compute/manager.py:1293
+#, python-format
+msgid "instance %s: suspending"
+msgstr ""
+
+#: ../nova/compute/manager.py:1310
+#, python-format
+msgid "instance %s: resuming"
+msgstr ""
+
+#: ../nova/compute/manager.py:1328
+#, python-format
+msgid "instance %s: locking"
+msgstr ""
+
+#: ../nova/compute/manager.py:1336
+#, python-format
+msgid "instance %s: unlocking"
+msgstr ""
+
+#: ../nova/compute/manager.py:1343
+#, python-format
+msgid "instance %s: getting locked state"
+msgstr ""
+
+#: ../nova/compute/manager.py:1352
+#, python-format
+msgid "instance %s: reset network"
+msgstr ""
+
+#: ../nova/compute/manager.py:1372 ../nova/api/ec2/cloud.py:800
+#, python-format
+msgid "Get console output for instance %s"
+msgstr ""
+
+#: ../nova/compute/manager.py:1381
+#, python-format
+msgid "instance %s: getting ajax console"
+msgstr ""
+
+#: ../nova/compute/manager.py:1239
+#, python-format
+msgid ""
+"instance %(instance_id)s: attaching volume %(volume_id)s to %(mountpoint)s"
+msgstr ""
+
+#. pylint: disable=W0702
+#. NOTE(vish): The inline callback eats the exception info so we
+#. log the traceback here and reraise the same
+#. ecxception below.
+#: ../nova/compute/manager.py:1265
+#, python-format
+msgid "instance %(instance_id)s: attach failed %(mountpoint)s, removing"
+msgstr ""
+
+#: ../nova/compute/manager.py:1472
+#, python-format
+msgid ""
+"Detach volume %(volume_id)s from mountpoint %(mp)s on instance "
+"%(instance_id)s"
+msgstr ""
+
+#: ../nova/compute/manager.py:1477
+#, python-format
+msgid "Detaching volume from unknown instance %s"
+msgstr ""
+
+#: ../nova/scheduler/simple.py:52
+#, python-format
+msgid "Host %s is not alive"
+msgstr ""
+
+#: ../nova/scheduler/simple.py:67
+msgid "All hosts have too many cores"
+msgstr ""
+
+#: ../nova/scheduler/simple.py:94 ../nova/scheduler/vsa.py:223
+#, python-format
+msgid "Host %s not available"
+msgstr ""
+
+#: ../nova/scheduler/simple.py:119
+msgid "All hosts have too many gigabytes"
+msgstr ""
+
+#: ../nova/scheduler/simple.py:136
+msgid "All hosts have too many networks"
+msgstr ""
+
+#: ../nova/volume/manager.py:89
+#, python-format
+msgid "Re-exporting %s volumes"
+msgstr ""
+
+#: ../nova/volume/manager.py:94
+#, python-format
+msgid "volume %s: skipping export"
+msgstr ""
+
+#: ../nova/volume/manager.py:100
+#, python-format
+msgid "volume %s: creating"
+msgstr ""
+
+#: ../nova/volume/manager.py:112
+#, python-format
+msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG"
+msgstr ""
+
+#: ../nova/volume/manager.py:124
+#, python-format
+msgid "volume %s: creating export"
+msgstr ""
+
+#: ../nova/volume/manager.py:138
+#, python-format
+msgid "volume %s: created successfully"
+msgstr ""
+
+#: ../nova/volume/manager.py:167
+msgid "Volume is still attached"
+msgstr ""
+
+#: ../nova/volume/manager.py:169
+msgid "Volume is not local to this node"
+msgstr ""
+
+#: ../nova/volume/manager.py:173
+#, python-format
+msgid "volume %s: removing export"
+msgstr ""
+
+#: ../nova/volume/manager.py:175
+#, python-format
+msgid "volume %s: deleting"
+msgstr ""
+
+#: ../nova/volume/manager.py:190
+#, python-format
+msgid "volume %s: deleted successfully"
+msgstr ""
+
+#: ../nova/virt/xenapi/fake.py:77 ../nova/virt/vmwareapi/fake.py:45
+#, python-format
+msgid "%(text)s: _db_content => %(content)s"
+msgstr ""
+
+#: ../nova/virt/xenapi/fake.py:495 ../nova/virt/xenapi/fake.py:594
+#: ../nova/virt/xenapi/fake.py:612 ../nova/virt/xenapi/fake.py:675
+msgid "Raising NotImplemented"
+msgstr ""
+
+#: ../nova/virt/xenapi/fake.py:497
+#, python-format
+msgid "xenapi.fake does not have an implementation for %s"
+msgstr ""
+
+#: ../nova/virt/xenapi/fake.py:531
+#, python-format
+msgid "Calling %(localname)s %(impl)s"
+msgstr ""
+
+#: ../nova/virt/xenapi/fake.py:536
+#, python-format
+msgid "Calling getter %s"
+msgstr ""
+
+#: ../nova/virt/xenapi/fake.py:596
+#, python-format
+msgid ""
+"xenapi.fake does not have an implementation for %s or it has been called "
+"with the wrong number of arguments"
+msgstr ""
+
+#: ../nova/tests/test_cloud.py:256
+msgid "Can't test instances without a real virtual env."
+msgstr ""
+
+#: ../nova/tests/test_cloud.py:268
+#, python-format
+msgid "Need to watch instance %s until it's running..."
+msgstr ""
+
+#: ../nova/virt/connection.py:78
+msgid "Failed to open connection to the hypervisor"
+msgstr ""
+
+#: ../nova/network/linux_net.py:931
+#, python-format
+msgid "Starting VLAN inteface %s"
+msgstr ""
+
+#: ../nova/network/linux_net.py:960
+#, python-format
+msgid "Starting Bridge interface for %s"
+msgstr ""
+
+#. pylint: disable=W0703
+#: ../nova/network/linux_net.py:660
+#, python-format
+msgid "Hupping dnsmasq threw %s"
+msgstr ""
+
+#: ../nova/network/linux_net.py:662
+#, python-format
+msgid "Pid %d is stale, relaunching dnsmasq"
+msgstr ""
+
+#. pylint: disable=W0703
+#: ../nova/network/linux_net.py:722
+#, python-format
+msgid "killing radvd threw %s"
+msgstr ""
+
+#: ../nova/network/linux_net.py:724
+#, python-format
+msgid "Pid %d is stale, relaunching radvd"
+msgstr ""
+
+#. pylint: disable=W0703
+#: ../nova/network/linux_net.py:763
+#, python-format
+msgid "Killing dnsmasq threw %s"
+msgstr ""
+
+#: ../nova/utils.py:66
+#, python-format
+msgid "Inner Exception: %s"
+msgstr ""
+
+#: ../nova/utils.py:59
+#, python-format
+msgid "Class %s cannot be found"
+msgstr ""
+
+#: ../nova/utils.py:150
+#, python-format
+msgid "Fetching %s"
+msgstr ""
+
+#: ../nova/utils.py:198
+#, python-format
+msgid "Running cmd (subprocess): %s"
+msgstr ""
+
+#: ../nova/utils.py:214 ../nova/utils.py:264
+#, python-format
+msgid "Result was %s"
+msgstr ""
+
+#: ../nova/utils.py:240
+#, python-format
+msgid "Running cmd (SSH): %s"
+msgstr ""
+
+#: ../nova/utils.py:304
+#, python-format
+msgid "debug in callback: %s"
+msgstr ""
+
+#: ../nova/utils.py:309
+#, python-format
+msgid "Running %s"
+msgstr ""
+
+#: ../nova/utils.py:414
+#, python-format
+msgid "Link Local address is not found.:%s"
+msgstr ""
+
+#: ../nova/utils.py:417
+#, python-format
+msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s"
+msgstr ""
+
+#: ../nova/utils.py:514
+#, python-format
+msgid "Invalid backend: %s"
+msgstr ""
+
+#: ../nova/utils.py:525
+#, python-format
+msgid "backend %s"
+msgstr ""
+
+#: ../nova/testing/fake/rabbit.py:50
+#, python-format
+msgid "(%(nm)s) publish (key: %(routing_key)s) %(message)s"
+msgstr ""
+
+#: ../nova/testing/fake/rabbit.py:54
+#, python-format
+msgid "Publishing to route %s"
+msgstr ""
+
+#: ../nova/testing/fake/rabbit.py:84
+#, python-format
+msgid "Declaring queue %s"
+msgstr ""
+
+#: ../nova/testing/fake/rabbit.py:90
+#, python-format
+msgid "Declaring exchange %s"
+msgstr ""
+
+#: ../nova/testing/fake/rabbit.py:96
+#, python-format
+msgid "Binding %(queue)s to %(exchange)s with key %(routing_key)s"
+msgstr ""
+
+#: ../nova/testing/fake/rabbit.py:133
+#, python-format
+msgid "Getting from %(queue)s: %(message)s"
+msgstr ""
+
+#: ../nova/virt/xenapi/vm_utils.py:192 ../nova/virt/hyperv.py:189
+#, python-format
+msgid "Created VM %s..."
+msgstr ""
+
+#: ../nova/virt/xenapi/vm_utils.py:195
+#, python-format
+msgid "Created VM %(instance_name)s as %(vm_ref)s."
+msgstr ""
+
+#: ../nova/virt/xenapi/volume_utils.py:206
+#, python-format
+msgid "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... "
+msgstr ""
+
+#: ../nova/virt/xenapi/volume_utils.py:209
+#, python-format
+msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s."
+msgstr ""
+
+#: ../nova/virt/xenapi/vm_utils.py:246
+#, python-format
+msgid "VBD not found in instance %s"
+msgstr ""
+
+#: ../nova/virt/xenapi/vm_utils.py:257
+#, python-format
+msgid "Unable to unplug VBD %s"
+msgstr ""
+
+#: ../nova/virt/xenapi/vm_utils.py:268
+#, python-format
+msgid "Unable to destroy VBD %s"
+msgstr ""
+
+#: ../nova/virt/xenapi/vmops.py:1449
+#, python-format
+msgid "Creating VIF for VM %(vm_ref)s, network %(network_ref)s."
+msgstr ""
+
+#: ../nova/virt/xenapi/vmops.py:1452
+#, python-format
+msgid "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s."
+msgstr ""
+
+#: ../nova/virt/xenapi/vm_utils.py:295
+#, python-format
+msgid ""
+"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s) on "
+"%(sr_ref)s."
+msgstr ""
+
+#: ../nova/virt/xenapi/vm_utils.py:322
+#, python-format
+msgid "Snapshotting VM %(vm_ref)s with label '%(label)s'..."
+msgstr ""
+
+#: ../nova/virt/xenapi/vm_utils.py:336
+#, python-format
+msgid "Created snapshot %(template_vm_ref)s from VM %(vm_ref)s."
+msgstr ""
+
+#. NOTE(sirp): Currently we only support uploading images as VHD, there
+#. is no RAW equivalent (yet)
+#: ../nova/virt/xenapi/vm_utils.py:366
+#, python-format
+msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s"
+msgstr ""
+
+#: ../nova/virt/xenapi/vm_utils.py:327
+#, python-format
+msgid "Size for image %(image)s:%(virtual_size)d"
+msgstr ""
+
+#. TODO(sirp): This should eventually be the URI for the Glance image
+#: ../nova/virt/xenapi/vm_utils.py:1157
+#, python-format
+msgid "Glance image %s"
+msgstr ""
+
+#. We need to invoke a plugin for copying the
+#. content of the VDI into the proper path.
+#: ../nova/virt/xenapi/vm_utils.py:671
+#, python-format
+msgid "Copying VDI %s to /boot/guest on dom0"
+msgstr ""
+
+#: ../nova/virt/xenapi/vm_utils.py:681
+#, python-format
+msgid "Kernel/Ramdisk VDI %s destroyed"
+msgstr ""
+
+#: ../nova/virt/xenapi/vm_utils.py:361
+#, python-format
+msgid "Asking xapi to fetch %(url)s as %(access)s"
+msgstr ""
+
+#: ../nova/virt/xenapi/vm_utils.py:759
+#, python-format
+msgid "Looking up vdi %s for PV kernel"
+msgstr ""
+
+#: ../nova/virt/xenapi/vm_utils.py:397
+#, python-format
+msgid "PV Kernel in VDI:%s"
+msgstr ""
+
+#: ../nova/virt/xenapi/vm_utils.py:1294
+#, python-format
+msgid "Running pygrub against %s"
+msgstr ""
+
+#: ../nova/virt/xenapi/vm_utils.py:1301
+#, python-format
+msgid "Found Xen kernel %s"
+msgstr ""
+
+#: ../nova/virt/xenapi/vm_utils.py:1303
+msgid "No Xen kernel found. Booting HVM."
+msgstr ""
+
+#: ../nova/virt/hyperv.py:450
+#, python-format
+msgid "duplicate name found: %s"
+msgstr ""
+
+#: ../nova/virt/xenapi/vm_utils.py:819
+#, python-format
+msgid "VDI %s is still available"
+msgstr ""
+
+#: ../nova/virt/xenapi/vm_utils.py:857
+#, python-format
+msgid "(VM_UTILS) xenserver vm state -> |%s|"
+msgstr ""
+
+#: ../nova/virt/xenapi/vm_utils.py:859
+#, python-format
+msgid "(VM_UTILS) xenapi power_state -> |%s|"
+msgstr ""
+
+#: ../nova/virt/xenapi/vm_utils.py:1101
+#, python-format
+msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s"
+msgstr ""
+
+#: ../nova/virt/xenapi/vm_utils.py:915
+#, python-format
+msgid "Re-scanning SR %s"
+msgstr ""
+
+#: ../nova/virt/xenapi/vm_utils.py:1152
+#, python-format
+msgid ""
+"VHD coalesce attempts exceeded (%(counter)d > %(max_attempts)d), giving up..."
+msgstr ""
+
+#: ../nova/virt/xenapi/vm_utils.py:1159
+#, python-format
+msgid ""
+"Parent %(parent_uuid)s doesn't match original parent "
+"%(original_parent_uuid)s, waiting for coalesce..."
+msgstr ""
+
+#: ../nova/virt/xenapi/vm_utils.py:911
+#, python-format
+msgid "No VDIs found for VM %s"
+msgstr ""
+
+#: ../nova/virt/xenapi/vm_utils.py:594
+#, python-format
+msgid "Unexpected number of VDIs (%(num_vdis)s) found for VM %(vm_ref)s"
+msgstr ""
+
+#: ../nova/virt/xenapi/vm_utils.py:1221
+#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:188
+#, python-format
+msgid "Creating VBD for VDI %s ... "
+msgstr ""
+
+#: ../nova/virt/xenapi/vm_utils.py:1223
+#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:190
+#, python-format
+msgid "Creating VBD for VDI %s done."
+msgstr ""
+
+#: ../nova/virt/xenapi/vm_utils.py:1225
+#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:192
+#, python-format
+msgid "Plugging VBD %s ... "
+msgstr ""
+
+#: ../nova/virt/xenapi/vm_utils.py:1228
+#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:194
+#, python-format
+msgid "Plugging VBD %s done."
+msgstr ""
+
+#: ../nova/virt/xenapi/vm_utils.py:661
+#, python-format
+msgid "VBD %(vbd)s plugged as %(orig_dev)s"
+msgstr ""
+
+#: ../nova/virt/xenapi/vm_utils.py:664
+#, python-format
+msgid "VBD %(vbd)s plugged into wrong dev, remapping to %(dev)s"
+msgstr ""
+
+#: ../nova/virt/xenapi/vm_utils.py:1242
+#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:197
+#, python-format
+msgid "Destroying VBD for VDI %s ... "
+msgstr ""
+
+#: ../nova/virt/xenapi/vm_utils.py:1246
+#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:200
+#, python-format
+msgid "Destroying VBD for VDI %s done."
+msgstr ""
+
+#: ../nova/virt/xenapi/vm_utils.py:1258
+#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:211
+msgid "VBD.unplug successful first time."
+msgstr ""
+
+#: ../nova/virt/xenapi/vm_utils.py:1263
+#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:216
+msgid "VBD.unplug rejected: retrying..."
+msgstr ""
+
+#: ../nova/virt/xenapi/vm_utils.py:1268
+#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:220
+msgid "VBD.unplug successful eventually."
+msgstr ""
+
+#: ../nova/virt/xenapi/vm_utils.py:1271
+#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:223
+#, python-format
+msgid "Ignoring XenAPI.Failure in VBD.unplug: %s"
+msgstr ""
+
+#: ../nova/virt/xenapi/vm_utils.py:1280
+#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:66
+#, python-format
+msgid "Ignoring XenAPI.Failure %s"
+msgstr ""
+
+#: ../nova/virt/xenapi/vm_utils.py:1140
+#, python-format
+msgid ""
+"Writing partition table %(primary_first)d %(primary_last)d to %(dest)s..."
+msgstr ""
+
+#: ../nova/virt/xenapi/vm_utils.py:1361
+#, python-format
+msgid "Writing partition table %s done."
+msgstr ""
+
+#: ../nova/tests/rpc/common.py:142
+#, python-format
+msgid "Nested received %(queue)s, %(value)s"
+msgstr ""
+
+#: ../nova/tests/rpc/common.py:150
+#, python-format
+msgid "Nested return %s"
+msgstr ""
+
+#: ../nova/tests/rpc/common.py:176 ../nova/tests/rpc/common.py:182
+#, python-format
+msgid "Received %s"
+msgstr ""
+
+#: ../nova/db/sqlalchemy/api.py:50
+msgid "Use of empty request context is deprecated"
+msgstr ""
+
+#: ../nova/db/sqlalchemy/api.py:133
+#, python-format
+msgid "No service for id %s"
+msgstr ""
+
+#: ../nova/db/sqlalchemy/api.py:251
+#, python-format
+msgid "No service for %(host)s, %(binary)s"
+msgstr ""
+
+#: ../nova/db/sqlalchemy/api.py:592
+msgid "No fixed ips defined"
+msgstr ""
+
+#: ../nova/db/sqlalchemy/api.py:608
+#, python-format
+msgid "No floating ip for address %s"
+msgstr ""
+
+#: ../nova/db/sqlalchemy/api.py:629
+#, python-format
+msgid "No address for instance %s"
+msgstr ""
+
+#: ../nova/db/sqlalchemy/api.py:961
+#, python-format
+msgid "no keypair for user %(user_id)s, name %(name)s"
+msgstr ""
+
+#: ../nova/db/sqlalchemy/api.py:1076 ../nova/db/sqlalchemy/api.py:1156
+#, python-format
+msgid "No network for id %s"
+msgstr ""
+
+#: ../nova/db/sqlalchemy/api.py:1086
+msgid "No networks defined"
+msgstr ""
+
+#: ../nova/db/sqlalchemy/api.py:1115
+#, python-format
+msgid "No network for bridge %s"
+msgstr ""
+
+#: ../nova/db/sqlalchemy/api.py:1129 ../nova/db/sqlalchemy/api.py:1142
+#, python-format
+msgid "No network for instance %s"
+msgstr ""
+
+#: ../nova/db/sqlalchemy/api.py:1277
+#, python-format
+msgid "Token %s does not exist"
+msgstr ""
+
+#: ../nova/db/sqlalchemy/api.py:1302
+#, python-format
+msgid "No quota for project_id %s"
+msgstr ""
+
+#: ../nova/db/sqlalchemy/api.py:1455 ../nova/db/sqlalchemy/api.py:1501
+#: ../nova/api/ec2/__init__.py:323
+#, python-format
+msgid "Volume %s not found"
+msgstr ""
+
+#: ../nova/db/sqlalchemy/api.py:1514
+#, python-format
+msgid "No export device found for volume %s"
+msgstr ""
+
+#: ../nova/db/sqlalchemy/api.py:1527
+#, python-format
+msgid "No target id found for volume %s"
+msgstr ""
+
+#: ../nova/db/sqlalchemy/api.py:1572
+#, python-format
+msgid "No security group with id %s"
+msgstr ""
+
+#: ../nova/db/sqlalchemy/api.py:1589
+#, python-format
+msgid "No security group named %(group_name)s for project: %(project_id)s"
+msgstr ""
+
+#: ../nova/db/sqlalchemy/api.py:1682
+#, python-format
+msgid "No secuity group rule with id %s"
+msgstr ""
+
+#: ../nova/db/sqlalchemy/api.py:1756
+#, python-format
+msgid "No user for id %s"
+msgstr ""
+
+#: ../nova/db/sqlalchemy/api.py:1772
+#, python-format
+msgid "No user for access key %s"
+msgstr ""
+
+#: ../nova/db/sqlalchemy/api.py:1834
+#, python-format
+msgid "No project with id %s"
+msgstr ""
+
+#: ../nova/db/sqlalchemy/api.py:1979
+#, python-format
+msgid "No console pool with id %(pool_id)s"
+msgstr ""
+
+#: ../nova/db/sqlalchemy/api.py:1996
+#, python-format
+msgid ""
+"No console pool of type %(console_type)s for compute host %(compute_host)s "
+"on proxy host %(host)s"
+msgstr ""
+
+#: ../nova/db/sqlalchemy/api.py:2035
+#, python-format
+msgid "No console for instance %(instance_id)s in pool %(pool_id)s"
+msgstr ""
+
+#: ../nova/db/sqlalchemy/api.py:2057
+#, python-format
+msgid "on instance %s"
+msgstr ""
+
+#: ../nova/db/sqlalchemy/api.py:2058
+#, python-format
+msgid "No console with id %(console_id)s %(idesc)s"
+msgstr ""
+
+#: ../nova/db/sqlalchemy/api.py:2078 ../nova/db/sqlalchemy/api.py:2097
+#, python-format
+msgid "No zone with id %(zone_id)s"
+msgstr ""
+
+#: ../nova/compute/manager.py:192
+#, python-format
+msgid "Checking state of %s"
+msgstr ""
+
+#: ../nova/virt/libvirt_conn.py:165
+#, python-format
+msgid "Current state of %(name)s was %(state)s."
+msgstr ""
+
+#: ../nova/virt/libvirt/connection.py:226
+#, python-format
+msgid "Connecting to libvirt: %s"
+msgstr ""
+
+#: ../nova/virt/libvirt/connection.py:240
+msgid "Connection to libvirt broke"
+msgstr ""
+
+#: ../nova/virt/libvirt/connection.py:394
+#, python-format
+msgid "instance %(instance_name)s: deleting instance files %(target)s"
+msgstr ""
+
+#: ../nova/virt/libvirt_conn.py:283
+#, python-format
+msgid "Invalid device path %s"
+msgstr ""
+
+#: ../nova/virt/libvirt_conn.py:313
+#, python-format
+msgid "No disk at %s"
+msgstr ""
+
+#: ../nova/virt/libvirt_conn.py:320
+msgid "Instance snapshotting is not supported for libvirtat this time"
+msgstr ""
+
+#: ../nova/virt/libvirt_conn.py:336
+#, python-format
+msgid "instance %s: rebooted"
+msgstr ""
+
+#: ../nova/virt/libvirt_conn.py:339
+#, python-format
+msgid "_wait_for_reboot failed: %s"
+msgstr ""
+
+#: ../nova/virt/libvirt_conn.py:382
+#, python-format
+msgid "instance %s: rescued"
+msgstr ""
+
+#: ../nova/virt/libvirt_conn.py:385
+#, python-format
+msgid "_wait_for_rescue failed: %s"
+msgstr ""
+
+#: ../nova/virt/libvirt/connection.py:670
+#, python-format
+msgid "instance %s: is running"
+msgstr ""
+
+#: ../nova/virt/libvirt_conn.py:422
+#, python-format
+msgid "instance %s: booted"
+msgstr ""
+
+#: ../nova/virt/libvirt_conn.py:425 ../nova/virt/xenapi/vmops.py:186
+#, python-format
+msgid "instance %s: failed to boot"
+msgstr ""
+
+#: ../nova/virt/libvirt/connection.py:693
+#, python-format
+msgid "virsh said: %r"
+msgstr ""
+
+#: ../nova/virt/libvirt/connection.py:697
+msgid "cool, it's a device"
+msgstr ""
+
+#: ../nova/virt/libvirt/connection.py:708
+#, python-format
+msgid "data: %(data)r, fpath: %(fpath)r"
+msgstr ""
+
+#: ../nova/virt/libvirt/connection.py:745
+#, python-format
+msgid "Contents of file %(fpath)s: %(contents)r"
+msgstr ""
+
+#: ../nova/virt/libvirt/utils.py:218
+msgid "Unable to find an open port"
+msgstr ""
+
+#: ../nova/virt/libvirt/connection.py:854
+#, python-format
+msgid "instance %s: Creating image"
+msgstr ""
+
+#: ../nova/virt/libvirt_conn.py:646
+#, python-format
+msgid "instance %(inst_name)s: injecting key into image %(img_id)s"
+msgstr ""
+
+#: ../nova/virt/libvirt_conn.py:649
+#, python-format
+msgid "instance %(inst_name)s: injecting net into image %(img_id)s"
+msgstr ""
+
+#. This could be a windows image, or a vmdk format disk
+#: ../nova/virt/libvirt/connection.py:1044
+#, python-format
+msgid ""
+"instance %(inst_name)s: ignoring error injecting data into image %(img_id)s "
+"(%(e)s)"
+msgstr ""
+
+#. TODO(termie): cache?
+#: ../nova/virt/libvirt/connection.py:1183
+#, python-format
+msgid "instance %s: starting toXML method"
+msgstr ""
+
+#: ../nova/virt/libvirt/connection.py:1187
+#, python-format
+msgid "instance %s: finished toXML method"
+msgstr ""
+
+#: ../nova/virt/libvirt/connection.py:1330
+msgid "diagnostics are not supported for libvirt"
+msgstr ""
+
+#: ../nova/virt/libvirt/firewall.py:540
+#, python-format
+msgid "Attempted to unfilter instance %s which is not filtered"
+msgstr ""
+
+#: ../nova/api/metadata/handler.py:250 ../nova/api/metadata/handler.py:257
+#, python-format
+msgid "Failed to get metadata for ip: %s"
+msgstr ""
+
+#: ../nova/auth/fakeldap.py:33
+msgid "Attempted to instantiate singleton"
+msgstr ""
+
+#: ../nova/network/api.py:39
+#, python-format
+msgid "Quota exceeeded for %s, tried to allocate address"
+msgstr ""
+
+#: ../nova/network/manager.py:296
+msgid "Address quota exceeded. You cannot allocate any more addresses"
+msgstr ""
+
+#: ../nova/tests/test_volume.py:190
+#, python-format
+msgid "Target %s allocated"
+msgstr ""
+
+#: ../nova/virt/images.py:70
+#, python-format
+msgid "Finished retreving %(url)s -- placed in %(path)s"
+msgstr ""
+
+#: ../nova/scheduler/driver.py:165
+msgid "Must implement a fallback schedule"
+msgstr ""
+
+#: ../nova/console/manager.py:71 ../nova/console/vmrc_manager.py:64
+msgid "Adding console"
+msgstr ""
+
+#: ../nova/console/manager.py:91
+#, python-format
+msgid "Tried to remove non-existant console %(console_id)s."
+msgstr ""
+
+#: ../nova/api/direct.py:219
+msgid "not available"
+msgstr ""
+
+#: ../nova/api/ec2/cloud.py:62
+#, python-format
+msgid "The key_pair %s already exists"
+msgstr ""
+
+#. TODO(vish): Do this with M2Crypto instead
+#: ../nova/api/ec2/cloud.py:227
+#, python-format
+msgid "Generating root CA: %s"
+msgstr ""
+
+#: ../nova/api/ec2/cloud.py:385
+#, python-format
+msgid "Create key pair %s"
+msgstr ""
+
+#: ../nova/api/ec2/cloud.py:414
+#, python-format
+msgid "Delete key pair %s"
+msgstr ""
+
+#: ../nova/api/ec2/cloud.py:386
+#, python-format
+msgid "%s is not a valid ipProtocol"
+msgstr ""
+
+#: ../nova/api/ec2/cloud.py:390
+msgid "Invalid port range"
+msgstr ""
+
+#: ../nova/api/openstack/v2/contrib/security_groups.py:349
+#, python-format
+msgid "Revoke security group ingress %s"
+msgstr ""
+
+#: ../nova/api/openstack/v2/contrib/security_groups.py:211
+msgid "Not enough parameters to build a valid rule."
+msgstr ""
+
+#: ../nova/api/ec2/cloud.py:673 ../nova/api/ec2/cloud.py:729
+msgid "No rule for the specified parameters."
+msgstr ""
+
+#: ../nova/api/openstack/v2/contrib/security_groups.py:196
+#, python-format
+msgid "Authorize security group ingress %s"
+msgstr ""
+
+#: ../nova/api/openstack/v2/contrib/security_groups.py:218
+#, python-format
+msgid "This rule already exists in group %s"
+msgstr ""
+
+#: ../nova/api/ec2/cloud.py:765
+#: ../nova/api/openstack/v2/contrib/security_groups.py:138
+#, python-format
+msgid "Create Security Group %s"
+msgstr ""
+
+#: ../nova/api/ec2/cloud.py:768
+#, python-format
+msgid "group %s already exists"
+msgstr ""
+
+#: ../nova/api/ec2/cloud.py:795
+#: ../nova/api/openstack/v2/contrib/security_groups.py:98
+#, python-format
+msgid "Delete security group %s"
+msgstr ""
+
+#: ../nova/api/openstack/v2/contrib/volumes.py:143
+#: ../nova/api/ec2/cloud.py:887
+#, python-format
+msgid "Create volume of %s GB"
+msgstr ""
+
+#: ../nova/api/ec2/cloud.py:922
+#, python-format
+msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s"
+msgstr ""
+
+#: ../nova/api/openstack/v2/contrib/volumes.py:327
+#: ../nova/api/ec2/cloud.py:936
+#, python-format
+msgid "Detach volume %s"
+msgstr ""
+
+#: ../nova/api/ec2/cloud.py:1221
+msgid "Allocate address"
+msgstr ""
+
+#: ../nova/api/ec2/cloud.py:1233
+#, python-format
+msgid "Release address %s"
+msgstr ""
+
+#: ../nova/api/ec2/cloud.py:1238
+#, python-format
+msgid "Associate address %(public_ip)s to instance %(instance_id)s"
+msgstr ""
+
+#: ../nova/api/ec2/cloud.py:1248
+#, python-format
+msgid "Disassociate address %s"
+msgstr ""
+
+#: ../nova/api/ec2/cloud.py:1296
+msgid "Going to start terminating instances"
+msgstr ""
+
+#: ../nova/api/ec2/cloud.py:1305
+#, python-format
+msgid "Reboot instance %r"
+msgstr ""
+
+#: ../nova/api/ec2/cloud.py:1445
+#, python-format
+msgid "De-registering image %s"
+msgstr ""
+
+#: ../nova/api/ec2/cloud.py:1472
+#, python-format
+msgid "Registered image %(image_location)s with id %(image_id)s"
+msgstr ""
+
+#: ../nova/api/ec2/cloud.py:963 ../nova/api/ec2/cloud.py:1024
+#: ../nova/api/ec2/cloud.py:1500 ../nova/api/ec2/cloud.py:1515
+#, python-format
+msgid "attribute not supported: %s"
+msgstr ""
+
+#: ../nova/api/ec2/cloud.py:890
+#, python-format
+msgid "invalid id: %s"
+msgstr ""
+
+#: ../nova/api/ec2/cloud.py:1518
+msgid "user or group not specified"
+msgstr ""
+
+#: ../nova/api/ec2/cloud.py:1520
+msgid "only group \"all\" is supported"
+msgstr ""
+
+#: ../nova/api/ec2/cloud.py:1522
+msgid "operation_type must be add or remove"
+msgstr ""
+
+#: ../nova/api/ec2/cloud.py:1523
+#, python-format
+msgid "Updating image %s publicity"
+msgstr ""
+
+#: ../bin/nova-api.py:52
+#, python-format
+msgid "Using paste.deploy config at: %s"
+msgstr ""
+
+#: ../bin/nova-api.py:57
+#, python-format
+msgid "No paste configuration for app: %s"
+msgstr ""
+
+#: ../bin/nova-api.py:59
+#, python-format
+msgid ""
+"App Config: %(api)s\n"
+"%(config)r"
+msgstr ""
+
+#: ../bin/nova-api.py:64
+#, python-format
+msgid "Running %s API"
+msgstr ""
+
+#: ../bin/nova-api.py:69
+#, python-format
+msgid "No known API applications configured in %s."
+msgstr ""
+
+#: ../bin/nova-api.py:83
+#, python-format
+msgid "Starting nova-api node (version %s)"
+msgstr ""
+
+#: ../bin/nova-api.py:89
+#, python-format
+msgid "No paste configuration found for: %s"
+msgstr ""
+
+#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:84
+#, python-format
+msgid "Argument %(key)s value %(value)s is too short."
+msgstr ""
+
+#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:89
+#, python-format
+msgid "Argument %(key)s value %(value)s contains invalid characters."
+msgstr ""
+
+#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:94
+#, python-format
+msgid "Argument %(key)s value %(value)s starts with a hyphen."
+msgstr ""
+
+#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:102
+#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:130
+#, python-format
+msgid "Argument %s is required."
+msgstr ""
+
+#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:117
+#, python-format
+msgid ""
+"Argument %(key)s may not take value %(value)s. Valid values are ['true', "
+"'false']."
+msgstr ""
+
+#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:163
+#, python-format
+msgid ""
+"Created VDI %(vdi_ref)s (%(label)s, %(size)s, %(read_only)s) on %(sr_ref)s."
+msgstr ""
+
+#: ../nova/virt/xenapi/vmops.py:67
+#, python-format
+msgid "Attempted to create non-unique name %s"
+msgstr ""
+
+#: ../nova/virt/xenapi/vmops.py:73
+#, python-format
+msgid "instance %(name)s: not enough free memory"
+msgstr ""
+
+#: ../nova/virt/xenapi/vmops.py:388
+#, python-format
+msgid "Starting VM %s..."
+msgstr ""
+
+#: ../nova/virt/xenapi/vmops.py:391
+#, python-format
+msgid "Spawning VM %(instance_name)s created %(vm_ref)s."
+msgstr ""
+
+#: ../nova/virt/xenapi/vmops.py:162
+#, python-format
+msgid "Invalid value for onset_files: '%s'"
+msgstr ""
+
+#: ../nova/virt/xenapi/vmops.py:437
+#, python-format
+msgid "Injecting file path: '%s'"
+msgstr ""
+
+#: ../nova/virt/xenapi/vmops.py:467
+#, python-format
+msgid "Instance %s: booted"
+msgstr ""
+
+#: ../nova/virt/xenapi/vmops.py:232
+#, python-format
+msgid "Instance not present %s"
+msgstr ""
+
+#. TODO(sirp): Add quiesce and VSS locking support when Windows support
+#. is added
+#: ../nova/virt/xenapi/vmops.py:612
+#, python-format
+msgid "Starting snapshot for VM %s"
+msgstr ""
+
+#: ../nova/virt/xenapi/vmops.py:621
+#, python-format
+msgid "Unable to Snapshot %(vm_ref)s: %(exc)s"
+msgstr ""
+
+#: ../nova/virt/xenapi/vmops.py:606
+#, python-format
+msgid "Finished snapshot and upload for VM %s"
+msgstr ""
+
+#: ../nova/virt/xenapi/vmops.py:356
+#, python-format
+msgid "VM %(vm)s already halted, skipping shutdown..."
+msgstr ""
+
+#: ../nova/virt/xenapi/vmops.py:389
+msgid "Removing kernel/ramdisk files"
+msgstr ""
+
+#: ../nova/virt/xenapi/vmops.py:1086
+msgid "kernel/ramdisk files removed"
+msgstr ""
+
+#: ../nova/virt/xenapi/vmops.py:561
+#, python-format
+msgid ""
+"TIMEOUT: The call to %(method)s timed out. VM id=%(instance_id)s; "
+"args=%(strargs)s"
+msgstr ""
+
+#: ../nova/virt/xenapi/vmops.py:564
+#, python-format
+msgid ""
+"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. VM "
+"id=%(instance_id)s; args=%(strargs)s"
+msgstr ""
+
+#: ../nova/virt/xenapi/vmops.py:569
+#, python-format
+msgid ""
+"The call to %(method)s returned an error: %(e)s. VM id=%(instance_id)s; "
+"args=%(strargs)s"
+msgstr ""
+
+#: ../nova/virt/xenapi/vmops.py:1764
+#, python-format
+msgid "OpenSSL error: %s"
+msgstr ""
+
+#: ../nova/tests/test_compute.py:288 ../nova/tests/test_compute.py:1165
+#, python-format
+msgid "Running instances: %s"
+msgstr ""
+
+#: ../nova/tests/test_compute.py:294
+#, python-format
+msgid "After terminating instances: %s"
+msgstr ""
+
+#: ../nova/cloudpipe/pipelib.py:45
+msgid "Template for script to run on cloudpipe instance boot"
+msgstr ""
+
+#: ../nova/cloudpipe/pipelib.py:48
+msgid "Network to push into openvpn config"
+msgstr ""
+
+#: ../nova/cloudpipe/pipelib.py:51
+msgid "Netmask to push into openvpn config"
+msgstr ""
+
+#: ../nova/cloudpipe/pipelib.py:96
+#, python-format
+msgid "Launching VPN for %s"
+msgstr ""
+
+#: ../nova/db/sqlalchemy/migration.py:36
+msgid "python-migrate is not installed. Exiting."
+msgstr ""
+
+#: ../nova/image/s3.py:99
+#, python-format
+msgid "Image %s could not be found"
+msgstr ""
+
+#: ../nova/api/ec2/__init__.py:134
+msgid "Too many failed authentications."
+msgstr ""
+
+#: ../nova/api/ec2/__init__.py:144
+#, python-format
+msgid ""
+"Access key %(access_key)s has had %(failures)d failed authentications and "
+"will be locked out for %(lock_mins)d minutes."
+msgstr ""
+
+#: ../nova/api/ec2/__init__.py:204
+#, python-format
+msgid "Authentication Failure: %s"
+msgstr ""
+
+#: ../nova/api/ec2/__init__.py:220
+#, python-format
+msgid "Authenticated Request For %(uname)s:%(pname)s)"
+msgstr ""
+
+#: ../nova/api/ec2/__init__.py:251
+#, python-format
+msgid "action: %s"
+msgstr ""
+
+#: ../nova/api/ec2/__init__.py:253
+#, python-format
+msgid "arg: %(key)s\t\tval: %(value)s"
+msgstr ""
+
+#: ../nova/api/ec2/__init__.py:328
+#, python-format
+msgid ""
+"Unauthorized request for controller=%(controller)s and action=%(action)s"
+msgstr ""
+
+#: ../nova/api/ec2/__init__.py:359
+#, python-format
+msgid "InstanceNotFound raised: %s"
+msgstr ""
+
+#: ../nova/api/ec2/__init__.py:365
+#, python-format
+msgid "VolumeNotFound raised: %s"
+msgstr ""
+
+#: ../nova/api/ec2/__init__.py:377
+#, python-format
+msgid "NotFound raised: %s"
+msgstr ""
+
+#: ../nova/api/ec2/__init__.py:380
+#, python-format
+msgid "ApiError raised: %s"
+msgstr ""
+
+#: ../nova/api/ec2/__init__.py:409
+#, python-format
+msgid "Unexpected error raised: %s"
+msgstr ""
+
+#: ../nova/api/metadata/handler.py:252 ../nova/api/ec2/__init__.py:414
+msgid "An unknown error has occurred. Please try your request again."
+msgstr ""
+
+#: ../nova/auth/dbdriver.py:84
+#, python-format
+msgid "User %s already exists"
+msgstr ""
+
+#: ../nova/auth/dbdriver.py:106 ../nova/auth/ldapdriver.py:232
+#, python-format
+msgid "Project can't be created because manager %s doesn't exist"
+msgstr ""
+
+#: ../nova/auth/dbdriver.py:122 ../nova/auth/ldapdriver.py:243
+#, python-format
+msgid "Project can't be created because user %s doesn't exist"
+msgstr ""
+
+#: ../nova/auth/dbdriver.py:135 ../nova/auth/ldapdriver.py:229
+#, python-format
+msgid "Project can't be created because project %s already exists"
+msgstr ""
+
+#: ../nova/auth/dbdriver.py:157 ../nova/auth/ldapdriver.py:268
+#, python-format
+msgid "Project can't be modified because manager %s doesn't exist"
+msgstr ""
+
+#: ../nova/auth/dbdriver.py:245
+#, python-format
+msgid "User \"%s\" not found"
+msgstr ""
+
+#: ../nova/auth/dbdriver.py:248
+#, python-format
+msgid "Project \"%s\" not found"
+msgstr ""
+
+#: ../nova/virt/xenapi_conn.py:159
+msgid ""
+"Must specify xenapi_connection_url, xenapi_connection_username (optionally), "
+"and xenapi_connection_password to use connection_type=xenapi"
+msgstr ""
+
+#: ../nova/virt/xenapi_conn.py:542
+#, python-format
+msgid "Task [%(name)s] %(task)s status: success %(result)s"
+msgstr ""
+
+#: ../nova/virt/xenapi_conn.py:551
+#, python-format
+msgid "Task [%(name)s] %(task)s status: %(status)s %(error_info)s"
+msgstr ""
+
+#: ../nova/virt/xenapi_conn.py:577 ../nova/virt/xenapi_conn.py:590
+#, python-format
+msgid "Got exception: %s"
+msgstr ""
+
+#: ../nova/volume/driver.py:359
+#, python-format
+msgid "Could not find iSCSI export for volume %s"
+msgstr ""
+
+#: ../nova/api/ec2/apirequest.py:73
+#, python-format
+msgid ""
+"Unsupported API request: controller = %(controller)s, action = %(action)s"
+msgstr ""
+
+#: ../nova/api/openstack/v2/__init__.py:63
+#, python-format
+msgid "Caught error: %s"
+msgstr ""
+
+#: ../nova/api/openstack/v2/__init__.py:113
+msgid "Including admin operations in API."
+msgstr ""
+
+#: ../nova/console/xvp.py:93
+msgid "Rebuilding xvp conf"
+msgstr ""
+
+#: ../nova/console/xvp.py:111
+#, python-format
+msgid "Re-wrote %s"
+msgstr ""
+
+#: ../nova/console/xvp.py:116
+msgid "Stopping xvp"
+msgstr ""
+
+#: ../nova/console/xvp.py:129
+msgid "Starting xvp"
+msgstr ""
+
+#: ../nova/console/xvp.py:136
+#, python-format
+msgid "Error starting xvp: %s"
+msgstr ""
+
+#: ../nova/console/xvp.py:139
+msgid "Restarting xvp"
+msgstr ""
+
+#: ../nova/console/xvp.py:141
+msgid "xvp not running..."
+msgstr ""
+
+#: ../bin/nova-manage.py:272
+msgid ""
+"The above error may show that the database has not been created.\n"
+"Please create a database using nova-manage sync db before running this "
+"command."
+msgstr ""
+
+#: ../bin/nova-manage.py:426
+msgid ""
+"No more networks available. If this is a new installation, you need\n"
+"to call something like this:\n"
+"\n"
+" nova-manage network create 10.0.0.0/8 10 64\n"
+"\n"
+msgstr ""
+
+#: ../bin/nova-manage.py:609
+msgid ""
+"The above error may show that the certificate db has not been created.\n"
+"Please create a database by running a nova-api server on this host."
+msgstr ""
+
+#: ../bin/nova-manage.py:633
+msgid "network"
+msgstr ""
+
+#: ../bin/nova-manage.py:634
+msgid "IP address"
+msgstr ""
+
+#: ../bin/nova-manage.py:635
+msgid "MAC address"
+msgstr ""
+
+#: ../bin/nova-manage.py:636
+msgid "hostname"
+msgstr ""
+
+#: ../bin/nova-manage.py:637 ../bin/nova-manage.py:1136
+#: ../bin/nova-manage.py:1252 ../bin/nova-manage.py:1284
+msgid "host"
+msgstr ""
+
+#: ../bin/nova-manage.py:537
+msgid "netmask"
+msgstr ""
+
+#: ../bin/nova-manage.py:832 ../nova/tests/test_nova_manage.py:183
+msgid "start address"
+msgstr ""
+
+#: ../nova/virt/disk.py:124
+#, python-format
+msgid "Failed to load partition: %s"
+msgstr ""
+
+#: ../nova/virt/disk.py:148 ../nova/virt/disk.py:180
+#, python-format
+msgid "Failed to mount filesystem: %s"
+msgstr ""
+
+#: ../nova/virt/disk.py:217
+#, python-format
+msgid "nbd device %s did not show up"
+msgstr ""
+
+#: ../nova/virt/disk.py:222
+#, python-format
+msgid "Could not attach image to loopback: %s"
+msgstr ""
+
+#: ../nova/virt/disk.py:246
+msgid "No free nbd devices"
+msgstr ""
+
+#: ../doc/ext/nova_todo.py:46
+#, python-format
+msgid "%(filename)s, line %(line_info)d"
+msgstr ""
+
+#. FIXME(chiradeep): implement this
+#: ../nova/virt/hyperv.py:119
+msgid "In init host"
+msgstr ""
+
+#: ../nova/virt/hyperv.py:131
+#, python-format
+msgid "Attempt to create duplicate vm %s"
+msgstr ""
+
+#: ../nova/virt/hyperv.py:166
+#, python-format
+msgid "Starting VM %s "
+msgstr ""
+
+#: ../nova/virt/hyperv.py:168
+#, python-format
+msgid "Started VM %s "
+msgstr ""
+
+#: ../nova/virt/hyperv.py:170
+#, python-format
+msgid "spawn vm failed: %s"
+msgstr ""
+
+#: ../nova/virt/hyperv.py:187
+#, python-format
+msgid "Failed to create VM %s"
+msgstr ""
+
+#: ../nova/virt/hyperv.py:206
+#, python-format
+msgid "Set memory for vm %s..."
+msgstr ""
+
+#: ../nova/virt/hyperv.py:216
+#, python-format
+msgid "Set vcpus for vm %s..."
+msgstr ""
+
+#: ../nova/virt/hyperv.py:220
+#, python-format
+msgid "Creating disk for %(vm_name)s by attaching disk file %(vhdfile)s"
+msgstr ""
+
+#: ../nova/virt/hyperv.py:245
+#, python-format
+msgid "Failed to add diskdrive to VM %s"
+msgstr ""
+
+#: ../nova/virt/hyperv.py:248
+#, python-format
+msgid "New disk drive path is %s"
+msgstr ""
+
+#: ../nova/virt/hyperv.py:265
+#, python-format
+msgid "Failed to add vhd file to VM %s"
+msgstr ""
+
+#: ../nova/virt/hyperv.py:267
+#, python-format
+msgid "Created disk for %s"
+msgstr ""
+
+#: ../nova/virt/hyperv.py:271
+#, python-format
+msgid "Creating nic for %s "
+msgstr ""
+
+#: ../nova/virt/hyperv.py:290
+msgid "Failed creating a port on the external vswitch"
+msgstr ""
+
+#: ../nova/virt/hyperv.py:291
+#, python-format
+msgid "Failed creating port for %s"
+msgstr ""
+
+#: ../nova/virt/hyperv.py:294
+#, python-format
+msgid "Created switch port %(vm_name)s on switch %(ext_path)s"
+msgstr ""
+
+#: ../nova/virt/hyperv.py:304
+#, python-format
+msgid "Failed to add nic to VM %s"
+msgstr ""
+
+#: ../nova/virt/hyperv.py:306
+#, python-format
+msgid "Created nic for %s "
+msgstr ""
+
+#: ../nova/virt/hyperv.py:339
+#, python-format
+msgid "WMI job failed: %s"
+msgstr ""
+
+#: ../nova/virt/hyperv.py:343
+#, python-format
+msgid "WMI job succeeded: %(desc)s, Elapsed=%(elap)s "
+msgstr ""
+
+#: ../nova/virt/hyperv.py:380
+#, python-format
+msgid "Got request to destroy vm %s"
+msgstr ""
+
+#: ../nova/virt/hyperv.py:405
+#, python-format
+msgid "Failed to destroy vm %s"
+msgstr ""
+
+#: ../nova/virt/hyperv.py:412
+#, python-format
+msgid "Del: disk %(vhdfile)s vm %(instance_name)s"
+msgstr ""
+
+#: ../nova/virt/hyperv.py:433
+#, python-format
+msgid ""
+"Got Info for vm %(instance_id)s: state=%(state)s, mem=%(memusage)s, "
+"num_cpu=%(numprocs)s, cpu_time=%(uptime)s"
+msgstr ""
+
+#: ../nova/virt/hyperv.py:470
+#, python-format
+msgid "Successfully changed vm state of %(vm_name)s to %(req_state)s"
+msgstr ""
+
+#: ../nova/virt/hyperv.py:473
+#, python-format
+msgid "Failed to change vm state of %(vm_name)s to %(req_state)s"
+msgstr ""
+
+#: ../nova/compute/api.py:71
+#, python-format
+msgid "Instance %d was not found in get_network_topic"
+msgstr ""
+
+#: ../nova/compute/api.py:77
+#, python-format
+msgid "Instance %d has no host"
+msgstr ""
+
+#: ../nova/compute/api.py:97
+#, python-format
+msgid "Quota exceeeded for %(pid)s, tried to run %(min_count)s instances"
+msgstr ""
+
+#: ../nova/compute/api.py:202
+#, python-format
+msgid ""
+"Instance quota exceeded. You can only run %s more instances of this type."
+msgstr ""
+
+#: ../nova/compute/api.py:251
+msgid "Creating a raw instance"
+msgstr ""
+
+#: ../nova/compute/api.py:306
+#, python-format
+msgid "Going to run %s instances..."
+msgstr ""
+
+#: ../nova/compute/api.py:187
+#, python-format
+msgid "Casting to scheduler for %(pid)s/%(uid)s's instance %(instance_id)s"
+msgstr ""
+
+#: ../nova/compute/api.py:818
+#, python-format
+msgid "Going to try to terminate %s"
+msgstr ""
+
+#: ../nova/compute/api.py:296
+#, python-format
+msgid "Instance %d was not found during terminate"
+msgstr ""
+
+#: ../nova/compute/api.py:301
+#, python-format
+msgid "Instance %d is already being terminated"
+msgstr ""
+
+#: ../nova/compute/api.py:1521
+#, python-format
+msgid "Invalid device specified: %s. Example device: /dev/vdb"
+msgstr ""
+
+#: ../nova/compute/api.py:1536
+msgid "Volume isn't attached to anything!"
+msgstr ""
+
+#: ../nova/rpc.py:98
+#, python-format
+msgid ""
+"AMQP server on %(fl_host)s:%(fl_port)d is unreachable. Trying again in "
+"%(fl_intv)d seconds."
+msgstr ""
+
+#: ../nova/rpc.py:103
+#, python-format
+msgid "Unable to connect to AMQP server after %d tries. Shutting down."
+msgstr ""
+
+#: ../nova/rpc/impl_carrot.py:220
+msgid "Reconnected to queue"
+msgstr ""
+
+#: ../nova/rpc.py:129
+msgid "Failed to fetch message from queue"
+msgstr ""
+
+#: ../nova/rpc/impl_carrot.py:235
+#, python-format
+msgid "Initing the Adapter Consumer for %s"
+msgstr ""
+
+#: ../nova/rpc/impl_kombu.py:600 ../nova/rpc/impl_carrot.py:255
+#, python-format
+msgid "received %s"
+msgstr ""
+
+#. NOTE(vish): we may not want to ack here, but that means that bad
+#. messages stay in the queue indefinitely, so for now
+#. we just log the message and send an error string
+#. back to the caller
+#: ../nova/rpc/impl_kombu.py:605 ../nova/rpc/impl_carrot.py:268
+#, python-format
+msgid "no method for message: %s"
+msgstr ""
+
+#: ../nova/rpc/impl_kombu.py:606 ../nova/rpc/impl_carrot.py:270
+#, python-format
+msgid "No method for message: %s"
+msgstr ""
+
+#: ../nova/rpc/impl_kombu.py:779 ../nova/rpc/impl_carrot.py:453
+#, python-format
+msgid "Returning exception %s to caller"
+msgstr ""
+
+#: ../nova/rpc/impl_kombu.py:646 ../nova/rpc/impl_carrot.py:486
+#, python-format
+msgid "unpacked context: %s"
+msgstr ""
+
+#: ../nova/rpc.py:313
+msgid "Making asynchronous call..."
+msgstr ""
+
+#: ../nova/rpc/impl_kombu.py:733 ../nova/rpc/impl_carrot.py:522
+#, python-format
+msgid "MSG_ID is %s"
+msgstr ""
+
+#: ../nova/rpc.py:354
+msgid "Making asynchronous cast..."
+msgstr ""
+
+#: ../nova/rpc/impl_carrot.py:621
+#, python-format
+msgid "response %s"
+msgstr ""
+
+#: ../nova/rpc/impl_carrot.py:630
+#, python-format
+msgid "topic is %s"
+msgstr ""
+
+#: ../nova/rpc/impl_carrot.py:631
+#, python-format
+msgid "message %s"
+msgstr ""
+
+#: ../nova/volume/driver.py:79
+#, python-format
+msgid "Recovering from a failed execute. Try number %s"
+msgstr ""
+
+#: ../nova/volume/driver.py:89
+#, python-format
+msgid "volume group %s doesn't exist"
+msgstr ""
+
+#: ../nova/volume/driver.py:342
+#, python-format
+msgid "FAKE AOE: %s"
+msgstr ""
+
+#: ../nova/volume/driver.py:251 ../nova/volume/driver.py:847
+msgid "Skipping ensure_export. No iscsi_target "
+msgstr ""
+
+#: ../nova/volume/driver.py:295 ../nova/volume/driver.py:304
+#: ../nova/volume/driver.py:891
+msgid "Skipping remove_export. No iscsi_target "
+msgstr ""
+
+#: ../nova/volume/driver.py:463
+#, python-format
+msgid "FAKE ISCSI: %s"
+msgstr ""
+
+#: ../nova/volume/driver.py:475
+#, python-format
+msgid "rbd has no pool %s"
+msgstr ""
+
+#: ../nova/volume/driver.py:545
+#, python-format
+msgid "Sheepdog is not working: %s"
+msgstr ""
+
+#: ../nova/volume/driver.py:547
+msgid "Sheepdog is not working"
+msgstr ""
+
+#: ../nova/wsgi.py:68
+#, python-format
+msgid "Starting %(arg0)s on %(host)s:%(port)s"
+msgstr ""
+
+#: ../nova/wsgi.py:208
+msgid "You must implement __call__"
+msgstr ""
+
+#: ../bin/nova-dhcpbridge.py:58
+msgid "leasing ip"
+msgstr ""
+
+#: ../bin/nova-dhcpbridge.py:73
+msgid "Adopted old lease or got a change of mac/hostname"
+msgstr ""
+
+#: ../bin/nova-dhcpbridge.py:78
+msgid "releasing ip"
+msgstr ""
+
+#: ../bin/nova-dhcpbridge.py:123
+#, python-format
+msgid ""
+"Called %(action)s for mac %(mac)s with ip %(ip)s and hostname %(hostname)s "
+"on interface %(interface)s"
+msgstr ""
+
+#: ../nova/virt/fake.py:239
+#, python-format
+msgid "Instance %s Not Found"
+msgstr ""
+
+#: ../nova/network/manager.py:386
+#, python-format
+msgid "Dissassociated %s stale fixed ip(s)"
+msgstr ""
+
+#: ../nova/network/manager.py:525
+msgid "setting network host"
+msgstr ""
+
+#: ../nova/network/manager.py:212
+#, python-format
+msgid "Leasing IP %s"
+msgstr ""
+
+#: ../nova/network/manager.py:216
+#, python-format
+msgid "IP %s leased that isn't associated"
+msgstr ""
+
+#: ../nova/network/manager.py:220
+#, python-format
+msgid "IP %(address)s leased to bad mac %(inst_addr)s vs %(mac)s"
+msgstr ""
+
+#: ../nova/network/manager.py:228
+#, python-format
+msgid "IP %s leased that was already deallocated"
+msgstr ""
+
+#: ../nova/network/manager.py:233
+#, python-format
+msgid "Releasing IP %s"
+msgstr ""
+
+#: ../nova/network/manager.py:237
+#, python-format
+msgid "IP %s released that isn't associated"
+msgstr ""
+
+#: ../nova/network/manager.py:241
+#, python-format
+msgid "IP %(address)s released from bad mac %(inst_addr)s vs %(mac)s"
+msgstr ""
+
+#: ../nova/network/manager.py:881
+#, python-format
+msgid "IP %s released that was not leased"
+msgstr ""
+
+#: ../nova/network/manager.py:1289
+msgid ""
+"The sum between the number of networks and the vlan start cannot be greater "
+"than 4094"
+msgstr ""
+
+#: ../nova/virt/xenapi/volume_utils.py:157
+#, python-format
+msgid "Introducing %s..."
+msgstr ""
+
+#: ../nova/virt/xenapi/volume_utils.py:103
+#: ../nova/virt/xenapi/volume_utils.py:170
+#: ../nova/virt/xenapi/volumeops.py:157
+#, python-format
+msgid "Introduced %(label)s as %(sr_ref)s."
+msgstr ""
+
+#: ../nova/virt/xenapi/volume_utils.py:76
+#: ../nova/virt/xenapi/volume_utils.py:174
+msgid "Unable to create Storage Repository"
+msgstr ""
+
+#: ../nova/virt/xenapi/volume_utils.py:186
+#, python-format
+msgid "Unable to find SR from VBD %s"
+msgstr ""
+
+#: ../nova/virt/xenapi/volume_utils.py:96
+#, python-format
+msgid "Forgetting SR %s ... "
+msgstr ""
+
+#: ../nova/virt/xenapi/volume_utils.py:228
+#, python-format
+msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s"
+msgstr ""
+
+#: ../nova/virt/xenapi/volume_utils.py:234
+#, python-format
+msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s"
+msgstr ""
+
+#: ../nova/virt/xenapi/volume_utils.py:111
+#, python-format
+msgid "Forgetting SR %s done."
+msgstr ""
+
+#: ../nova/virt/xenapi/volume_utils.py:113
+#, python-format
+msgid "Ignoring exception %(exc)s when forgetting SR %(sr_ref)s"
+msgstr ""
+
+#: ../nova/virt/xenapi/volume_utils.py:249
+#, python-format
+msgid "Unable to introduce VDI on SR %s"
+msgstr ""
+
+#: ../nova/virt/xenapi/volume_utils.py:257
+#, python-format
+msgid "Unable to get record of VDI %s on"
+msgstr ""
+
+#: ../nova/virt/xenapi/volume_utils.py:279
+#, python-format
+msgid "Unable to introduce VDI for SR %s"
+msgstr ""
+
+#: ../nova/virt/xenapi/volume_utils.py:175
+#, python-format
+msgid "Unable to obtain target information %(device_path)s, %(mountpoint)s"
+msgstr ""
+
+#: ../nova/virt/xenapi/volume_utils.py:356
+#, python-format
+msgid "Mountpoint cannot be translated: %s"
+msgstr ""
+
+#: ../nova/image/s3.py:365
+#, python-format
+msgid "Failed to decrypt private key: %s"
+msgstr ""
+
+#: ../nova/image/s3.py:374
+#, python-format
+msgid "Failed to decrypt initialization vector: %s"
+msgstr ""
+
+#: ../nova/image/s3.py:385
+#, python-format
+msgid "Failed to decrypt image file %(image_file)s: %(err)s"
+msgstr ""
+
+#: ../nova/objectstore/handler.py:106
+#, python-format
+msgid "Unknown S3 value type %r"
+msgstr ""
+
+#: ../nova/objectstore/handler.py:137
+msgid "Authenticated request"
+msgstr ""
+
+#: ../nova/objectstore/handler.py:182
+msgid "List of buckets requested"
+msgstr ""
+
+#: ../nova/objectstore/handler.py:209
+#, python-format
+msgid "List keys for bucket %s"
+msgstr ""
+
+#: ../nova/objectstore/handler.py:217
+#, python-format
+msgid "Unauthorized attempt to access bucket %s"
+msgstr ""
+
+#: ../nova/objectstore/handler.py:235
+#, python-format
+msgid "Creating bucket %s"
+msgstr ""
+
+#: ../nova/objectstore/handler.py:245
+#, python-format
+msgid "Deleting bucket %s"
+msgstr ""
+
+#: ../nova/objectstore/handler.py:249
+#, python-format
+msgid "Unauthorized attempt to delete bucket %s"
+msgstr ""
+
+#: ../nova/objectstore/handler.py:273
+#, python-format
+msgid "Getting object: %(bname)s / %(nm)s"
+msgstr ""
+
+#: ../nova/objectstore/handler.py:276
+#, python-format
+msgid "Unauthorized attempt to get object %(nm)s from bucket %(bname)s"
+msgstr ""
+
+#: ../nova/objectstore/handler.py:296
+#, python-format
+msgid "Putting object: %(bname)s / %(nm)s"
+msgstr ""
+
+#: ../nova/objectstore/handler.py:299
+#, python-format
+msgid "Unauthorized attempt to upload object %(nm)s to bucket %(bname)s"
+msgstr ""
+
+#: ../nova/objectstore/handler.py:318
+#, python-format
+msgid "Deleting object: %(bname)s / %(nm)s"
+msgstr ""
+
+#: ../nova/objectstore/handler.py:322
+#, python-format
+msgid "Unauthorized attempt to delete object %(nm)s from bucket %(bname)s"
+msgstr ""
+
+#: ../nova/objectstore/handler.py:396
+#, python-format
+msgid "Not authorized to upload image: invalid directory %s"
+msgstr ""
+
+#: ../nova/objectstore/handler.py:404
+#, python-format
+msgid "Not authorized to upload image: unauthorized bucket %s"
+msgstr ""
+
+#: ../nova/objectstore/handler.py:409
+#, python-format
+msgid "Starting image upload: %s"
+msgstr ""
+
+#: ../nova/objectstore/handler.py:423
+#, python-format
+msgid "Not authorized to update attributes of image %s"
+msgstr ""
+
+#: ../nova/objectstore/handler.py:431
+#, python-format
+msgid "Toggling publicity flag of image %(image_id)s %(newstatus)r"
+msgstr ""
+
+#. other attributes imply update
+#: ../nova/objectstore/handler.py:436
+#, python-format
+msgid "Updating user fields on image %s"
+msgstr ""
+
+#: ../nova/objectstore/handler.py:450
+#, python-format
+msgid "Unauthorized attempt to delete image %s"
+msgstr ""
+
+#: ../nova/objectstore/handler.py:455
+#, python-format
+msgid "Deleted image: %s"
+msgstr ""
+
+#: ../nova/auth/manager.py:277
+#, python-format
+msgid "Looking up user: %r"
+msgstr ""
+
+#: ../nova/auth/manager.py:281
+#, python-format
+msgid "Failed authorization for access key %s"
+msgstr ""
+
+#: ../nova/auth/manager.py:264
+#, python-format
+msgid "No user found for access key %s"
+msgstr ""
+
+#: ../nova/auth/manager.py:287
+#, python-format
+msgid "Using project name = user name (%s)"
+msgstr ""
+
+#: ../nova/auth/manager.py:294
+#, python-format
+msgid "failed authorization: no project named %(pjid)s (user=%(uname)s)"
+msgstr ""
+
+#: ../nova/auth/manager.py:279
+#, python-format
+msgid "No project called %s could be found"
+msgstr ""
+
+#: ../nova/auth/manager.py:303
+#, python-format
+msgid ""
+"Failed authorization: user %(uname)s not admin and not member of project "
+"%(pjname)s"
+msgstr ""
+
+#: ../nova/auth/manager.py:289
+#, python-format
+msgid "User %(uid)s is not a member of project %(pjid)s"
+msgstr ""
+
+#: ../nova/auth/manager.py:314 ../nova/auth/manager.py:336
+#, python-format
+msgid "Invalid signature for user %s"
+msgstr ""
+
+#: ../nova/auth/manager.py:299 ../nova/auth/manager.py:310
+msgid "Signature does not match"
+msgstr ""
+
+#: ../nova/auth/manager.py:428
+msgid "Must specify project"
+msgstr ""
+
+#: ../nova/auth/manager.py:414
+#, python-format
+msgid "The %s role can not be found"
+msgstr ""
+
+#: ../nova/auth/manager.py:416
+#, python-format
+msgid "The %s role is global only"
+msgstr ""
+
+#: ../nova/auth/manager.py:469
+#, python-format
+msgid "Adding role %(role)s to user %(uid)s in project %(pid)s"
+msgstr ""
+
+#: ../nova/auth/manager.py:472
+#, python-format
+msgid "Adding sitewide role %(role)s to user %(uid)s"
+msgstr ""
+
+#: ../nova/auth/manager.py:498
+#, python-format
+msgid "Removing role %(role)s from user %(uid)s on project %(pid)s"
+msgstr ""
+
+#: ../nova/auth/manager.py:501
+#, python-format
+msgid "Removing sitewide role %(role)s from user %(uid)s"
+msgstr ""
+
+#: ../nova/auth/manager.py:574
+#, python-format
+msgid "Created project %(name)s with manager %(manager_user)s"
+msgstr ""
+
+#: ../nova/auth/manager.py:592
+#, python-format
+msgid "modifying project %s"
+msgstr ""
+
+#: ../nova/auth/manager.py:604
+#, python-format
+msgid "Adding user %(uid)s to project %(pid)s"
+msgstr ""
+
+#: ../nova/auth/manager.py:625
+#, python-format
+msgid "Remove user %(uid)s from project %(pid)s"
+msgstr ""
+
+#: ../nova/auth/manager.py:655
+#, python-format
+msgid "Deleting project %s"
+msgstr ""
+
+#: ../nova/auth/manager.py:713
+#, python-format
+msgid "Created user %(rvname)s (admin: %(rvadmin)r)"
+msgstr ""
+
+#: ../nova/auth/manager.py:722
+#, python-format
+msgid "Deleting user %s"
+msgstr ""
+
+#: ../nova/auth/manager.py:732
+#, python-format
+msgid "Access Key change for user %s"
+msgstr ""
+
+#: ../nova/auth/manager.py:734
+#, python-format
+msgid "Secret Key change for user %s"
+msgstr ""
+
+#: ../nova/auth/manager.py:736
+#, python-format
+msgid "Admin status set to %(admin)r for user %(uid)s"
+msgstr ""
+
+#: ../nova/auth/manager.py:781
+#, python-format
+msgid "No vpn data for project %s"
+msgstr ""
+
+#: ../nova/service.py:141
+#, python-format
+msgid "Starting %(topic)s node (version %(vcs_string)s)"
+msgstr ""
+
+#: ../nova/service.py:232
+msgid "Service killed that has no database entry"
+msgstr ""
+
+#: ../nova/service.py:269
+msgid "The service database object disappeared, Recreating it."
+msgstr ""
+
+#: ../nova/service.py:284
+msgid "Recovered model server connection!"
+msgstr ""
+
+#: ../nova/service.py:290
+msgid "model server went away"
+msgstr ""
+
+#: ../nova/auth/ldapdriver.py:174
+#, python-format
+msgid "LDAP user %s already exists"
+msgstr ""
+
+#: ../nova/auth/ldapdriver.py:205
+#, python-format
+msgid "LDAP object for %s doesn't exist"
+msgstr ""
+
+#: ../nova/auth/ldapdriver.py:348
+#, python-format
+msgid "User %s doesn't exist"
+msgstr ""
+
+#: ../nova/auth/ldapdriver.py:472
+#, python-format
+msgid "Group can't be created because group %s already exists"
+msgstr ""
+
+#: ../nova/auth/ldapdriver.py:478
+#, python-format
+msgid "Group can't be created because user %s doesn't exist"
+msgstr ""
+
+#: ../nova/auth/ldapdriver.py:495
+#, python-format
+msgid "User %s can't be searched in group because the user doesn't exist"
+msgstr ""
+
+#: ../nova/auth/ldapdriver.py:507
+#, python-format
+msgid "User %s can't be added to the group because the user doesn't exist"
+msgstr ""
+
+#: ../nova/auth/ldapdriver.py:510 ../nova/auth/ldapdriver.py:521
+#, python-format
+msgid "The group at dn %s doesn't exist"
+msgstr ""
+
+#: ../nova/exception.py:790
+#, python-format
+msgid "User %(uid)s is already a member of the group %(group_dn)s"
+msgstr ""
+
+#: ../nova/auth/ldapdriver.py:524
+#, python-format
+msgid ""
+"User %s can't be removed from the group because the user doesn't exist"
+msgstr ""
+
+#: ../nova/auth/ldapdriver.py:528
+#, python-format
+msgid "User %s is not a member of the group"
+msgstr ""
+
+#: ../nova/auth/ldapdriver.py:621
+#, python-format
+msgid ""
+"Attempted to remove the last member of a group. Deleting the group at %s "
+"instead."
+msgstr ""
+
+#: ../nova/auth/ldapdriver.py:549
+#, python-format
+msgid "User %s can't be removed from all because the user doesn't exist"
+msgstr ""
+
+#: ../nova/auth/ldapdriver.py:564
+#, python-format
+msgid "Group at dn %s doesn't exist"
+msgstr ""
+
+#: ../nova/virt/xenapi/network_utils.py:55
+#, python-format
+msgid "Found non-unique network for bridge %s"
+msgstr ""
+
+#: ../nova/virt/xenapi/network_utils.py:58
+#, python-format
+msgid "Found no network for bridge %s"
+msgstr ""
+
+#: ../nova/api/ec2/admin.py:145
+#, python-format
+msgid "Creating new user: %s"
+msgstr ""
+
+#: ../nova/api/ec2/admin.py:153
+#, python-format
+msgid "Deleting user: %s"
+msgstr ""
+
+#: ../nova/api/ec2/admin.py:175
+#, python-format
+msgid "Adding role %(role)s to user %(user)s for project %(project)s"
+msgstr ""
+
+#: ../nova/api/ec2/admin.py:179
+#, python-format
+msgid "Adding sitewide role %(role)s to user %(user)s"
+msgstr ""
+
+#: ../nova/api/ec2/admin.py:185
+#, python-format
+msgid "Removing role %(role)s from user %(user)s for project %(project)s"
+msgstr ""
+
+#: ../nova/api/ec2/admin.py:189
+#, python-format
+msgid "Removing sitewide role %(role)s from user %(user)s"
+msgstr ""
+
+#: ../nova/api/ec2/admin.py:194 ../nova/api/ec2/admin.py:271
+msgid "operation must be add or remove"
+msgstr ""
+
+#: ../nova/api/ec2/admin.py:207
+#, python-format
+msgid "Getting x509 for user: %(name)s on project: %(project)s"
+msgstr ""
+
+#: ../nova/api/ec2/admin.py:225
+#, python-format
+msgid "Create project %(name)s managed by %(manager_user)s"
+msgstr ""
+
+#: ../nova/api/ec2/admin.py:238
+#, python-format
+msgid "Modify project: %(name)s managed by %(manager_user)s"
+msgstr ""
+
+#: ../nova/api/ec2/admin.py:248
+#, python-format
+msgid "Delete project: %s"
+msgstr ""
+
+#: ../nova/api/ec2/admin.py:262
+#, python-format
+msgid "Adding user %(user)s to project %(project)s"
+msgstr ""
+
+#: ../nova/api/ec2/admin.py:266
+#, python-format
+msgid "Removing user %(user)s from project %(project)s"
+msgstr ""
diff --git a/po/es.po b/po/es.po
index fe54e7e4a..5a49ab4e4 100644
--- a/po/es.po
+++ b/po/es.po
@@ -8,14 +8,14 @@ msgstr ""
"Project-Id-Version: nova\n"
"Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n"
"POT-Creation-Date: 2011-02-21 10:03-0500\n"
-"PO-Revision-Date: 2011-12-08 21:33+0000\n"
-"Last-Translator: Edward Pacheco <Unknown>\n"
+"PO-Revision-Date: 2012-01-02 01:45+0000\n"
+"Last-Translator: jsoler <Unknown>\n"
"Language-Team: Spanish <es@li.org>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"X-Launchpad-Export-Date: 2011-12-09 05:20+0000\n"
-"X-Generator: Launchpad (build 14450)\n"
+"X-Launchpad-Export-Date: 2012-01-03 05:04+0000\n"
+"X-Generator: Launchpad (build 14616)\n"
#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55
#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110
@@ -284,11 +284,11 @@ msgstr "check_instance_lock: ejecutando: |%s|"
msgid "check_instance_lock: not executing |%s|"
msgstr "check_instance_lock: no ejecutando |%s|"
-#: ../nova/compute/manager.py:334
+#: ../nova/compute/manager.py:368
msgid "Instance has already been created"
msgstr "La instancia ha sido creada previamente"
-#: ../nova/compute/manager.py:391
+#: ../nova/compute/manager.py:425
#, python-format
msgid "instance %s: starting..."
msgstr "instancia %s: iniciando..."
@@ -308,12 +308,12 @@ msgstr "Finalizando la instancia %s"
msgid "Deallocating address %s"
msgstr "Desasociando la dirección %s"
-#: ../nova/compute/manager.py:524
+#: ../nova/compute/manager.py:558
#, python-format
msgid "trying to destroy already destroyed instance: %s"
msgstr "intentando finalizar una instancia que ya había sido finalizada: %s"
-#: ../nova/compute/manager.py:675
+#: ../nova/compute/manager.py:709
#, python-format
msgid "Rebooting instance %s"
msgstr "Reiniciando instancia %s"
@@ -327,7 +327,7 @@ msgstr ""
"intentando reiniciar una instancia no ejecutada: %(instance_id)s (state: "
"%(state)s expected: %(running)s)"
-#: ../nova/compute/manager.py:734
+#: ../nova/compute/manager.py:768
#, python-format
msgid "instance %s: snapshotting"
msgstr "instancia %s: creando snapshot"
@@ -369,67 +369,67 @@ msgstr ""
msgid "instance %(nm)s: injecting file to %(plain_path)s"
msgstr "instancia %(nm)s: inyectando archivo en %(plain_path)s"
-#: ../nova/compute/manager.py:902
+#: ../nova/compute/manager.py:938
#, python-format
msgid "instance %s: rescuing"
msgstr "instancia %s: rescatando"
-#: ../nova/compute/manager.py:924
+#: ../nova/compute/manager.py:960
#, python-format
msgid "instance %s: unrescuing"
msgstr ""
-#: ../nova/compute/manager.py:1191
+#: ../nova/compute/manager.py:1240
#, python-format
msgid "instance %s: pausing"
msgstr "instancia %s: pausando"
-#: ../nova/compute/manager.py:1208
+#: ../nova/compute/manager.py:1257
#, python-format
msgid "instance %s: unpausing"
msgstr "instancia %s: continuando tras pausa"
-#: ../nova/compute/manager.py:1236
+#: ../nova/compute/manager.py:1285
#, python-format
msgid "instance %s: retrieving diagnostics"
msgstr "instancia %s: obteniendo los diagnosticos"
-#: ../nova/compute/manager.py:1244
+#: ../nova/compute/manager.py:1293
#, python-format
msgid "instance %s: suspending"
msgstr "instancia %s: suspendiendo"
-#: ../nova/compute/manager.py:1261
+#: ../nova/compute/manager.py:1310
#, python-format
msgid "instance %s: resuming"
msgstr "instancia %s: continuando"
-#: ../nova/compute/manager.py:1279
+#: ../nova/compute/manager.py:1328
#, python-format
msgid "instance %s: locking"
msgstr "instancia %s: bloqueando"
-#: ../nova/compute/manager.py:1287
+#: ../nova/compute/manager.py:1336
#, python-format
msgid "instance %s: unlocking"
msgstr "instancia %s: desbloqueando"
-#: ../nova/compute/manager.py:1294
+#: ../nova/compute/manager.py:1343
#, python-format
msgid "instance %s: getting locked state"
msgstr "instancia %s: pasando a estado bloqueado"
-#: ../nova/compute/manager.py:1303
+#: ../nova/compute/manager.py:1352
#, python-format
msgid "instance %s: reset network"
msgstr "instancia %s: reiniciar redes"
-#: ../nova/compute/manager.py:1323 ../nova/api/ec2/cloud.py:800
+#: ../nova/compute/manager.py:1372 ../nova/api/ec2/cloud.py:800
#, python-format
msgid "Get console output for instance %s"
msgstr "Obtener salida de la consola para la instancia %s"
-#: ../nova/compute/manager.py:1332
+#: ../nova/compute/manager.py:1381
#, python-format
msgid "instance %s: getting ajax console"
msgstr "instancia %s: obteniendo consola ajax"
@@ -451,7 +451,7 @@ msgid "instance %(instance_id)s: attach failed %(mountpoint)s, removing"
msgstr ""
"instancia %(instance_id)s: adjuntar fallo %(mountpoint)s, removiendo"
-#: ../nova/compute/manager.py:1423
+#: ../nova/compute/manager.py:1472
#, python-format
msgid ""
"Detach volume %(volume_id)s from mountpoint %(mp)s on instance "
@@ -460,7 +460,7 @@ msgstr ""
"Quitar el volumen %(volume_id)s del punto de montaje %(mp)s en la instancia "
"%(instance_id)s"
-#: ../nova/compute/manager.py:1428
+#: ../nova/compute/manager.py:1477
#, python-format
msgid "Detaching volume from unknown instance %s"
msgstr "Desvinculando volumen de instancia desconocida %s"
@@ -470,7 +470,7 @@ msgstr "Desvinculando volumen de instancia desconocida %s"
msgid "Host %s is not alive"
msgstr "Host %s no responde"
-#: ../nova/scheduler/simple.py:65
+#: ../nova/scheduler/simple.py:67
msgid "All hosts have too many cores"
msgstr "Todos los hosts tienen demasiados cores"
@@ -479,11 +479,11 @@ msgstr "Todos los hosts tienen demasiados cores"
msgid "Host %s not available"
msgstr "Host %s no disponible"
-#: ../nova/scheduler/simple.py:117
+#: ../nova/scheduler/simple.py:119
msgid "All hosts have too many gigabytes"
msgstr "Todos los hosts tienen demasiados gigabytes"
-#: ../nova/scheduler/simple.py:134
+#: ../nova/scheduler/simple.py:136
msgid "All hosts have too many networks"
msgstr "Todos los hosts tienen demasiadas redes"
@@ -545,27 +545,27 @@ msgstr "volumen %s: eliminado satisfactoriamente"
msgid "%(text)s: _db_content => %(content)s"
msgstr "%(text)s: _db_content => %(content)s"
-#: ../nova/virt/xenapi/fake.py:492 ../nova/virt/xenapi/fake.py:591
-#: ../nova/virt/xenapi/fake.py:609 ../nova/virt/xenapi/fake.py:672
+#: ../nova/virt/xenapi/fake.py:495 ../nova/virt/xenapi/fake.py:594
+#: ../nova/virt/xenapi/fake.py:612 ../nova/virt/xenapi/fake.py:675
msgid "Raising NotImplemented"
msgstr "Lanzando NotImplemented"
-#: ../nova/virt/xenapi/fake.py:494
+#: ../nova/virt/xenapi/fake.py:497
#, python-format
msgid "xenapi.fake does not have an implementation for %s"
msgstr "xenapi.fake no tiene una implementación para %s"
-#: ../nova/virt/xenapi/fake.py:528
+#: ../nova/virt/xenapi/fake.py:531
#, python-format
msgid "Calling %(localname)s %(impl)s"
msgstr "Llamando %(localname)s %(impl)s"
-#: ../nova/virt/xenapi/fake.py:533
+#: ../nova/virt/xenapi/fake.py:536
#, python-format
msgid "Calling getter %s"
msgstr "Llanado al adquiridor %s"
-#: ../nova/virt/xenapi/fake.py:593
+#: ../nova/virt/xenapi/fake.py:596
#, python-format
msgid ""
"xenapi.fake does not have an implementation for %s or it has been called "
@@ -587,34 +587,34 @@ msgstr "Hay que vigilar la instancia %s hasta que este en ejecución..."
msgid "Failed to open connection to the hypervisor"
msgstr "Fallo al abrir conexión con el hypervisor"
-#: ../nova/network/linux_net.py:923
+#: ../nova/network/linux_net.py:931
#, python-format
msgid "Starting VLAN inteface %s"
msgstr "Iniciando interfaz VLAN %s"
-#: ../nova/network/linux_net.py:952
+#: ../nova/network/linux_net.py:960
#, python-format
msgid "Starting Bridge interface for %s"
msgstr "Iniciando interfaz puente para %s"
#. pylint: disable=W0703
-#: ../nova/network/linux_net.py:652
+#: ../nova/network/linux_net.py:660
#, python-format
msgid "Hupping dnsmasq threw %s"
msgstr "Excepción al recargar la configuración de dnsmasq: %s"
-#: ../nova/network/linux_net.py:654
+#: ../nova/network/linux_net.py:662
#, python-format
msgid "Pid %d is stale, relaunching dnsmasq"
msgstr "El pid %d está pasado, relanzando dnsmasq"
#. pylint: disable=W0703
-#: ../nova/network/linux_net.py:714
+#: ../nova/network/linux_net.py:722
#, python-format
msgid "killing radvd threw %s"
msgstr "Matando radvd lanzado %s"
-#: ../nova/network/linux_net.py:716
+#: ../nova/network/linux_net.py:724
#, python-format
msgid "Pid %d is stale, relaunching radvd"
msgstr "Pid %d corrupto, relanzando radvd"
@@ -625,7 +625,7 @@ msgstr "Pid %d corrupto, relanzando radvd"
msgid "Killing dnsmasq threw %s"
msgstr "Al matar dnsmasq se lanzó %s"
-#: ../nova/utils.py:67
+#: ../nova/utils.py:66
#, python-format
msgid "Inner Exception: %s"
msgstr "Excepción interna: %s"
@@ -635,82 +635,82 @@ msgstr "Excepción interna: %s"
msgid "Class %s cannot be found"
msgstr "La clase %s no ha podido ser encontrada."
-#: ../nova/utils.py:128
+#: ../nova/utils.py:150
#, python-format
msgid "Fetching %s"
msgstr "Obteniendo %s"
-#: ../nova/utils.py:169
+#: ../nova/utils.py:198
#, python-format
msgid "Running cmd (subprocess): %s"
msgstr "Ejecutando cmd (subprocesos): %s"
-#: ../nova/utils.py:185 ../nova/utils.py:235
+#: ../nova/utils.py:214 ../nova/utils.py:264
#, python-format
msgid "Result was %s"
msgstr "El resultado fue %s"
-#: ../nova/utils.py:211
+#: ../nova/utils.py:240
#, python-format
msgid "Running cmd (SSH): %s"
msgstr "corriendo cmd (SSH): %s"
-#: ../nova/utils.py:275
+#: ../nova/utils.py:304
#, python-format
msgid "debug in callback: %s"
msgstr "Depuración de la devolución de llamada: %s"
-#: ../nova/utils.py:280
+#: ../nova/utils.py:309
#, python-format
msgid "Running %s"
msgstr "Ejecutando %s"
-#: ../nova/utils.py:385
+#: ../nova/utils.py:414
#, python-format
msgid "Link Local address is not found.:%s"
msgstr "No se encuentra la dirección del enlace local.:%s"
-#: ../nova/utils.py:388
+#: ../nova/utils.py:417
#, python-format
msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s"
msgstr "No se pudo obtener enlace de la ip local de %(interface)s :%(ex)s"
-#: ../nova/utils.py:485
+#: ../nova/utils.py:514
#, python-format
msgid "Invalid backend: %s"
msgstr "backend inválido: %s"
-#: ../nova/utils.py:496
+#: ../nova/utils.py:525
#, python-format
msgid "backend %s"
msgstr "backend %s"
-#: ../nova/fakerabbit.py:50
+#: ../nova/testing/fake/rabbit.py:50
#, python-format
msgid "(%(nm)s) publish (key: %(routing_key)s) %(message)s"
msgstr "(%(nm)s) publica (key: %(routing_key)s) %(message)s"
-#: ../nova/fakerabbit.py:54
+#: ../nova/testing/fake/rabbit.py:54
#, python-format
msgid "Publishing to route %s"
msgstr "Publicando la ruta %s"
-#: ../nova/fakerabbit.py:84
+#: ../nova/testing/fake/rabbit.py:84
#, python-format
msgid "Declaring queue %s"
msgstr "Declarando cola %s"
-#: ../nova/fakerabbit.py:90
+#: ../nova/testing/fake/rabbit.py:90
#, python-format
msgid "Declaring exchange %s"
msgstr "Declarando intercambio %s"
-#: ../nova/fakerabbit.py:96
+#: ../nova/testing/fake/rabbit.py:96
#, python-format
msgid "Binding %(queue)s to %(exchange)s with key %(routing_key)s"
msgstr "Enlazando %(queue)s a %(exchange)s con la llave %(routing_key)s"
-#: ../nova/fakerabbit.py:133
+#: ../nova/testing/fake/rabbit.py:133
#, python-format
msgid "Getting from %(queue)s: %(message)s"
msgstr "Obtendiendo desde %(queue)s: %(message)s"
@@ -725,12 +725,12 @@ msgstr "Creada VM %s..."
msgid "Created VM %(instance_name)s as %(vm_ref)s."
msgstr "VM creada %(instance_name)s como %(vm_ref)s."
-#: ../nova/virt/xenapi/volume_utils.py:208
+#: ../nova/virt/xenapi/volume_utils.py:206
#, python-format
msgid "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... "
msgstr "Creando VBD para VM %(vm_ref)s, VDI %(vdi_ref)s ... "
-#: ../nova/virt/xenapi/volume_utils.py:211
+#: ../nova/virt/xenapi/volume_utils.py:209
#, python-format
msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s."
msgstr "Creado el VBD %(vbd_ref)s para VM %(vm_ref)s, VDI %(vdi_ref)s"
@@ -750,12 +750,12 @@ msgstr "Imposible desconectar VBD %s"
msgid "Unable to destroy VBD %s"
msgstr "Imposible destruir VBD %s"
-#: ../nova/virt/xenapi/vmops.py:1383
+#: ../nova/virt/xenapi/vmops.py:1449
#, python-format
msgid "Creating VIF for VM %(vm_ref)s, network %(network_ref)s."
msgstr "Creando VIF para VM %(vm_ref)s, red %(network_ref)s."
-#: ../nova/virt/xenapi/vmops.py:1386
+#: ../nova/virt/xenapi/vmops.py:1452
#, python-format
msgid "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s."
msgstr "Creado el VIF %(vif_ref)s para VM %(vm_ref)s, red %(network_ref)s."
@@ -769,21 +769,19 @@ msgstr ""
"VDI creado %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s) "
"sobre %(sr_ref)s."
-#. TODO(sirp): Add quiesce and VSS locking support when Windows support
-#. is added
-#: ../nova/virt/xenapi/vm_utils.py:324
+#: ../nova/virt/xenapi/vm_utils.py:322
#, python-format
msgid "Snapshotting VM %(vm_ref)s with label '%(label)s'..."
msgstr "Creando snapshot de la VM %(vm_ref)s con etiqueta '%(label)s'..."
-#: ../nova/virt/xenapi/vm_utils.py:338
+#: ../nova/virt/xenapi/vm_utils.py:336
#, python-format
msgid "Created snapshot %(template_vm_ref)s from VM %(vm_ref)s."
msgstr "Instantánea creada %(template_vm_ref)s de la VM %(vm_ref)s."
#. NOTE(sirp): Currently we only support uploading images as VHD, there
#. is no RAW equivalent (yet)
-#: ../nova/virt/xenapi/vm_utils.py:368
+#: ../nova/virt/xenapi/vm_utils.py:366
#, python-format
msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s"
msgstr "Pidiendo xapi a subir %(vdi_uuids)s como ID %(image_id)s"
@@ -801,12 +799,12 @@ msgstr "Imagen Glance %s"
#. We need to invoke a plugin for copying the
#. content of the VDI into the proper path.
-#: ../nova/virt/xenapi/vm_utils.py:692
+#: ../nova/virt/xenapi/vm_utils.py:671
#, python-format
msgid "Copying VDI %s to /boot/guest on dom0"
msgstr "Copiando VDI %s a /boot/guest on dom0"
-#: ../nova/virt/xenapi/vm_utils.py:702
+#: ../nova/virt/xenapi/vm_utils.py:681
#, python-format
msgid "Kernel/Ramdisk VDI %s destroyed"
msgstr "Kernel/Ramdisk VDI %s destruído"
@@ -816,7 +814,7 @@ msgstr "Kernel/Ramdisk VDI %s destruído"
msgid "Asking xapi to fetch %(url)s as %(access)s"
msgstr "Pidiendo a xapi que descargue %(url)s como %(access)s"
-#: ../nova/virt/xenapi/vm_utils.py:780
+#: ../nova/virt/xenapi/vm_utils.py:759
#, python-format
msgid "Looking up vdi %s for PV kernel"
msgstr "Buscando vid %s para el kernel PV"
@@ -826,17 +824,17 @@ msgstr "Buscando vid %s para el kernel PV"
msgid "PV Kernel in VDI:%s"
msgstr "Kernel PV en VDI:%s"
-#: ../nova/virt/xenapi/vm_utils.py:1307
+#: ../nova/virt/xenapi/vm_utils.py:1294
#, python-format
msgid "Running pygrub against %s"
msgstr "Ejecutando pygrub contra %s"
-#: ../nova/virt/xenapi/vm_utils.py:1314
+#: ../nova/virt/xenapi/vm_utils.py:1301
#, python-format
msgid "Found Xen kernel %s"
msgstr "Kernel Xen Encontrado %s"
-#: ../nova/virt/xenapi/vm_utils.py:1316
+#: ../nova/virt/xenapi/vm_utils.py:1303
msgid "No Xen kernel found. Booting HVM."
msgstr "Kernel Xen no encontrado. Reiniciando HVM"
@@ -845,38 +843,38 @@ msgstr "Kernel Xen no encontrado. Reiniciando HVM"
msgid "duplicate name found: %s"
msgstr "se ha encontrado un nombre duplicado: %s"
-#: ../nova/virt/xenapi/vm_utils.py:832
+#: ../nova/virt/xenapi/vm_utils.py:819
#, python-format
msgid "VDI %s is still available"
msgstr "VDI %s está todavía disponible"
-#: ../nova/virt/xenapi/vm_utils.py:870
+#: ../nova/virt/xenapi/vm_utils.py:857
#, python-format
msgid "(VM_UTILS) xenserver vm state -> |%s|"
msgstr "(VM_UTILS) xenserver vm state -> |%s|"
-#: ../nova/virt/xenapi/vm_utils.py:872
+#: ../nova/virt/xenapi/vm_utils.py:859
#, python-format
msgid "(VM_UTILS) xenapi power_state -> |%s|"
msgstr "(VM_UTILS) xenapi power_state -> |%s|"
-#: ../nova/virt/xenapi/vm_utils.py:1046
+#: ../nova/virt/xenapi/vm_utils.py:1101
#, python-format
msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s"
msgstr "VHD %(vdi_uuid)s tiene origen en %(parent_ref)s"
-#: ../nova/virt/xenapi/vm_utils.py:928
+#: ../nova/virt/xenapi/vm_utils.py:915
#, python-format
msgid "Re-scanning SR %s"
msgstr "Re-escaneando SR %s"
-#: ../nova/virt/xenapi/vm_utils.py:1097
+#: ../nova/virt/xenapi/vm_utils.py:1152
#, python-format
msgid ""
"VHD coalesce attempts exceeded (%(counter)d > %(max_attempts)d), giving up..."
msgstr ""
-#: ../nova/virt/xenapi/vm_utils.py:1104
+#: ../nova/virt/xenapi/vm_utils.py:1159
#, python-format
msgid ""
"Parent %(parent_uuid)s doesn't match original parent "
@@ -894,25 +892,25 @@ msgid "Unexpected number of VDIs (%(num_vdis)s) found for VM %(vm_ref)s"
msgstr ""
"Numero de VDIs inesperado (%(num_vdis)s) encontrados por VM %(vm_ref)s"
-#: ../nova/virt/xenapi/vm_utils.py:1234
+#: ../nova/virt/xenapi/vm_utils.py:1221
#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:188
#, python-format
msgid "Creating VBD for VDI %s ... "
msgstr "Creando VBD para VDI %s ... "
-#: ../nova/virt/xenapi/vm_utils.py:1236
+#: ../nova/virt/xenapi/vm_utils.py:1223
#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:190
#, python-format
msgid "Creating VBD for VDI %s done."
msgstr "Creando VBF para VDI %s terminado"
-#: ../nova/virt/xenapi/vm_utils.py:1238
+#: ../nova/virt/xenapi/vm_utils.py:1225
#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:192
#, python-format
msgid "Plugging VBD %s ... "
msgstr ""
-#: ../nova/virt/xenapi/vm_utils.py:1241
+#: ../nova/virt/xenapi/vm_utils.py:1228
#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:194
#, python-format
msgid "Plugging VBD %s done."
@@ -928,40 +926,40 @@ msgstr ""
msgid "VBD %(vbd)s plugged into wrong dev, remapping to %(dev)s"
msgstr ""
-#: ../nova/virt/xenapi/vm_utils.py:1255
+#: ../nova/virt/xenapi/vm_utils.py:1242
#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:197
#, python-format
msgid "Destroying VBD for VDI %s ... "
msgstr ""
-#: ../nova/virt/xenapi/vm_utils.py:1259
+#: ../nova/virt/xenapi/vm_utils.py:1246
#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:200
#, python-format
msgid "Destroying VBD for VDI %s done."
msgstr ""
-#: ../nova/virt/xenapi/vm_utils.py:1271
+#: ../nova/virt/xenapi/vm_utils.py:1258
#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:211
msgid "VBD.unplug successful first time."
-msgstr ""
+msgstr "VBD.Primera desconexión satisfactoria."
-#: ../nova/virt/xenapi/vm_utils.py:1276
+#: ../nova/virt/xenapi/vm_utils.py:1263
#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:216
msgid "VBD.unplug rejected: retrying..."
-msgstr ""
+msgstr "VBD. Desconexión rechazada: reintentándolo..."
-#: ../nova/virt/xenapi/vm_utils.py:1281
+#: ../nova/virt/xenapi/vm_utils.py:1268
#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:220
msgid "VBD.unplug successful eventually."
-msgstr ""
+msgstr "VBD.Finalmente logro desconectar."
-#: ../nova/virt/xenapi/vm_utils.py:1284
+#: ../nova/virt/xenapi/vm_utils.py:1271
#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:223
#, python-format
msgid "Ignoring XenAPI.Failure in VBD.unplug: %s"
msgstr ""
-#: ../nova/virt/xenapi/vm_utils.py:1293
+#: ../nova/virt/xenapi/vm_utils.py:1280
#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:66
#, python-format
msgid "Ignoring XenAPI.Failure %s"
@@ -973,7 +971,7 @@ msgid ""
"Writing partition table %(primary_first)d %(primary_last)d to %(dest)s..."
msgstr ""
-#: ../nova/virt/xenapi/vm_utils.py:1353
+#: ../nova/virt/xenapi/vm_utils.py:1361
#, python-format
msgid "Writing partition table %s done."
msgstr ""
@@ -1152,7 +1150,7 @@ msgstr "Conectando a libvirt: %s"
msgid "Connection to libvirt broke"
msgstr "Conexión a libvirt rota"
-#: ../nova/virt/libvirt/connection.py:390
+#: ../nova/virt/libvirt/connection.py:394
#, python-format
msgid "instance %(instance_name)s: deleting instance files %(target)s"
msgstr ""
@@ -1192,7 +1190,7 @@ msgstr "instancia %s: rescatada"
msgid "_wait_for_rescue failed: %s"
msgstr "_wait_for_rescue falló: %s"
-#: ../nova/virt/libvirt/connection.py:665
+#: ../nova/virt/libvirt/connection.py:670
#, python-format
msgid "instance %s: is running"
msgstr "instancia %s: está ejecutándose"
@@ -1207,16 +1205,16 @@ msgstr "instancia %s: arrancada"
msgid "instance %s: failed to boot"
msgstr "insntancia %s: falló al arrancar"
-#: ../nova/virt/libvirt/connection.py:688
+#: ../nova/virt/libvirt/connection.py:693
#, python-format
msgid "virsh said: %r"
msgstr "virsh dijo: %r"
-#: ../nova/virt/libvirt/connection.py:692
+#: ../nova/virt/libvirt/connection.py:697
msgid "cool, it's a device"
msgstr "genial, es un dispositivo"
-#: ../nova/virt/libvirt/connection.py:703
+#: ../nova/virt/libvirt/connection.py:708
#, python-format
msgid "data: %(data)r, fpath: %(fpath)r"
msgstr ""
@@ -1230,7 +1228,7 @@ msgstr ""
msgid "Unable to find an open port"
msgstr ""
-#: ../nova/virt/libvirt/connection.py:849
+#: ../nova/virt/libvirt/connection.py:854
#, python-format
msgid "instance %s: Creating image"
msgstr "instancia %s: Creando imagen"
@@ -1246,7 +1244,7 @@ msgid "instance %(inst_name)s: injecting net into image %(img_id)s"
msgstr ""
#. This could be a windows image, or a vmdk format disk
-#: ../nova/virt/libvirt/connection.py:1039
+#: ../nova/virt/libvirt/connection.py:1044
#, python-format
msgid ""
"instance %(inst_name)s: ignoring error injecting data into image %(img_id)s "
@@ -1254,12 +1252,12 @@ msgid ""
msgstr ""
#. TODO(termie): cache?
-#: ../nova/virt/libvirt/connection.py:1178
+#: ../nova/virt/libvirt/connection.py:1183
#, python-format
msgid "instance %s: starting toXML method"
msgstr "instancia %s: comenzando método toXML"
-#: ../nova/virt/libvirt/connection.py:1182
+#: ../nova/virt/libvirt/connection.py:1187
#, python-format
msgid "instance %s: finished toXML method"
msgstr "instancia %s: finalizado método toXML"
@@ -1349,12 +1347,12 @@ msgstr "%s no es un ipProtocol valido"
msgid "Invalid port range"
msgstr "Rango de puerto inválido"
-#: ../nova/api/openstack/v2/contrib/security_groups.py:348
+#: ../nova/api/openstack/v2/contrib/security_groups.py:349
#, python-format
msgid "Revoke security group ingress %s"
msgstr "Revocar ingreso al grupo de seguridad %s"
-#: ../nova/api/openstack/v2/contrib/security_groups.py:210
+#: ../nova/api/openstack/v2/contrib/security_groups.py:211
msgid "Not enough parameters to build a valid rule."
msgstr ""
@@ -1362,18 +1360,18 @@ msgstr ""
msgid "No rule for the specified parameters."
msgstr "No hay regla para los parámetros especificados."
-#: ../nova/api/openstack/v2/contrib/security_groups.py:195
+#: ../nova/api/openstack/v2/contrib/security_groups.py:196
#, python-format
msgid "Authorize security group ingress %s"
msgstr "Autorizar ingreso al grupo de seguridad %s"
-#: ../nova/api/openstack/v2/contrib/security_groups.py:217
+#: ../nova/api/openstack/v2/contrib/security_groups.py:218
#, python-format
msgid "This rule already exists in group %s"
msgstr "Esta regla ya existe en el grupo %s"
#: ../nova/api/ec2/cloud.py:765
-#: ../nova/api/openstack/v2/contrib/security_groups.py:137
+#: ../nova/api/openstack/v2/contrib/security_groups.py:138
#, python-format
msgid "Create Security Group %s"
msgstr "Crear Grupo de Seguridad %s"
@@ -1384,12 +1382,12 @@ msgid "group %s already exists"
msgstr "el grupo %s ya existe"
#: ../nova/api/ec2/cloud.py:795
-#: ../nova/api/openstack/v2/contrib/security_groups.py:97
+#: ../nova/api/openstack/v2/contrib/security_groups.py:98
#, python-format
msgid "Delete security group %s"
msgstr "Borrar grupo de seguridad %s"
-#: ../nova/api/openstack/v2/contrib/volumes.py:157
+#: ../nova/api/openstack/v2/contrib/volumes.py:143
#: ../nova/api/ec2/cloud.py:887
#, python-format
msgid "Create volume of %s GB"
@@ -1400,7 +1398,7 @@ msgstr "Crear volumen de %s GB"
msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s"
msgstr ""
-#: ../nova/api/openstack/v2/contrib/volumes.py:302
+#: ../nova/api/openstack/v2/contrib/volumes.py:327
#: ../nova/api/ec2/cloud.py:936
#, python-format
msgid "Detach volume %s"
@@ -1420,32 +1418,32 @@ msgstr "Liberar dirección %s"
msgid "Associate address %(public_ip)s to instance %(instance_id)s"
msgstr ""
-#: ../nova/api/ec2/cloud.py:1247
+#: ../nova/api/ec2/cloud.py:1248
#, python-format
msgid "Disassociate address %s"
msgstr "Desasociar dirección %s"
-#: ../nova/api/ec2/cloud.py:1303
+#: ../nova/api/ec2/cloud.py:1296
msgid "Going to start terminating instances"
msgstr "Se va a iniciar la finalización de las instancias"
-#: ../nova/api/ec2/cloud.py:1312
+#: ../nova/api/ec2/cloud.py:1305
#, python-format
msgid "Reboot instance %r"
msgstr "Reiniciar instancia %r"
-#: ../nova/api/ec2/cloud.py:1446
+#: ../nova/api/ec2/cloud.py:1445
#, python-format
msgid "De-registering image %s"
msgstr "Des-registrando la imagen %s"
-#: ../nova/api/ec2/cloud.py:1473
+#: ../nova/api/ec2/cloud.py:1472
#, python-format
msgid "Registered image %(image_location)s with id %(image_id)s"
msgstr ""
#: ../nova/api/ec2/cloud.py:963 ../nova/api/ec2/cloud.py:1024
-#: ../nova/api/ec2/cloud.py:1501 ../nova/api/ec2/cloud.py:1516
+#: ../nova/api/ec2/cloud.py:1500 ../nova/api/ec2/cloud.py:1515
#, python-format
msgid "attribute not supported: %s"
msgstr "atributo no soportado: %s"
@@ -1455,19 +1453,19 @@ msgstr "atributo no soportado: %s"
msgid "invalid id: %s"
msgstr "id no valido: %s"
-#: ../nova/api/ec2/cloud.py:1519
+#: ../nova/api/ec2/cloud.py:1518
msgid "user or group not specified"
msgstr "usuario o grupo no especificado"
-#: ../nova/api/ec2/cloud.py:1521
+#: ../nova/api/ec2/cloud.py:1520
msgid "only group \"all\" is supported"
msgstr "sólo el grupo \"all\" está soportado"
-#: ../nova/api/ec2/cloud.py:1523
+#: ../nova/api/ec2/cloud.py:1522
msgid "operation_type must be add or remove"
msgstr "operation_type debe ser añadir o eliminar"
-#: ../nova/api/ec2/cloud.py:1524
+#: ../nova/api/ec2/cloud.py:1523
#, python-format
msgid "Updating image %s publicity"
msgstr "Actualizando imagen %s públicamente"
@@ -1553,12 +1551,12 @@ msgstr "Intentado la creación del nombre no único %s"
msgid "instance %(name)s: not enough free memory"
msgstr ""
-#: ../nova/virt/xenapi/vmops.py:383
+#: ../nova/virt/xenapi/vmops.py:388
#, python-format
msgid "Starting VM %s..."
msgstr "Iniciando VM %s..."
-#: ../nova/virt/xenapi/vmops.py:386
+#: ../nova/virt/xenapi/vmops.py:391
#, python-format
msgid "Spawning VM %(instance_name)s created %(vm_ref)s."
msgstr ""
@@ -1568,12 +1566,12 @@ msgstr ""
msgid "Invalid value for onset_files: '%s'"
msgstr ""
-#: ../nova/virt/xenapi/vmops.py:432
+#: ../nova/virt/xenapi/vmops.py:437
#, python-format
msgid "Injecting file path: '%s'"
msgstr ""
-#: ../nova/virt/xenapi/vmops.py:462
+#: ../nova/virt/xenapi/vmops.py:467
#, python-format
msgid "Instance %s: booted"
msgstr "Instancia %s: iniciada"
@@ -1585,17 +1583,17 @@ msgstr "Instancia no existente %s"
#. TODO(sirp): Add quiesce and VSS locking support when Windows support
#. is added
-#: ../nova/virt/xenapi/vmops.py:607
+#: ../nova/virt/xenapi/vmops.py:612
#, python-format
msgid "Starting snapshot for VM %s"
msgstr "Comenzando snapshot para la VM %s"
-#: ../nova/virt/xenapi/vmops.py:616
+#: ../nova/virt/xenapi/vmops.py:621
#, python-format
msgid "Unable to Snapshot %(vm_ref)s: %(exc)s"
msgstr ""
-#: ../nova/virt/xenapi/vmops.py:601
+#: ../nova/virt/xenapi/vmops.py:606
#, python-format
msgid "Finished snapshot and upload for VM %s"
msgstr "Finalizado el snapshot y la subida de la VM %s"
@@ -1609,7 +1607,7 @@ msgstr ""
msgid "Removing kernel/ramdisk files"
msgstr ""
-#: ../nova/virt/xenapi/vmops.py:1018
+#: ../nova/virt/xenapi/vmops.py:1086
msgid "kernel/ramdisk files removed"
msgstr ""
@@ -1634,17 +1632,17 @@ msgid ""
"args=%(strargs)s"
msgstr ""
-#: ../nova/virt/xenapi/vmops.py:1693
+#: ../nova/virt/xenapi/vmops.py:1764
#, python-format
msgid "OpenSSL error: %s"
msgstr ""
-#: ../nova/tests/test_compute.py:204 ../nova/tests/test_compute.py:1026
+#: ../nova/tests/test_compute.py:288 ../nova/tests/test_compute.py:1165
#, python-format
msgid "Running instances: %s"
msgstr "Ejecutando instancias: %s"
-#: ../nova/tests/test_compute.py:210
+#: ../nova/tests/test_compute.py:294
#, python-format
msgid "After terminating instances: %s"
msgstr "Después de terminar las instancias: %s"
@@ -1786,22 +1784,22 @@ msgstr ""
"Debes especificar xenapi_connection_url, xenapi_connection_username "
"(opcional), y xenapi_connection_password para usar connection_type=xenapi"
-#: ../nova/virt/xenapi_conn.py:534
+#: ../nova/virt/xenapi_conn.py:542
#, python-format
msgid "Task [%(name)s] %(task)s status: success %(result)s"
msgstr ""
-#: ../nova/virt/xenapi_conn.py:543
+#: ../nova/virt/xenapi_conn.py:551
#, python-format
msgid "Task [%(name)s] %(task)s status: %(status)s %(error_info)s"
msgstr ""
-#: ../nova/virt/xenapi_conn.py:569 ../nova/virt/xenapi_conn.py:582
+#: ../nova/virt/xenapi_conn.py:577 ../nova/virt/xenapi_conn.py:590
#, python-format
msgid "Got exception: %s"
msgstr "Obtenida excepción %s"
-#: ../nova/volume/driver.py:352
+#: ../nova/volume/driver.py:359
#, python-format
msgid "Could not find iSCSI export for volume %s"
msgstr ""
@@ -2082,7 +2080,7 @@ msgstr "La instancia %d no tiene host"
msgid "Quota exceeeded for %(pid)s, tried to run %(min_count)s instances"
msgstr ""
-#: ../nova/compute/api.py:199
+#: ../nova/compute/api.py:202
#, python-format
msgid ""
"Instance quota exceeded. You can only run %s more instances of this type."
@@ -2090,11 +2088,11 @@ msgstr ""
"Quota de instancias superada. Sólo puedes ejecutar %s instancias más de este "
"tipo."
-#: ../nova/compute/api.py:248
+#: ../nova/compute/api.py:251
msgid "Creating a raw instance"
msgstr "Creando una instancia raw"
-#: ../nova/compute/api.py:303
+#: ../nova/compute/api.py:306
#, python-format
msgid "Going to run %s instances..."
msgstr "Vamos a ejecutar %s insntacias..."
@@ -2104,7 +2102,7 @@ msgstr "Vamos a ejecutar %s insntacias..."
msgid "Casting to scheduler for %(pid)s/%(uid)s's instance %(instance_id)s"
msgstr ""
-#: ../nova/compute/api.py:814
+#: ../nova/compute/api.py:818
#, python-format
msgid "Going to try to terminate %s"
msgstr ""
@@ -2119,14 +2117,14 @@ msgstr "La instancia %d no se ha encontrado durante la finalización"
msgid "Instance %d is already being terminated"
msgstr "La instancia %d ha sido finalizada"
-#: ../nova/compute/api.py:1516
+#: ../nova/compute/api.py:1521
#, python-format
msgid "Invalid device specified: %s. Example device: /dev/vdb"
msgstr ""
"El dispositivo especificado no es válido: %s. Ejemplo de dispositivo: "
"/dev/vdb"
-#: ../nova/compute/api.py:1531
+#: ../nova/compute/api.py:1536
msgid "Volume isn't attached to anything!"
msgstr "¡El volumen no está unido a nada!"
@@ -2213,12 +2211,12 @@ msgstr ""
msgid "message %s"
msgstr "mensaje %s"
-#: ../nova/volume/driver.py:77
+#: ../nova/volume/driver.py:79
#, python-format
msgid "Recovering from a failed execute. Try number %s"
msgstr "Recuperandose de una ejecución fallida. Intenta el número %s"
-#: ../nova/volume/driver.py:87
+#: ../nova/volume/driver.py:89
#, python-format
msgid "volume group %s doesn't exist"
msgstr "el grupo de volumenes %s no existe"
@@ -2228,31 +2226,31 @@ msgstr "el grupo de volumenes %s no existe"
msgid "FAKE AOE: %s"
msgstr "Falso AOE: %s"
-#: ../nova/volume/driver.py:249 ../nova/volume/driver.py:832
+#: ../nova/volume/driver.py:251 ../nova/volume/driver.py:847
msgid "Skipping ensure_export. No iscsi_target "
msgstr ""
-#: ../nova/volume/driver.py:288 ../nova/volume/driver.py:297
-#: ../nova/volume/driver.py:876
+#: ../nova/volume/driver.py:295 ../nova/volume/driver.py:304
+#: ../nova/volume/driver.py:891
msgid "Skipping remove_export. No iscsi_target "
msgstr ""
-#: ../nova/volume/driver.py:456
+#: ../nova/volume/driver.py:463
#, python-format
msgid "FAKE ISCSI: %s"
msgstr "Falso ISCSI: %s"
-#: ../nova/volume/driver.py:468
+#: ../nova/volume/driver.py:475
#, python-format
msgid "rbd has no pool %s"
msgstr ""
-#: ../nova/volume/driver.py:535
+#: ../nova/volume/driver.py:545
#, python-format
msgid "Sheepdog is not working: %s"
msgstr ""
-#: ../nova/volume/driver.py:537
+#: ../nova/volume/driver.py:547
msgid "Sheepdog is not working"
msgstr ""
@@ -2294,7 +2292,7 @@ msgstr "La instancia %s no ha sido encontrada"
msgid "Dissassociated %s stale fixed ip(s)"
msgstr ""
-#: ../nova/network/manager.py:524
+#: ../nova/network/manager.py:525
msgid "setting network host"
msgstr "configurando la red del host"
@@ -2333,35 +2331,35 @@ msgstr ""
msgid "IP %(address)s released from bad mac %(inst_addr)s vs %(mac)s"
msgstr ""
-#: ../nova/network/manager.py:871
+#: ../nova/network/manager.py:881
#, python-format
msgid "IP %s released that was not leased"
msgstr ""
-#: ../nova/network/manager.py:1279
+#: ../nova/network/manager.py:1289
msgid ""
"The sum between the number of networks and the vlan start cannot be greater "
"than 4094"
msgstr ""
-#: ../nova/virt/xenapi/volume_utils.py:159
+#: ../nova/virt/xenapi/volume_utils.py:157
#, python-format
msgid "Introducing %s..."
msgstr "Introduciendo %s..."
#: ../nova/virt/xenapi/volume_utils.py:103
-#: ../nova/virt/xenapi/volume_utils.py:172
+#: ../nova/virt/xenapi/volume_utils.py:170
#: ../nova/virt/xenapi/volumeops.py:157
#, python-format
msgid "Introduced %(label)s as %(sr_ref)s."
msgstr ""
#: ../nova/virt/xenapi/volume_utils.py:76
-#: ../nova/virt/xenapi/volume_utils.py:176
+#: ../nova/virt/xenapi/volume_utils.py:174
msgid "Unable to create Storage Repository"
msgstr "Imposible crear el repositorio de almacenamiento"
-#: ../nova/virt/xenapi/volume_utils.py:188
+#: ../nova/virt/xenapi/volume_utils.py:186
#, python-format
msgid "Unable to find SR from VBD %s"
msgstr "Imposible encontrar SR en VBD %s"
@@ -2371,12 +2369,12 @@ msgstr "Imposible encontrar SR en VBD %s"
msgid "Forgetting SR %s ... "
msgstr "Olvidando SR %s... "
-#: ../nova/virt/xenapi/volume_utils.py:230
+#: ../nova/virt/xenapi/volume_utils.py:228
#, python-format
msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s"
msgstr ""
-#: ../nova/virt/xenapi/volume_utils.py:236
+#: ../nova/virt/xenapi/volume_utils.py:234
#, python-format
msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s"
msgstr ""
@@ -2391,17 +2389,17 @@ msgstr "Olvidando SR %s completado."
msgid "Ignoring exception %(exc)s when forgetting SR %(sr_ref)s"
msgstr ""
-#: ../nova/virt/xenapi/volume_utils.py:251
+#: ../nova/virt/xenapi/volume_utils.py:249
#, python-format
msgid "Unable to introduce VDI on SR %s"
msgstr "Incapaz de insertar VDI en SR %s"
-#: ../nova/virt/xenapi/volume_utils.py:259
+#: ../nova/virt/xenapi/volume_utils.py:257
#, python-format
msgid "Unable to get record of VDI %s on"
msgstr "Imposible obtener copia del VDI %s en"
-#: ../nova/virt/xenapi/volume_utils.py:281
+#: ../nova/virt/xenapi/volume_utils.py:279
#, python-format
msgid "Unable to introduce VDI for SR %s"
msgstr "Inposible insertar VDI para SR %s"
@@ -2411,7 +2409,7 @@ msgstr "Inposible insertar VDI para SR %s"
msgid "Unable to obtain target information %(device_path)s, %(mountpoint)s"
msgstr ""
-#: ../nova/virt/xenapi/volume_utils.py:358
+#: ../nova/virt/xenapi/volume_utils.py:356
#, python-format
msgid "Mountpoint cannot be translated: %s"
msgstr "Punto de montaje no puede ser traducido: %s"
@@ -2689,15 +2687,15 @@ msgstr ""
msgid "Service killed that has no database entry"
msgstr "Se detuvo un servicio sin entrada en la base de datos"
-#: ../nova/service.py:266
+#: ../nova/service.py:269
msgid "The service database object disappeared, Recreating it."
msgstr "El servicio objeto de base de datos ha desaparecido, recreándolo."
-#: ../nova/service.py:278
+#: ../nova/service.py:284
msgid "Recovered model server connection!"
msgstr "Recuperada la conexión al servidor de modelos."
-#: ../nova/service.py:284
+#: ../nova/service.py:290
msgid "model server went away"
msgstr "el servidor de modelos se ha ido"
@@ -2741,7 +2739,7 @@ msgstr ""
msgid "The group at dn %s doesn't exist"
msgstr ""
-#: ../nova/exception.py:785
+#: ../nova/exception.py:790
#, python-format
msgid "User %(uid)s is already a member of the group %(group_dn)s"
msgstr "El usuario %(uid)s es actualmente miembro del grupo %(group_dn)s"