diff options
218 files changed, 29415 insertions, 14816 deletions
diff --git a/.bzrignore b/.bzrignore index b751ad825..14d8028f7 100644 --- a/.bzrignore +++ b/.bzrignore @@ -5,13 +5,7 @@ _trial_temp keys networks nova.sqlite -CA/cacert.pem -CA/crl.pem -CA/index.txt* -CA/openssl.cnf -CA/serial* -CA/newcerts/*.pem -CA/private/cakey.pem +CA nova/vcsversion.py *.DS_Store .project @@ -4,6 +4,7 @@ <anotherjesse@gmail.com> <jesse@dancelamb> <anotherjesse@gmail.com> <jesse@gigantor.local> <anotherjesse@gmail.com> <jesse@ubuntu> +<anotherjesse@gmail.com> <jesse@aire.local> <ant@openstack.org> <amesserl@rackspace.com> <Armando.Migliaccio@eu.citrix.com> <armando.migliaccio@citrix.com> <brian.lamar@rackspace.com> <brian.lamar@gmail.com> @@ -28,6 +29,7 @@ <matt.dietz@rackspace.com> <matthewdietz@Matthew-Dietzs-MacBook-Pro.local> <matt.dietz@rackspace.com> <mdietz@openstack> <mordred@inaugust.com> <mordred@hudson> +<naveedm9@gmail.com> <naveed.massjouni@rackspace.com> <nirmal.ranganathan@rackspace.com> <nirmal.ranganathan@rackspace.coom> <paul@openstack.org> <paul.voccio@rackspace.com> <paul@openstack.org> <pvoccio@castor.local> @@ -35,6 +37,7 @@ <rlane@wikimedia.org> <laner@controller> <sleepsonthefloor@gmail.com> <root@tonbuntu> <soren.hansen@rackspace.com> <soren@linux2go.dk> +<throughnothing@gmail.com> <will.wolf@rackspace.com> <todd@ansolabs.com> <todd@lapex> <todd@ansolabs.com> <todd@rubidine.com> <tushar.vitthal.patil@gmail.com> <tpatil@vertex.co.in> @@ -43,5 +46,4 @@ <ueno.nachi@lab.ntt.co.jp> <openstack@lab.ntt.co.jp> <vishvananda@gmail.com> <root@mirror.nasanebula.net> <vishvananda@gmail.com> <root@ubuntu> -<naveedm9@gmail.com> <naveed.massjouni@rackspace.com> <vishvananda@gmail.com> <vishvananda@yahoo.com> @@ -1,3 +1,5 @@ +Alex Meade <alex.meade@rackspace.com> +Andrey Brindeyev <abrindeyev@griddynamics.com> Andy Smith <code@term.ie> Andy Southgate <andy.southgate@citrix.com> Anne Gentle <anne@openstack.org> @@ -15,6 +17,7 @@ Christian Berendt <berendt@b1-systems.de> Chuck Short <zulcss@ubuntu.com> Cory Wright <corywright@gmail.com> 
Dan Prince <dan.prince@rackspace.com> +Dave Walker <DaveWalker@ubuntu.com> David Pravec <David.Pravec@danix.org> Dean Troyer <dtroyer@gmail.com> Devin Carlen <devin.carlen@gmail.com> @@ -27,10 +30,13 @@ Gabe Westmaas <gabe.westmaas@rackspace.com> Hisaharu Ishii <ishii.hisaharu@lab.ntt.co.jp> Hisaki Ohara <hisaki.ohara@intel.com> Ilya Alekseyev <ialekseev@griddynamics.com> +Jason Koelker <jason@koelker.net> Jay Pipes <jaypipes@gmail.com> Jesse Andrews <anotherjesse@gmail.com> +Jimmy Bergman <jimmy@sigint.se> Joe Heck <heckj@mac.com> Joel Moore <joelbm24@gmail.com> +Johannes Erdfelt <johannes.erdfelt@rackspace.com> John Dewey <john@dewey.ws> John Tran <jtran@attinteractive.com> Jonathan Bryce <jbryce@jbryce.com> @@ -40,11 +46,14 @@ Josh Kearney <josh@jk0.org> Josh Kleinpeter <josh@kleinpeter.org> Joshua McKenty <jmckenty@gmail.com> Justin Santa Barbara <justin@fathomdb.com> +Justin Shepherd <jshepher@rackspace.com> Kei Masumoto <masumotok@nttdata.co.jp> Ken Pepple <ken.pepple@gmail.com> +Kevin Bringard <kbringard@attinteractive.com> Kevin L. 
Mitchell <kevin.mitchell@rackspace.com> Koji Iida <iida.koji@lab.ntt.co.jp> Lorin Hochstein <lorin@isi.edu> +Lvov Maxim <usrleon@gmail.com> Mark Washenberger <mark.washenberger@rackspace.com> Masanori Itoh <itoumsn@nttdata.co.jp> Matt Dietz <matt.dietz@rackspace.com> @@ -57,6 +66,7 @@ Nachi Ueno <ueno.nachi@lab.ntt.co.jp> Naveed Massjouni <naveedm9@gmail.com> Nirmal Ranganathan <nirmal.ranganathan@rackspace.com> Paul Voccio <paul@openstack.org> +Renuka Apte <renuka.apte@citrix.com> Ricardo Carrillo Cruz <emaildericky@gmail.com> Rick Clark <rick@openstack.org> Rick Harris <rconradharris@gmail.com> @@ -73,5 +83,8 @@ Trey Morris <trey.morris@rackspace.com> Tushar Patil <tushar.vitthal.patil@gmail.com> Vasiliy Shlykov <vash@vasiliyshlykov.org> Vishvananda Ishaya <vishvananda@gmail.com> +William Wolf <throughnothing@gmail.com> +Yoshiaki Tamura <yoshi@midokura.jp> Youcef Laribi <Youcef.Laribi@eu.citrix.com> +Yuriy Taraday <yorik.sar@gmail.com> Zhixue Wu <Zhixue.Wu@citrix.com> @@ -50,17 +50,24 @@ Human Alphabetical Order Examples Docstrings ---------- - """Summary of the function, class or method, less than 80 characters. + """A one line docstring looks like this and ends in a period.""" - New paragraph after newline that explains in more detail any general - information about the function, class or method. After this, if defining - parameters and return types use the Sphinx format. After that an extra - newline then close the quotations. + + """A multiline docstring has a one-line summary, less than 80 characters. + + Then a new paragraph after a newline that explains in more detail any + general information about the function, class or method. Example usages + are also great to have here if it is a complex class for function. After + you have finished your descriptions add an extra newline and close the + quotations. When writing the docstring for a class, an extra line should be placed after the closing quotations. 
For more in-depth explanations for these decisions see http://www.python.org/dev/peps/pep-0257/ + If you are going to describe parameters and return values, use Sphinx, the + appropriate syntax is as follows. + :param foo: the foo parameter :param bar: the bar parameter :returns: description of the return value diff --git a/MANIFEST.in b/MANIFEST.in index e7a6e7da4..4e145de75 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -35,6 +35,7 @@ include nova/tests/bundle/1mb.manifest.xml include nova/tests/bundle/1mb.no_kernel_or_ramdisk.manifest.xml include nova/tests/bundle/1mb.part.0 include nova/tests/bundle/1mb.part.1 +include nova/tests/public_key/* include nova/tests/db/nova.austin.sqlite include plugins/xenapi/README include plugins/xenapi/etc/xapi.d/plugins/objectstore diff --git a/bin/nova-compute b/bin/nova-compute index 95fa393b1..cd7c78def 100755 --- a/bin/nova-compute +++ b/bin/nova-compute @@ -28,11 +28,11 @@ import sys # If ../nova/__init__.py exists, add ../ to Python search path, so that # it will override what happens to be installed in /usr/(local/)lib/python... 
-possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), +POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir)) -if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): - sys.path.insert(0, possible_topdir) +if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'nova', '__init__.py')): + sys.path.insert(0, POSSIBLE_TOPDIR) gettext.install('nova', unicode=1) diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge index f42dfd6b5..5926b97de 100755 --- a/bin/nova-dhcpbridge +++ b/bin/nova-dhcpbridge @@ -108,6 +108,13 @@ def main(): interface = os.environ.get('DNSMASQ_INTERFACE', FLAGS.dnsmasq_interface) if int(os.environ.get('TESTING', '0')): from nova.tests import fake_flags + + #if FLAGS.fake_rabbit: + # LOG.debug(_("leasing ip")) + # network_manager = utils.import_object(FLAGS.network_manager) + ## reload(fake_flags) + # from nova.tests import fake_flags + action = argv[1] if action in ['add', 'del', 'old']: mac = argv[2] diff --git a/bin/nova-manage b/bin/nova-manage index 6789efba8..26c0d776c 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -58,7 +58,6 @@ import gettext import glob import json import os -import re import sys import time @@ -66,11 +65,11 @@ import IPy # If ../nova/__init__.py exists, add ../ to Python search path, so that # it will override what happens to be installed in /usr/(local/)lib/python... 
-possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), +POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir)) -if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): - sys.path.insert(0, possible_topdir) +if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'nova', '__init__.py')): + sys.path.insert(0, POSSIBLE_TOPDIR) gettext.install('nova', unicode=1) @@ -83,6 +82,7 @@ from nova import log as logging from nova import quota from nova import rpc from nova import utils +from nova import version from nova.api.ec2 import ec2utils from nova.auth import manager from nova.cloudpipe import pipelib @@ -97,7 +97,7 @@ flags.DECLARE('vlan_start', 'nova.network.manager') flags.DECLARE('vpn_start', 'nova.network.manager') flags.DECLARE('fixed_range_v6', 'nova.network.manager') flags.DECLARE('images_path', 'nova.image.local') -flags.DECLARE('libvirt_type', 'nova.virt.libvirt_conn') +flags.DECLARE('libvirt_type', 'nova.virt.libvirt.connection') flags.DEFINE_flag(flags.HelpFlag()) flags.DEFINE_flag(flags.HelpshortFlag()) flags.DEFINE_flag(flags.HelpXMLFlag()) @@ -151,7 +151,7 @@ class VpnCommands(object): state = 'up' print address, print vpn['host'], - print vpn['ec2_id'], + print ec2utils.id_to_ec2_id(vpn['id']), print vpn['state_description'], print state else: @@ -362,34 +362,54 @@ class ProjectCommands(object): def add(self, project_id, user_id): """Adds user to project arguments: project_id user_id""" - self.manager.add_to_project(user_id, project_id) + try: + self.manager.add_to_project(user_id, project_id) + except exception.UserNotFound as ex: + print ex + raise def create(self, name, project_manager, description=None): """Creates a new project arguments: name project_manager [description]""" - self.manager.create_project(name, project_manager, description) + try: + self.manager.create_project(name, project_manager, description) + except exception.UserNotFound as ex: + print ex + raise def 
modify(self, name, project_manager, description=None): """Modifies a project arguments: name project_manager [description]""" - self.manager.modify_project(name, project_manager, description) + try: + self.manager.modify_project(name, project_manager, description) + except exception.UserNotFound as ex: + print ex + raise def delete(self, name): """Deletes an existing project arguments: name""" - self.manager.delete_project(name) + try: + self.manager.delete_project(name) + except exception.ProjectNotFound as ex: + print ex + raise def environment(self, project_id, user_id, filename='novarc'): """Exports environment variables to an sourcable file arguments: project_id user_id [filename='novarc]""" - rc = self.manager.get_environment_rc(user_id, project_id) + try: + rc = self.manager.get_environment_rc(user_id, project_id) + except (exception.UserNotFound, exception.ProjectNotFound) as ex: + print ex + raise with open(filename, 'w') as f: f.write(rc) - def list(self): + def list(self, username=None): """Lists all projects - arguments: <none>""" - for project in self.manager.get_projects(): + arguments: [username]""" + for project in self.manager.get_projects(username): print project.name def quota(self, project_id, key=None, value=None): @@ -397,19 +417,26 @@ class ProjectCommands(object): arguments: project_id [key] [value]""" ctxt = context.get_admin_context() if key: - quo = {'project_id': project_id, key: value} + if value.lower() == 'unlimited': + value = None try: - db.quota_update(ctxt, project_id, quo) - except exception.NotFound: - db.quota_create(ctxt, quo) - project_quota = quota.get_quota(ctxt, project_id) + db.quota_update(ctxt, project_id, key, value) + except exception.ProjectQuotaNotFound: + db.quota_create(ctxt, project_id, key, value) + project_quota = quota.get_project_quotas(ctxt, project_id) for key, value in project_quota.iteritems(): + if value is None: + value = 'unlimited' print '%s: %s' % (key, value) def remove(self, project_id, user_id): 
"""Removes user from project arguments: project_id user_id""" - self.manager.remove_from_project(user_id, project_id) + try: + self.manager.remove_from_project(user_id, project_id) + except (exception.UserNotFound, exception.ProjectNotFound) as ex: + print ex + raise def scrub(self, project_id): """Deletes data associated with project @@ -428,6 +455,9 @@ class ProjectCommands(object): zip_file = self.manager.get_credentials(user_id, project_id) with open(filename, 'w') as f: f.write(zip_file) + except (exception.UserNotFound, exception.ProjectNotFound) as ex: + print ex + raise except db.api.NoMoreNetworks: print _('No more networks available. If this is a new ' 'installation, you need\nto call something like this:\n\n' @@ -449,7 +479,7 @@ class FixedIpCommands(object): ctxt = context.get_admin_context() try: - if host == None: + if host is None: fixed_ips = db.fixed_ip_get_all(ctxt) else: fixed_ips = db.fixed_ip_get_all_by_host(ctxt, host) @@ -499,7 +529,7 @@ class FloatingIpCommands(object): """Lists all floating ips (optionally by host) arguments: [host]""" ctxt = context.get_admin_context() - if host == None: + if host is None: floating_ips = db.floating_ip_get_all(ctxt) else: floating_ips = db.floating_ip_get_all_by_host(ctxt, host) @@ -523,8 +553,10 @@ class NetworkCommands(object): [network_size=FLAG], [vlan_start=FLAG], [vpn_start=FLAG], [fixed_range_v6=FLAG]""" if not fixed_range: - raise TypeError(_('Fixed range in the form of 10.0.0.0/8 is ' - 'required to create networks.')) + msg = _('Fixed range in the form of 10.0.0.0/8 is ' + 'required to create networks.') + print msg + raise TypeError(msg) if not num_networks: num_networks = FLAGS.num_networks if not network_size: @@ -536,14 +568,18 @@ class NetworkCommands(object): if not fixed_range_v6: fixed_range_v6 = FLAGS.fixed_range_v6 net_manager = utils.import_object(FLAGS.network_manager) - net_manager.create_networks(context.get_admin_context(), - cidr=fixed_range, - num_networks=int(num_networks), - 
network_size=int(network_size), - vlan_start=int(vlan_start), - vpn_start=int(vpn_start), - cidr_v6=fixed_range_v6, - label=label) + try: + net_manager.create_networks(context.get_admin_context(), + cidr=fixed_range, + num_networks=int(num_networks), + network_size=int(network_size), + vlan_start=int(vlan_start), + vpn_start=int(vpn_start), + cidr_v6=fixed_range_v6, + label=label) + except ValueError, e: + print e + raise e def list(self): """List all created networks""" @@ -570,6 +606,49 @@ class NetworkCommands(object): class VmCommands(object): """Class for mangaging VM instances.""" + def list(self, host=None): + """Show a list of all instances + + :param host: show all instance on specified host. + :param instance: show specificed instance. + """ + print "%-10s %-15s %-10s %-10s %-19s %-12s %-12s %-12s" \ + " %-10s %-10s %-10s %-5s" % ( + _('instance'), + _('node'), + _('type'), + _('state'), + _('launched'), + _('image'), + _('kernel'), + _('ramdisk'), + _('project'), + _('user'), + _('zone'), + _('index')) + + if host is None: + instances = db.instance_get_all(context.get_admin_context()) + else: + instances = db.instance_get_all_by_host( + context.get_admin_context(), host) + + for instance in instances: + print "%-10s %-15s %-10s %-10s %-19s %-12s %-12s %-12s" \ + " %-10s %-10s %-10s %-5d" % ( + instance['hostname'], + instance['host'], + instance['instance_type'], + instance['state_description'], + instance['launched_at'], + instance['image_id'], + instance['kernel_id'], + instance['ramdisk_id'], + instance['project_id'], + instance['user_id'], + instance['availability_zone'], + instance['launch_index']) + def live_migration(self, ec2_id, dest): """Migrates a running instance to a new machine. @@ -701,15 +780,6 @@ class ServiceCommands(object): {"method": "update_available_resource"}) -class LogCommands(object): - def request(self, request_id, logfile='/var/log/nova.log'): - """Show all fields in the log for the given request. 
Assumes you - haven't changed the log format too much. - ARGS: request_id [logfile]""" - lines = utils.execute("cat %s | grep '\[%s '" % (logfile, request_id)) - print re.sub('#012', "\n", "\n".join(lines)) - - class DbCommands(object): """Class for managing the database.""" @@ -725,47 +795,15 @@ class DbCommands(object): print migration.db_version() -class InstanceCommands(object): - """Class for managing instances.""" - - def list(self, host=None, instance=None): - """Show a list of all instances""" - print "%-10s %-15s %-10s %-10s %-19s %-12s %-12s %-12s" \ - " %-10s %-10s %-10s %-5s" % ( - _('instance'), - _('node'), - _('type'), - _('state'), - _('launched'), - _('image'), - _('kernel'), - _('ramdisk'), - _('project'), - _('user'), - _('zone'), - _('index')) +class VersionCommands(object): + """Class for exposing the codebase version.""" - if host == None: - instances = db.instance_get_all(context.get_admin_context()) - else: - instances = db.instance_get_all_by_host( - context.get_admin_context(), host) + def __init__(self): + pass - for instance in instances: - print "%-10s %-15s %-10s %-10s %-19s %-12s %-12s %-12s" \ - " %-10s %-10s %-10s %-5d" % ( - instance['hostname'], - instance['host'], - instance['instance_type'], - instance['state_description'], - instance['launched_at'], - instance['image_id'], - instance['kernel_id'], - instance['ramdisk_id'], - instance['project_id'], - instance['user_id'], - instance['availability_zone'], - instance['launch_index']) + def list(self): + print _("%s (%s)") %\ + (version.version_string(), version.version_string_with_vcs()) class VolumeCommands(object): @@ -818,11 +856,11 @@ class VolumeCommands(object): class InstanceTypeCommands(object): """Class for managing instance types / flavors.""" - def _print_instance_types(self, n, val): + def _print_instance_types(self, name, val): deleted = ('', ', inactive')[val["deleted"] == 1] print ("%s: Memory: %sMB, VCPUS: %s, Storage: %sGB, FlavorID: %s, " "Swap: %sGB, RXTX Quota: 
%sGB, RXTX Cap: %sMB%s") % ( - n, val["memory_mb"], val["vcpus"], val["local_gb"], + name, val["memory_mb"], val["vcpus"], val["local_gb"], val["flavorid"], val["swap"], val["rxtx_quota"], val["rxtx_cap"], deleted) @@ -836,11 +874,17 @@ class InstanceTypeCommands(object): instance_types.create(name, memory, vcpus, local_gb, flavorid, swap, rxtx_quota, rxtx_cap) except exception.InvalidInputException: - print "Must supply valid parameters to create instance type" + print "Must supply valid parameters to create instance_type" print e sys.exit(1) - except exception.DBError, e: - print "DB Error: %s" % e + except exception.ApiError, e: + print "\n\n" + print "\n%s" % e + print "Please ensure instance_type name and flavorid are unique." + print "To complete remove a instance_type, use the --purge flag:" + print "\n # nova-manage instance_type delete <name> --purge\n" + print "Currently defined instance_type names and flavorids:" + self.list("--all") sys.exit(2) except: print "Unknown error" @@ -873,12 +917,12 @@ class InstanceTypeCommands(object): """Lists all active or specific instance types / flavors arguments: [name]""" try: - if name == None: + if name is None: inst_types = instance_types.get_all_types() elif name == "--all": inst_types = instance_types.get_all_types(True) else: - inst_types = instance_types.get_instance_type(name) + inst_types = instance_types.get_instance_type_by_name(name) except exception.DBError, e: _db_error(e) if isinstance(inst_types.values()[0], dict): @@ -894,20 +938,17 @@ class ImageCommands(object): def __init__(self, *args, **kwargs): self.image_service = utils.import_object(FLAGS.image_service) - def _register(self, image_type, disk_format, container_format, + def _register(self, container_format, disk_format, path, owner, name=None, is_public='T', architecture='x86_64', kernel_id=None, ramdisk_id=None): - meta = {'is_public': True, + meta = {'is_public': (is_public == 'T'), 'name': name, - 'disk_format': disk_format, 
'container_format': container_format, + 'disk_format': disk_format, 'properties': {'image_state': 'available', - 'owner_id': owner, - 'type': image_type, + 'project_id': owner, 'architecture': architecture, - 'image_location': 'local', - 'is_public': (is_public == 'T')}} - print image_type, meta + 'image_location': 'local'}} if kernel_id: meta['properties']['kernel_id'] = int(kernel_id) if ramdisk_id: @@ -932,16 +973,18 @@ class ImageCommands(object): ramdisk_id = self.ramdisk_register(ramdisk, owner, None, is_public, architecture) self.image_register(image, owner, name, is_public, - architecture, kernel_id, ramdisk_id) + architecture, 'ami', 'ami', + kernel_id, ramdisk_id) def image_register(self, path, owner, name=None, is_public='T', - architecture='x86_64', kernel_id=None, ramdisk_id=None, - disk_format='ami', container_format='ami'): + architecture='x86_64', container_format='bare', + disk_format='raw', kernel_id=None, ramdisk_id=None): """Uploads an image into the image_service arguments: path owner [name] [is_public='T'] [architecture='x86_64'] + [container_format='bare'] [disk_format='raw'] [kernel_id=None] [ramdisk_id=None] - [disk_format='ami'] [container_format='ami']""" - return self._register('machine', disk_format, container_format, path, + """ + return self._register(container_format, disk_format, path, owner, name, is_public, architecture, kernel_id, ramdisk_id) @@ -950,7 +993,7 @@ class ImageCommands(object): """Uploads a kernel into the image_service arguments: path owner [name] [is_public='T'] [architecture='x86_64'] """ - return self._register('kernel', 'aki', 'aki', path, owner, name, + return self._register('aki', 'aki', path, owner, name, is_public, architecture) def ramdisk_register(self, path, owner, name=None, is_public='T', @@ -958,14 +1001,14 @@ class ImageCommands(object): """Uploads a ramdisk into the image_service arguments: path owner [name] [is_public='T'] [architecture='x86_64'] """ - return self._register('ramdisk', 'ari', 'ari', 
path, owner, name, + return self._register('ari', 'ari', path, owner, name, is_public, architecture) def _lookup(self, old_image_id): try: internal_id = ec2utils.ec2_id_to_id(old_image_id) image = self.image_service.show(context, internal_id) - except exception.NotFound: + except (exception.InvalidEc2Id, exception.ImageNotFound): image = self.image_service.show_by_name(context, old_image_id) return image['id'] @@ -975,16 +1018,17 @@ class ImageCommands(object): 'ramdisk': 'ari'} container_format = mapping[old['type']] disk_format = container_format + if container_format == 'ami' and not old.get('kernelId'): + container_format = 'bare' + disk_format = 'raw' new = {'disk_format': disk_format, 'container_format': container_format, - 'is_public': True, + 'is_public': old['isPublic'], 'name': old['imageId'], 'properties': {'image_state': old['imageState'], - 'owner_id': old['imageOwnerId'], + 'project_id': old['imageOwnerId'], 'architecture': old['architecture'], - 'type': old['type'], - 'image_location': old['imageLocation'], - 'is_public': old['isPublic']}} + 'image_location': old['imageLocation']}} if old.get('kernelId'): new['properties']['kernel_id'] = self._lookup(old['kernelId']) if old.get('ramdiskId'): @@ -1018,7 +1062,7 @@ class ImageCommands(object): if (FLAGS.image_service == 'nova.image.local.LocalImageService' and directory == os.path.abspath(FLAGS.images_path)): new_dir = "%s_bak" % directory - os.move(directory, new_dir) + os.rename(directory, new_dir) os.mkdir(directory) directory = new_dir for fn in glob.glob("%s/*/info.json" % directory): @@ -1030,7 +1074,7 @@ class ImageCommands(object): machine_images[image_path] = image_metadata else: other_images[image_path] = image_metadata - except Exception as exc: + except Exception: print _("Failed to load %(fn)s.") % locals() # NOTE(vish): do kernels and ramdisks first so images self._convert_images(other_images) @@ -1049,13 +1093,12 @@ CATEGORIES = [ ('network', NetworkCommands), ('vm', VmCommands), 
('service', ServiceCommands), - ('log', LogCommands), ('db', DbCommands), ('volume', VolumeCommands), ('instance_type', InstanceTypeCommands), ('image', ImageCommands), ('flavor', InstanceTypeCommands), - ('instance', InstanceCommands)] + ('version', VersionCommands)] def lazy_match(name, key_value_tuples): @@ -1097,6 +1140,8 @@ def main(): script_name = argv.pop(0) if len(argv) < 1: + print _("\nOpenStack Nova version: %s (%s)\n") %\ + (version.version_string(), version.version_string_with_vcs()) print script_name + " category action [<args>]" print _("Available categories:") for k, _v in CATEGORIES: @@ -65,7 +65,7 @@ def format_help(d): indent = MAX_INDENT - 6 out = [] - for k, v in d.iteritems(): + for k, v in sorted(d.iteritems()): if (len(k) + 6) > MAX_INDENT: out.extend([' %s' % k]) initial_indent = ' ' * (indent + 6) diff --git a/doc/source/_theme/layout.html b/doc/source/_theme/layout.html index 0a37a7943..b28edb364 100644 --- a/doc/source/_theme/layout.html +++ b/doc/source/_theme/layout.html @@ -73,7 +73,7 @@ <script type="text/javascript">$('#searchbox').show(0);</script> <p class="triangle-border right"> - Psst... hey. You're reading the latest content, but it might be out of sync with code. You can read <a href="http://nova.openstack.org/2011.1">Nova 2011.1 docs</a> or <a href="http://docs.openstack.org">all OpenStack docs</a> too. + Psst... hey. You're reading the latest content, but it might be out of sync with code. You can read <a href="http://nova.openstack.org/2011.2">Nova 2011.2 docs</a> or <a href="http://docs.openstack.org">all OpenStack docs</a> too. </p> {%- endif %} diff --git a/doc/source/devref/cloudpipe.rst b/doc/source/devref/cloudpipe.rst index 4f5d91e28..15d3160b7 100644 --- a/doc/source/devref/cloudpipe.rst +++ b/doc/source/devref/cloudpipe.rst @@ -38,6 +38,52 @@ The cloudpipe image is basically just a linux instance with openvpn installed. 
It is also useful to have a cron script that will periodically redownload the metadata and copy the new crl. This will keep revoked users from connecting and will disconnect any users that are connected with revoked certificates when their connection is renegotiated (every hour). +Creating a Cloudpipe Image +-------------------------- + +Making a cloudpipe image is relatively easy. + +# install openvpn on a base ubuntu image. +# set up a server.conf.template in /etc/openvpn/ + +.. literalinclude:: server.conf.template + :language: bash + :linenos: + +# set up.sh in /etc/openvpn/ + +.. literalinclude:: up.sh + :language: bash + :linenos: + +# set down.sh in /etc/openvpn/ + +.. literalinclude:: down.sh + :language: bash + :linenos: + +# download and run the payload on boot from /etc/rc.local + +.. literalinclude:: rc.local + :language: bash + :linenos: + +# setup /etc/network/interfaces + +.. literalinclude:: interfaces + :language: bash + :linenos: + +# register the image and set the image id in your flagfile:: + + --vpn_image_id=ami-xxxxxxxx + +# you should set a few other flags to make vpns work properly:: + + --use_project_ca + --cnt_vpn_clients=5 + + Cloudpipe Launch ---------------- @@ -63,6 +109,31 @@ Certificates and Revocation If the use_project_ca flag is set (required to for cloudpipes to work securely), then each project has its own ca. This ca is used to sign the certificate for the vpn, and is also passed to the user for bundling images. When a certificate is revoked using nova-manage, a new Certificate Revocation List (crl) is generated. As long as cloudpipe has an updated crl, it will block revoked users from connecting to the vpn. +The userdata for cloudpipe isn't currently updated when certs are revoked, so it is necessary to restart the cloudpipe instance if a user's credentials are revoked. 
+ + +Restarting Cloudpipe VPN +------------------------ + +You can reboot a cloudpipe vpn through the api if something goes wrong (using euca-reboot-instances for example), but if you generate a new crl, you will have to terminate it and start it again using nova-manage vpn run. The cloudpipe instance always gets the first ip in the subnet and it can take up to 10 minutes for the ip to be recovered. If you try to start the new vpn instance too soon, the instance will fail to start because of a NoMoreAddresses error. If you can't wait 10 minutes, you can manually update the ip with something like the following (use the right ip for the project):: + + euca-terminate-instances <instance_id> + mysql nova -e "update fixed_ips set allocated=0, leased=0, instance_id=NULL where fixed_ip='10.0.0.2'" + +You also will need to terminate the dnsmasq running for the user (make sure you use the right pid file):: + + sudo kill `cat /var/lib/nova/br100.pid` + +Now you should be able to re-run the vpn:: + + nova-manage vpn run <project_id> + + +Logging into Cloudpipe VPN +-------------------------- + +The keypair that was used to launch the cloudpipe instance should be in the keys/<project_id> folder. You can use this key to log into the cloudpipe instance for debugging purposes. + The :mod:`nova.cloudpipe.pipelib` Module ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/devref/down.sh b/doc/source/devref/down.sh new file mode 100644 index 000000000..5c1888870 --- /dev/null +++ b/doc/source/devref/down.sh @@ -0,0 +1,7 @@ +#!/bin/sh + +BR=$1 +DEV=$2 + +/usr/sbin/brctl delif $BR $DEV +/sbin/ifconfig $DEV down diff --git a/doc/source/devref/index.rst b/doc/source/devref/index.rst index 9613ba990..0a5a7a4d6 100644 --- a/doc/source/devref/index.rst +++ b/doc/source/devref/index.rst @@ -35,6 +35,7 @@ Programming Concepts .. 
toctree:: :maxdepth: 3 + zone rabbit API Reference diff --git a/doc/source/devref/interfaces b/doc/source/devref/interfaces new file mode 100644 index 000000000..b7116aeb7 --- /dev/null +++ b/doc/source/devref/interfaces @@ -0,0 +1,17 @@ +# This file describes the network interfaces available on your system +# and how to activate them. For more information, see interfaces(5). + +# The loopback network interface +auto lo +iface lo inet loopback + +# The primary network interface +auto eth0 +iface eth0 inet manual + up ifconfig $IFACE 0.0.0.0 up + down ifconfig $IFACE down + +auto br0 +iface br0 inet dhcp + bridge_ports eth0 + diff --git a/doc/source/devref/rc.local b/doc/source/devref/rc.local new file mode 100644 index 000000000..d1ccf0cbc --- /dev/null +++ b/doc/source/devref/rc.local @@ -0,0 +1,36 @@ +#!/bin/sh -e +# +# rc.local +# +# This script is executed at the end of each multiuser runlevel. +# Make sure that the script will "exit 0" on success or any other +# value on error. +# +# In order to enable or disable this script just change the execution +# bits. +# +# By default this script does nothing. +####### These lines go at the end of /etc/rc.local ####### +. 
/lib/lsb/init-functions + +echo Downloading payload from userdata +wget http://169.254.169.254/latest/user-data -O /tmp/payload.b64 +echo Decrypting base64 payload +openssl enc -d -base64 -in /tmp/payload.b64 -out /tmp/payload.zip + +mkdir -p /tmp/payload +echo Unzipping payload file +unzip -o /tmp/payload.zip -d /tmp/payload/ + +# if the autorun.sh script exists, run it +if [ -e /tmp/payload/autorun.sh ]; then + echo Running autorun.sh + cd /tmp/payload + sh /tmp/payload/autorun.sh + +else + echo rc.local : No autorun script to run +fi + + +exit 0 diff --git a/doc/source/devref/server.conf.template b/doc/source/devref/server.conf.template new file mode 100644 index 000000000..feee3185b --- /dev/null +++ b/doc/source/devref/server.conf.template @@ -0,0 +1,34 @@ +port 1194 +proto udp +dev tap0 +up "/etc/openvpn/up.sh br0" +down "/etc/openvpn/down.sh br0" + +persist-key +persist-tun + +ca ca.crt +cert server.crt +key server.key # This file should be kept secret + +dh dh1024.pem +ifconfig-pool-persist ipp.txt + +server-bridge VPN_IP DHCP_SUBNET DHCP_LOWER DHCP_UPPER + +client-to-client +keepalive 10 120 +comp-lzo + +max-clients 1 + +user nobody +group nogroup + +persist-key +persist-tun + +status openvpn-status.log + +verb 3 +mute 20
\ No newline at end of file diff --git a/doc/source/devref/up.sh b/doc/source/devref/up.sh new file mode 100644 index 000000000..073a58e15 --- /dev/null +++ b/doc/source/devref/up.sh @@ -0,0 +1,7 @@ +#!/bin/sh + +BR=$1 +DEV=$2 +MTU=$3 +/sbin/ifconfig $DEV mtu $MTU promisc up +/usr/sbin/brctl addif $BR $DEV diff --git a/doc/source/devref/zone.rst b/doc/source/devref/zone.rst new file mode 100644 index 000000000..263560ee2 --- /dev/null +++ b/doc/source/devref/zone.rst @@ -0,0 +1,127 @@ +.. + Copyright 2010-2011 OpenStack LLC + All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +Zones +===== + +A Nova deployment is called a Zone. A Zone allows you to partition your deployments into logical groups for load balancing and instance distribution. At the very least a Zone requires an API node, a Scheduler node, a database and RabbitMQ. Pushed further a Zone may contain many API nodes, many Scheduler, Volume, Network and Compute nodes as well as a cluster of databases and RabbitMQ servers. + +The idea behind Zones is, if a particular deployment is not capable of servicing a particular request, the request may be forwarded to (child) Zones for possible processing. Zones may be nested in a tree fashion. + +Zones only know about their immediate children, they do not know about their parent Zones and may in fact have more than one parent. Likewise, a Zone's children may themselves have child Zones. + +Zones share nothing. They communicate via the public OpenStack API only. 
No database, queue, user or project definition is shared between Zones. + + +Capabilities +------------ +Routing between Zones is based on the Capabilities of that Zone. Capabilities are nothing more than key/value pairs. Values are multi-value, with each value separated with a semicolon (`;`). When expressed as a string they take the form: + +:: + + key=value;value;value, key=value;value;value + +Zones have Capabilities which are general to the Zone and are set via `--zone_capabilities` flag. Zones also have dynamic per-service Capabilities. Services derived from `nova.manager.SchedulerDependentManager` (such as Compute, Volume and Network) can set these capabilities by calling the `update_service_capabilities()` method on their `Manager` base class. These capabilities will be periodically sent to the Scheduler service automatically. The rate at which these updates are sent is controlled by the `--periodic_interval` flag. + +Flow within a Zone +------------------ +The brunt of the work within a Zone is done in the Scheduler Service. The Scheduler is responsible for: +- collecting capability messages from the Compute, Volume and Network nodes, +- polling the child Zones for their status and +- providing data to the Distributed Scheduler for performing load balancing calculations + +Inter-service communication within a Zone is done with RabbitMQ. Each class of Service (Compute, Volume and Network) has both a named message exchange (particular to that host) and a general message exchange (particular to that class of service). Messages sent to these exchanges are picked off in round-robin fashion. Zones introduce a new fan-out exchange per service. Messages sent to the fan-out exchange are picked up by all services of a particular class. This fan-out exchange is used by the Scheduler services to receive capability messages from the Compute, Volume and Network nodes. 
+ +These capability messages are received by the Scheduler services and stored in the `ZoneManager` object. The SchedulerManager object has a reference to the `ZoneManager` it can use for load balancing. + +The `ZoneManager` also polls the child Zones periodically to gather their capabilities to aid in decision making. This is done via the OpenStack API `/v1.0/zones/info` REST call. This also captures the name of each child Zone. The Zone name is set via the `--zone_name` flag (and defaults to "nova"). + +Zone administrative functions +----------------------------- +Zone administrative operations are usually done using python-novaclient_ + +.. _python-novaclient: https://github.com/rackspace/python-novaclient + +In order to use the Zone operations, be sure to enable administrator operations in OpenStack API by setting the `--allow_admin_api=true` flag. + +Finally you need to enable Zone Forwarding. This will be used by the Distributed Scheduler initiative currently underway. Set `--enable_zone_routing=true` to enable this feature. + +Find out about this Zone +------------------------ +In any Zone you can find the Zone's name and capabilities with the ``nova zone-info`` command. + +:: + + alice@novadev:~$ nova zone-info + +-----------------+---------------+ + | Property | Value | + +-----------------+---------------+ + | compute_cpu | 0.7,0.7 | + | compute_disk | 123000,123000 | + | compute_network | 800,800 | + | hypervisor | xenserver | + | name | nova | + | network_cpu | 0.7,0.7 | + | network_disk | 123000,123000 | + | network_network | 800,800 | + | os | linux | + +-----------------+---------------+ + +This equates to a GET operation on `.../zones/info`. If you have no child Zones defined you'll usually only get back the default `name`, `hypervisor` and `os` capabilities. Otherwise you'll get back a tuple of min, max values for each capability of all the hosts of all the services running in the child zone. 
These take the `<service>_<capability> = <min>,<max>` format. + +Adding a child Zone +------------------- +Any Zone can be a parent Zone. Children are associated to a Zone. The Zone where this command originates from is known as the Parent Zone. Routing is only ever conducted from a Zone to its children, never the other direction. From a parent zone you can add a child zone with the following command: + +:: + + nova zone-add <child zone api url> <username> <nova api key> + +You can get the `child zone api url`, `nova api key` and `username` from the `novarc` file in the child zone. For example: + +:: + + export NOVA_API_KEY="3bd1af06-6435-4e23-a827-413b2eb86934" + export NOVA_USERNAME="alice" + export NOVA_URL="http://192.168.2.120:8774/v1.0/" + + +This equates to a POST operation to `.../zones/` to add a new zone. No connection attempt to the child zone is made when this command is run. It only puts an entry in the db at this point. After about 30 seconds the `ZoneManager` in the Scheduler services will attempt to talk to the child zone and get its information. + +Getting a list of child Zones +----------------------------- + +:: + + nova zone-list + + alice@novadev:~$ nova zone-list + +----+-------+-----------+--------------------------------------------+---------------------------------+ + | ID | Name | Is Active | Capabilities | API URL | + +----+-------+-----------+--------------------------------------------+---------------------------------+ + | 2 | zone1 | True | hypervisor=xenserver;kvm, os=linux;windows | http://192.168.2.108:8774/v1.0/ | + | 3 | zone2 | True | hypervisor=xenserver;kvm, os=linux;windows | http://192.168.2.115:8774/v1.0/ | + +----+-------+-----------+--------------------------------------------+---------------------------------+ + +This equates to a GET operation to `.../zones`. + +Removing a child Zone +--------------------- +:: + + nova zone-delete <N> + +This equates to a DELETE call to `.../zones/N`. The Zone with ID=N will be removed. 
This will only remove the zone entry from the current (parent) Zone, no child Zones are affected. Removing a Child Zone doesn't affect any other part of the hierarchy. diff --git a/doc/source/man/novamanage.rst b/doc/source/man/novamanage.rst index 1d8446f08..397cc8e80 100644 --- a/doc/source/man/novamanage.rst +++ b/doc/source/man/novamanage.rst @@ -6,7 +6,7 @@ nova-manage control and manage cloud computer instances and images ------------------------------------------------------ -:Author: nova@lists.launchpad.net +:Author: openstack@lists.launchpad.net :Date: 2010-11-16 :Copyright: OpenStack LLC :Version: 0.1 @@ -121,7 +121,7 @@ Nova Role nova-manage role <action> [<argument>] ``nova-manage role add <username> <rolename> <(optional) projectname>`` - Add a user to either a global or project-based role with the indicated <rolename> assigned to the named user. Role names can be one of the following five roles: admin, itsec, projectmanager, netadmin, developer. If you add the project name as the last argument then the role is assigned just for that project, otherwise the user is assigned the named role for all projects. + Add a user to either a global or project-based role with the indicated <rolename> assigned to the named user. Role names can be one of the following five roles: cloudadmin, itsec, sysadmin, netadmin, developer. If you add the project name as the last argument then the role is assigned just for that project, otherwise the user is assigned the named role for all projects. ``nova-manage role has <username> <projectname>`` Checks the user or project and responds with True if the user has a global role with a particular project. @@ -240,6 +240,16 @@ Nova Images Converts all images in directory from the old (Bexar) format to the new format. +Nova VM +~~~~~~~~~~~ + +``nova-manage vm list [host]`` + Show a list of all instances. Accepts optional hostname (to show only instances on specific host). 
+ +``nova-manage live-migration <ec2_id> <destination host name>`` + Live migrate instance from current host to destination host. Requires instance id (which comes from euca-describe-instance) and destination host name (which can be found from nova-manage service list). + + FILES ======== diff --git a/doc/source/runnova/flags.rst b/doc/source/runnova/flags.rst index 1bfa022d9..3d16e1303 100644 --- a/doc/source/runnova/flags.rst +++ b/doc/source/runnova/flags.rst @@ -20,174 +20,4 @@ Flags and Flagfiles Nova uses a configuration file containing flags located in /etc/nova/nova.conf. You can get the most recent listing of avaialble flags by running nova-(servicename) --help, for example, nova-api --help. -Here's a list of available flags and their default settings. - - --ajax_console_proxy_port: port that ajax_console_proxy binds - (default: '8000') - --ajax_console_proxy_topic: the topic ajax proxy nodes listen on - (default: 'ajax_proxy') - --ajax_console_proxy_url: location of ajax console proxy, in the form - "http://127.0.0.1:8000" - (default: 'http://127.0.0.1:8000') - --auth_token_ttl: Seconds for auth tokens to linger - (default: '3600') - (an integer) - --aws_access_key_id: AWS Access ID - (default: 'admin') - --aws_secret_access_key: AWS Access Key - (default: 'admin') - --compute_manager: Manager for compute - (default: 'nova.compute.manager.ComputeManager') - --compute_topic: the topic compute nodes listen on - (default: 'compute') - --connection_type: libvirt, xenapi or fake - (default: 'libvirt') - --console_manager: Manager for console proxy - (default: 'nova.console.manager.ConsoleProxyManager') - --console_topic: the topic console proxy nodes listen on - (default: 'console') - --control_exchange: the main exchange to connect to - (default: 'nova') - --db_backend: The backend to use for db - (default: 'sqlalchemy') - --default_image: default image to use, testing only - (default: 'ami-11111') - --default_instance_type: default instance type to use, 
testing only - (default: 'm1.small') - --default_log_levels: list of logger=LEVEL pairs - (default: 'amqplib=WARN,sqlalchemy=WARN,eventlet.wsgi.server=WARN') - (a comma separated list) - --default_project: default project for openstack - (default: 'openstack') - --ec2_dmz_host: internal ip of api server - (default: '$my_ip') - --ec2_host: ip of api server - (default: '$my_ip') - --ec2_path: suffix for ec2 - (default: '/services/Cloud') - --ec2_port: cloud controller port - (default: '8773') - (an integer) - --ec2_scheme: prefix for ec2 - (default: 'http') - --[no]enable_new_services: Services to be added to the available pool on - create - (default: 'true') - --[no]fake_network: should we use fake network devices and addresses - (default: 'false') - --[no]fake_rabbit: use a fake rabbit - (default: 'false') - --glance_host: glance host - (default: '$my_ip') - --glance_port: glance port - (default: '9292') - (an integer) - -?,--[no]help: show this help - --[no]helpshort: show usage only for this module - --[no]helpxml: like --help, but generates XML output - --host: name of this node - (default: 'osdemo03') - --image_service: The service to use for retrieving and searching for images. 
- (default: 'nova.image.s3.S3ImageService') - --instance_name_template: Template string to be used to generate instance - names - (default: 'instance-%08x') - --logfile: output to named file - --logging_context_format_string: format string to use for log messages with - context - (default: '%(asctime)s %(levelname)s %(name)s [%(request_id)s %(user)s - %(project)s] %(message)s') - --logging_debug_format_suffix: data to append to log format when level is - DEBUG - (default: 'from %(processName)s (pid=%(process)d) %(funcName)s - %(pathname)s:%(lineno)d') - --logging_default_format_string: format string to use for log messages without - context - (default: '%(asctime)s %(levelname)s %(name)s [-] %(message)s') - --logging_exception_prefix: prefix each line of exception output with this - format - (default: '(%(name)s): TRACE: ') - --my_ip: host ip address - (default: '184.106.73.68') - --network_manager: Manager for network - (default: 'nova.network.manager.VlanManager') - --network_topic: the topic network nodes listen on - (default: 'network') - --node_availability_zone: availability zone of this node - (default: 'nova') - --null_kernel: kernel image that indicates not to use a kernel, but to use a - raw disk image instead - (default: 'nokernel') - --osapi_host: ip of api server - (default: '$my_ip') - --osapi_path: suffix for openstack - (default: '/v1.0/') - --osapi_port: OpenStack API port - (default: '8774') - (an integer) - --osapi_scheme: prefix for openstack - (default: 'http') - --periodic_interval: seconds between running periodic tasks - (default: '60') - (a positive integer) - --pidfile: pidfile to use for this service - --rabbit_host: rabbit host - (default: 'localhost') - --rabbit_max_retries: rabbit connection attempts - (default: '12') - (an integer) - --rabbit_password: rabbit password - (default: 'guest') - --rabbit_port: rabbit port - (default: '5672') - (an integer) - --rabbit_retry_interval: rabbit connection retry interval - (default: '10') - (an 
integer) - --rabbit_userid: rabbit userid - (default: 'guest') - --rabbit_virtual_host: rabbit virtual host - (default: '/') - --region_list: list of region=fqdn pairs separated by commas - (default: '') - (a comma separated list) - --report_interval: seconds between nodes reporting state to datastore - (default: '10') - (a positive integer) - --s3_dmz: s3 dmz ip (for instances) - (default: '$my_ip') - --s3_host: s3 host (for infrastructure) - (default: '$my_ip') - --s3_port: s3 port - (default: '3333') - (an integer) - --scheduler_manager: Manager for scheduler - (default: 'nova.scheduler.manager.SchedulerManager') - --scheduler_topic: the topic scheduler nodes listen on - (default: 'scheduler') - --sql_connection: connection string for sql database - (default: 'sqlite:///$state_path/nova.sqlite') - --sql_idle_timeout: timeout for idle sql database connections - (default: '3600') - --sql_max_retries: sql connection attempts - (default: '12') - (an integer) - --sql_retry_interval: sql connection retry interval - (default: '10') - (an integer) - --state_path: Top-level directory for maintaining nova's state - (default: '/usr/lib/pymodules/python2.6/nova/../') - --[no]use_syslog: output to syslog - (default: 'false') - --[no]verbose: show debug output - (default: 'false') - --volume_manager: Manager for volume - (default: 'nova.volume.manager.VolumeManager') - --volume_name_template: Template string to be used to generate instance names - (default: 'volume-%08x') - --volume_topic: the topic volume nodes listen on - (default: 'volume') - --vpn_image_id: AMI for cloudpipe vpn server - (default: 'ami-cloudpipe') - --vpn_key_suffix: Suffix to add to project name for vpn key and secgroups - (default: '-vpn')
\ No newline at end of file +The OpenStack wiki has a page with the flags listed by their purpose and use at http://wiki.openstack.org/FlagsGrouping.
\ No newline at end of file diff --git a/doc/source/runnova/index.rst b/doc/source/runnova/index.rst index 283d268ce..769bbec84 100644 --- a/doc/source/runnova/index.rst +++ b/doc/source/runnova/index.rst @@ -18,7 +18,7 @@ Running Nova ============ -This guide describes the basics of running and managing Nova. For more administrator's documentation, refer to `docs.openstack.org <http://docs.openstack.org>`_. +This guide describes the basics of running and managing Nova. This site is intended to provide developer documentation. For more administrator's documentation, refer to `docs.openstack.org <http://docs.openstack.org>`_. Running the Cloud ----------------- @@ -60,7 +60,7 @@ For background on the core objects referenced in this section, see :doc:`../obje Deployment ---------- -For a starting multi-node architecture, you would start with two nodes - a cloud controller node and a compute node. The cloud controller node contains the nova- services plus the Nova database. The compute node installs all the nova-services but then refers to the database installation, which is hosted by the cloud controller node. Ensure that the nova.conf file is identical on each node. If you find performance issues not related to database reads or writes, but due to the messaging queue backing up, you could add additional messaging services (rabbitmq). For instructions on multi-server installations, refer to `Installing and Configuring OpenStack Compute <http://docs.openstack.org/openstack-compute/admin/content/ch03.html>`_. +For a starting multi-node architecture, you would start with two nodes - a cloud controller node and a compute node. The cloud controller node contains the nova- services plus the Nova database. The compute node installs all the nova-services but then refers to the database installation, which is hosted by the cloud controller node. Ensure that the nova.conf file is identical on each node. 
If you find performance issues not related to database reads or writes, but due to the messaging queue backing up, you could add additional messaging services (rabbitmq). For instructions on multi-server installations, refer to `Installing and Configuring OpenStack Compute <http://docs.openstack.org/>`_. .. toctree:: diff --git a/doc/source/runnova/managing.images.rst b/doc/source/runnova/managing.images.rst index c5d93a6e8..a2e618602 100644 --- a/doc/source/runnova/managing.images.rst +++ b/doc/source/runnova/managing.images.rst @@ -18,4 +18,9 @@ Managing Images =============== -.. todo:: Put info on managing images here! +With Nova, you can manage images either using the built-in object store or using Glance, a related OpenStack project. Glance is a server that provides the following services: + + * Ability to store and retrieve virtual machine images + * Ability to store and retrieve metadata about these virtual machine images + +Refer to http://glance.openstack.org for additional details.
\ No newline at end of file diff --git a/doc/source/runnova/managing.instance.types.rst b/doc/source/runnova/managing.instance.types.rst index 746077716..a575e16b7 100644 --- a/doc/source/runnova/managing.instance.types.rst +++ b/doc/source/runnova/managing.instance.types.rst @@ -16,6 +16,8 @@ Managing Instance Types and Flavors =================================== +You can manage instance types and instance flavors using the nova-manage command-line interface coupled with the instance_type subcommand for nova-manage. + What are Instance Types or Flavors ? ------------------------------------ diff --git a/doc/source/runnova/managing.users.rst b/doc/source/runnova/managing.users.rst index 392142e86..d3442bed9 100644 --- a/doc/source/runnova/managing.users.rst +++ b/doc/source/runnova/managing.users.rst @@ -38,11 +38,11 @@ Role-based access control (RBAC) is an approach to restricting system access to Nova’s rights management system employs the RBAC model and currently supports the following five roles: -* **Cloud Administrator.** (admin) Users of this class enjoy complete system access. +* **Cloud Administrator.** (cloudadmin) Users of this class enjoy complete system access. * **IT Security.** (itsec) This role is limited to IT security personnel. It permits role holders to quarantine instances. -* **Project Manager.** (projectmanager)The default for project owners, this role affords users the ability to add other users to a project, interact with project images, and launch and terminate instances. +* **System Administrator.** (sysadmin) The default for project owners, this role affords users the ability to add other users to a project, interact with project images, and launch and terminate instances. * **Network Administrator.** (netadmin) Users with this role are permitted to allocate and assign publicly accessible IP addresses as well as create and modify firewall rules. -* **Developer.** This is a general purpose role that is assigned to users by default. 
+* **Developer.** (developer) This is a general purpose role that is assigned to users by default. RBAC management is exposed through the dashboard for simplified user management. diff --git a/doc/source/runnova/managingsecurity.rst b/doc/source/runnova/managingsecurity.rst index 7893925e7..85329ed4a 100644 --- a/doc/source/runnova/managingsecurity.rst +++ b/doc/source/runnova/managingsecurity.rst @@ -18,8 +18,6 @@ Security Considerations ======================= -.. todo:: This doc is vague and just high-level right now. Describe architecture that enables security. - The goal of securing a cloud computing system involves both protecting the instances, data on the instances, and ensuring users are authenticated for actions and that borders are understood by the users and the system. Protecting the system from intrusion or attack involves authentication, network protections, and diff --git a/doc/source/runnova/network.vlan.rst b/doc/source/runnova/network.vlan.rst index c06ce8e8b..df19c7a80 100644 --- a/doc/source/runnova/network.vlan.rst +++ b/doc/source/runnova/network.vlan.rst @@ -36,9 +36,7 @@ In this mode, each project gets its own VLAN, Linux networking bridge, and subne While network traffic between VM instances belonging to the same VLAN is always open, Nova can enforce isolation of network traffic between different projects by enforcing one VLAN per project. -In addition, the network administrator can specify a pool of public IP addresses that users may allocate and then assign to VMs, either at boot or dynamically at run-time. This capability is similar to Amazon's 'elastic IPs'. A public IP address may be associated with a running instances, allowing the VM instance to be accessed from the public network. The public IP addresses are accessible from the network host and NATed to the private IP address of the project. - -.. 
todo:: Describe how a public IP address could be associated with a project (a VLAN) +In addition, the network administrator can specify a pool of public IP addresses that users may allocate and then assign to VMs, either at boot or dynamically at run-time. This capability is similar to Amazon's 'elastic IPs'. A public IP address may be associated with a running instances, allowing the VM instance to be accessed from the public network. The public IP addresses are accessible from the network host and NATed to the private IP address of the project. A public IP address could be associated with a project using the euca-allocate-address commands. This is the default networking mode and supports the most features. For multiple machine installation, it requires a switch that supports host-managed vlan tagging. In this mode, nova will create a vlan and bridge for each project. The project gets a range of private ips that are only accessible from inside the vlan. In order for a user to access the instances in their project, a special vpn instance (code named :ref:`cloudpipe <cloudpipe>`) needs to be created. Nova generates a certificate and key for the user to access the vpn and starts the vpn automatically. More information on cloudpipe can be found :ref:`here <cloudpipe>`. @@ -176,4 +174,3 @@ Setup * project network size * DMZ network -.. todo:: need specific Nova configuration added diff --git a/doc/source/runnova/nova.manage.rst b/doc/source/runnova/nova.manage.rst index 0636e5752..af82b6a4f 100644 --- a/doc/source/runnova/nova.manage.rst +++ b/doc/source/runnova/nova.manage.rst @@ -83,13 +83,13 @@ Nova User Nova Project ~~~~~~~~~~~~ -``nova-manage project add <projectname>`` +``nova-manage project add <projectname> <username>`` - Add a nova project with the name <projectname> to the database. + Add a nova project with the name <projectname> to the database that will be administered by the named user. 
-``nova-manage project create <projectname>`` +``nova-manage project create <projectname> <projectmanager>`` - Create a new nova project with the name <projectname> (you still need to do nova-manage project add <projectname> to add it to the database). + Create a new nova project with the name <projectname> (you still need to do nova-manage project add <projectname> to add it to the database). The <projectmanager> username is the administrator of the project. ``nova-manage project delete <projectname>`` @@ -111,9 +111,9 @@ Nova Project Deletes the project with the name <projectname>. -``nova-manage project zipfile`` +``nova-manage project zipfile <projectname> <username> <directory for credentials>`` - Compresses all related files for a created project into a zip file nova.zip. + Compresses all related files for a created project into a named zip file such as nova.zip. Nova Role ~~~~~~~~~ @@ -226,7 +226,7 @@ Concept: Plugins Concept: IPC/RPC ---------------- -Rabbit! +Rabbit is the main messaging queue, used for all communication between Nova components and it also does the remote procedure calls and inter-process communication. 
Concept: Fakes diff --git a/nova/CA/geninter.sh b/nova/CA/geninter.sh index 4b7f5a55c..9b3ea3b76 100755 --- a/nova/CA/geninter.sh +++ b/nova/CA/geninter.sh @@ -21,7 +21,7 @@ NAME=$1 SUBJ=$2 mkdir -p projects/$NAME cd projects/$NAME -cp ../../openssl.cnf.tmpl openssl.cnf +cp "$(dirname $0)/openssl.cnf.tmpl" openssl.cnf sed -i -e s/%USERNAME%/$NAME/g openssl.cnf mkdir -p certs crl newcerts private openssl req -new -x509 -extensions v3_ca -keyout private/cakey.pem -out cacert.pem -days 365 -config ./openssl.cnf -batch -nodes diff --git a/nova/CA/openssl.cnf.tmpl b/nova/CA/openssl.cnf.tmpl index dd81f1c2b..f87d9f3b2 100644 --- a/nova/CA/openssl.cnf.tmpl +++ b/nova/CA/openssl.cnf.tmpl @@ -41,9 +41,13 @@ nameopt = default_ca certopt = default_ca policy = policy_match +# NOTE(dprince): stateOrProvinceName must be 'supplied' or 'optional' to +# work around a stateOrProvince printable string UTF8 mismatch on +# RHEL 6 and Fedora 14 (using openssl-1.0.0-4.el6.x86_64 or +# openssl-1.0.0d-1.fc14.x86_64) [ policy_match ] -countryName = match -stateOrProvinceName = match +countryName = supplied +stateOrProvinceName = supplied organizationName = optional organizationalUnitName = optional commonName = supplied diff --git a/nova/api/__init__.py b/nova/api/__init__.py index 0fedbbfad..747015af5 100644 --- a/nova/api/__init__.py +++ b/nova/api/__init__.py @@ -15,5 +15,3 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. - -"""No-op __init__ for directory full of api goodies.""" diff --git a/nova/api/direct.py b/nova/api/direct.py index f487df7c7..8ceae299c 100644 --- a/nova/api/direct.py +++ b/nova/api/direct.py @@ -44,14 +44,33 @@ from nova import utils from nova import wsgi +# Global storage for registering modules. ROUTES = {} def register_service(path, handle): + """Register a service handle at a given path. 
+ + Services registered in this way will be made available to any instances of + nova.api.direct.Router. + + :param path: `routes` path, can be a basic string like "/path" + :param handle: an object whose methods will be made available via the api + + """ ROUTES[path] = handle class Router(wsgi.Router): + """A simple WSGI router configured via `register_service`. + + This is a quick way to attach multiple services to a given endpoint. + It will automatically load the routes registered in the `ROUTES` global. + + TODO(termie): provide a paste-deploy version of this. + + """ + def __init__(self, mapper=None): if mapper is None: mapper = routes.Mapper() @@ -66,6 +85,24 @@ class Router(wsgi.Router): class DelegatedAuthMiddleware(wsgi.Middleware): + """A simple and naive authentication middleware. + + Designed mostly to provide basic support for alternative authentication + schemes, this middleware only desires the identity of the user and will + generate the appropriate nova.context.RequestContext for the rest of the + application. This allows any middleware above it in the stack to + authenticate however it would like while only needing to conform to a + minimal interface. + + Expects two headers to determine identity: + - X-OpenStack-User + - X-OpenStack-Project + + This middleware is tied to identity management and will need to be kept + in sync with any changes to the way identity is dealt with internally. + + """ + def process_request(self, request): os_user = request.headers['X-OpenStack-User'] os_project = request.headers['X-OpenStack-Project'] @@ -74,6 +111,20 @@ class DelegatedAuthMiddleware(wsgi.Middleware): class JsonParamsMiddleware(wsgi.Middleware): + """Middleware to allow method arguments to be passed as serialized JSON. + + Accepting arguments as JSON is useful for accepting data that may be more + complex than simple primitives. 
+ + In this case we accept it as urlencoded data under the key 'json' as in + json=<urlencoded_json> but this could be extended to accept raw JSON + in the POST body. + + Filters out the parameters `self`, `context` and anything beginning with + an underscore. + + """ + def process_request(self, request): if 'json' not in request.params: return @@ -92,6 +143,13 @@ class JsonParamsMiddleware(wsgi.Middleware): class PostParamsMiddleware(wsgi.Middleware): + """Middleware to allow method arguments to be passed as POST parameters. + + Filters out the parameters `self`, `context` and anything beginning with + an underscore. + + """ + def process_request(self, request): params_parsed = request.params params = {} @@ -106,12 +164,21 @@ class PostParamsMiddleware(wsgi.Middleware): class Reflection(object): - """Reflection methods to list available methods.""" + """Reflection methods to list available methods. + + This is an object that expects to be registered via register_service. + These methods allow the endpoint to be self-describing. They introspect + the exposed methods and provide call signatures and documentation for + them allowing quick experimentation. + + """ + def __init__(self): self._methods = {} self._controllers = {} def _gather_methods(self): + """Introspect available methods and generate documentation for them.""" methods = {} controllers = {} for route, handler in ROUTES.iteritems(): @@ -185,6 +252,16 @@ class Reflection(object): class ServiceWrapper(wsgi.Controller): + """Wrapper to dynamically povide a WSGI controller for arbitrary objects. + + With lightweight introspection allows public methods on the object to + be accesed via simple WSGI routing and parameters and serializes the + return values. + + Automatically used be nova.api.direct.Router to wrap registered instances. 
+ + """ + def __init__(self, service_handle): self.service_handle = service_handle @@ -260,7 +337,16 @@ class Limited(object): class Proxy(object): - """Pretend a Direct API endpoint is an object.""" + """Pretend a Direct API endpoint is an object. + + This is mostly useful in testing at the moment though it should be easily + extendable to provide a basic API library functionality. + + In testing we use this to stub out internal objects to verify that results + from the API are serializable. + + """ + def __init__(self, app, prefix=None): self.app = app self.prefix = prefix diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index a3c3b25a1..c13993dd3 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -46,8 +46,6 @@ flags.DEFINE_integer('lockout_minutes', 15, 'Number of minutes to lockout if triggered.') flags.DEFINE_integer('lockout_window', 15, 'Number of minutes for lockout window.') -flags.DEFINE_list('lockout_memcached_servers', None, - 'Memcached servers or None for in process cache.') class RequestLogging(wsgi.Middleware): @@ -107,11 +105,11 @@ class Lockout(wsgi.Middleware): def __init__(self, application): """middleware can use fake for testing.""" - if FLAGS.lockout_memcached_servers: + if FLAGS.memcached_servers: import memcache else: from nova import fakememcache as memcache - self.mc = memcache.Client(FLAGS.lockout_memcached_servers, + self.mc = memcache.Client(FLAGS.memcached_servers, debug=0) super(Lockout, self).__init__(application) @@ -322,9 +320,7 @@ class Executor(wsgi.Application): except exception.InstanceNotFound as ex: LOG.info(_('InstanceNotFound raised: %s'), unicode(ex), context=context) - ec2_id = ec2utils.id_to_ec2_id(ex.instance_id) - message = _('Instance %s not found') % ec2_id - return self._error(req, context, type(ex).__name__, message) + return self._error(req, context, type(ex).__name__, ex.message) except exception.VolumeNotFound as ex: LOG.info(_('VolumeNotFound raised: %s'), unicode(ex), 
context=context) @@ -342,6 +338,10 @@ class Executor(wsgi.Application): else: return self._error(req, context, type(ex).__name__, unicode(ex)) + except exception.KeyPairExists as ex: + LOG.debug(_('KeyPairExists raised: %s'), unicode(ex), + context=context) + return self._error(req, context, type(ex).__name__, unicode(ex)) except Exception as ex: extra = {'environment': req.environ} LOG.exception(_('Unexpected error raised: %s'), unicode(ex), diff --git a/nova/api/ec2/admin.py b/nova/api/ec2/admin.py index c0c2bcd0d..0d08aba7a 100644 --- a/nova/api/ec2/admin.py +++ b/nova/api/ec2/admin.py @@ -272,7 +272,7 @@ class AdminController(object): def _vpn_for(self, context, project_id): """Get the VPN instance for a project ID.""" for instance in db.instance_get_all_by_project(context, project_id): - if (instance['image_id'] == FLAGS.vpn_image_id + if (instance['image_id'] == str(FLAGS.vpn_image_id) and not instance['state_description'] in ['shutting_down', 'shutdown']): return instance diff --git a/nova/api/ec2/apirequest.py b/nova/api/ec2/apirequest.py index d7ad08d2f..6672e60bb 100644 --- a/nova/api/ec2/apirequest.py +++ b/nova/api/ec2/apirequest.py @@ -196,7 +196,7 @@ class APIRequest(object): elif isinstance(data, datetime.datetime): data_el.appendChild( xml.createTextNode(_database_to_isoformat(data))) - elif data != None: + elif data is not None: data_el.appendChild(xml.createTextNode(str(data))) return data_el diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 58effd134..c35b6024e 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -27,6 +27,8 @@ import datetime import IPy import os import urllib +import tempfile +import shutil from nova import compute from nova import context @@ -35,6 +37,7 @@ from nova import crypto from nova import db from nova import exception from nova import flags +from nova import ipv6 from nova import log as logging from nova import network from nova import utils @@ -49,8 +52,6 @@ 
flags.DECLARE('service_down_time', 'nova.scheduler.driver') LOG = logging.getLogger("nova.api.cloud") -InvalidInputException = exception.InvalidInputException - def _gen_key(context, user_id, key_name): """Generate a key @@ -61,8 +62,7 @@ def _gen_key(context, user_id, key_name): # creation before creating key_pair try: db.key_pair_get(context, user_id, key_name) - raise exception.Duplicate(_("The key_pair %s already exists") - % key_name) + raise exception.KeyPairExists(key_name=key_name) except exception.NotFound: pass private_key, public_key, fingerprint = crypto.generate_key_pair() @@ -142,6 +142,11 @@ class CloudController(object): instance_ref = self.compute_api.get_all(ctxt, fixed_ip=address) if instance_ref is None: return None + + # This ensures that all attributes of the instance + # are populated. + instance_ref = db.instance_get(ctxt, instance_ref['id']) + mpi = self._get_mpi_data(ctxt, instance_ref['project_id']) if instance_ref['key_name']: keys = {'0': {'_name': instance_ref['key_name'], @@ -154,7 +159,7 @@ class CloudController(object): floating_ip = db.instance_get_floating_address(ctxt, instance_ref['id']) ec2_id = ec2utils.id_to_ec2_id(instance_ref['id']) - image_ec2_id = self._image_ec2_id(instance_ref['image_id'], 'machine') + image_ec2_id = self.image_ec2_id(instance_ref['image_id']) data = { 'user-data': base64.b64decode(instance_ref['user_data']), 'meta-data': { @@ -182,9 +187,9 @@ class CloudController(object): 'mpi': mpi}} for image_type in ['kernel', 'ramdisk']: - if '%s_id' % image_type in instance_ref: - ec2_id = self._image_ec2_id(instance_ref['%s_id' % image_type], - image_type) + if instance_ref.get('%s_id' % image_type): + ec2_id = self.image_ec2_id(instance_ref['%s_id' % image_type], + self._image_type(image_type)) data['meta-data']['%s-id' % image_type] = ec2_id if False: # TODO(vish): store ancestor ids @@ -313,6 +318,27 @@ class CloudController(object): 'keyMaterial': data['private_key']} # TODO(vish): when context is no longer 
an object, pass it here + def import_public_key(self, context, key_name, public_key, + fingerprint=None): + LOG.audit(_("Import key %s"), key_name, context=context) + key = {} + key['user_id'] = context.user_id + key['name'] = key_name + key['public_key'] = public_key + if fingerprint is None: + tmpdir = tempfile.mkdtemp() + pubfile = os.path.join(tmpdir, 'temp.pub') + fh = open(pubfile, 'w') + fh.write(public_key) + fh.close() + (out, err) = utils.execute('ssh-keygen', '-q', '-l', '-f', + '%s' % (pubfile)) + fingerprint = out.split(' ')[1] + shutil.rmtree(tmpdir) + key['fingerprint'] = fingerprint + db.key_pair_create(context, key) + return True + def delete_key_pair(self, context, key_name, **kwargs): LOG.audit(_("Delete key pair %s"), key_name, context=context) try: @@ -394,11 +420,11 @@ class CloudController(object): ip_protocol = str(ip_protocol) if ip_protocol.upper() not in ['TCP', 'UDP', 'ICMP']: - raise InvalidInputException(_('%s is not a valid ipProtocol') % - (ip_protocol,)) + raise exception.InvalidIpProtocol(protocol=ip_protocol) if ((min(from_port, to_port) < -1) or (max(from_port, to_port) > 65535)): - raise InvalidInputException(_('Invalid port range')) + raise exception.InvalidPortRange(from_port=from_port, + to_port=to_port) values['protocol'] = ip_protocol values['from_port'] = from_port @@ -437,7 +463,7 @@ class CloudController(object): group_name) criteria = self._revoke_rule_args_to_dict(context, **kwargs) - if criteria == None: + if criteria is None: raise exception.ApiError(_("Not enough parameters to build a " "valid rule.")) @@ -608,7 +634,7 @@ class CloudController(object): # TODO(vish): Instance should be None at db layer instead of # trying to lazy load, but for now we turn it into # a dict to avoid an error. 
- return {'volumeSet': [self._format_volume(context, dict(volume))]} + return self._format_volume(context, dict(volume)) def delete_volume(self, context, volume_id, **kwargs): volume_id = ec2utils.ec2_id_to_id(volume_id) @@ -659,7 +685,7 @@ class CloudController(object): 'volumeId': ec2utils.id_to_ec2_id(volume_id, 'vol-%08x')} def _convert_to_set(self, lst, label): - if lst == None or lst == []: + if lst is None or lst == []: return None if not isinstance(lst, list): lst = [lst] @@ -698,13 +724,13 @@ class CloudController(object): instances = self.compute_api.get_all(context, **kwargs) for instance in instances: if not context.is_admin: - if instance['image_id'] == FLAGS.vpn_image_id: + if instance['image_id'] == str(FLAGS.vpn_image_id): continue i = {} instance_id = instance['id'] ec2_id = ec2utils.id_to_ec2_id(instance_id) i['instanceId'] = ec2_id - i['imageId'] = self._image_ec2_id(instance['image_id']) + i['imageId'] = self.image_ec2_id(instance['image_id']) i['instanceState'] = { 'code': instance['state'], 'name': instance['state_description']} @@ -716,12 +742,15 @@ class CloudController(object): fixed = instance['fixed_ip'] floating_addr = fixed['floating_ips'][0]['address'] if instance['fixed_ip']['network'] and 'use_v6' in kwargs: - i['dnsNameV6'] = utils.to_global_ipv6( + i['dnsNameV6'] = ipv6.to_global( instance['fixed_ip']['network']['cidr_v6'], - instance['mac_address']) + instance['mac_address'], + instance['project_id']) i['privateDnsName'] = fixed_addr + i['privateIpAddress'] = fixed_addr i['publicDnsName'] = floating_addr + i['ipAddress'] = floating_addr or fixed_addr i['dnsName'] = i['publicDnsName'] or i['privateDnsName'] i['keyName'] = instance['key_name'] @@ -730,7 +759,10 @@ class CloudController(object): instance['project_id'], instance['host']) i['productCodesSet'] = self._convert_to_set([], 'product_codes') - i['instanceType'] = instance['instance_type'] + if instance['instance_type']: + i['instanceType'] = 
instance['instance_type'].get('name') + else: + i['instanceType'] = None i['launchTime'] = instance['created_at'] i['amiLaunchIndex'] = instance['launch_index'] i['displayName'] = instance['display_name'] @@ -785,7 +817,7 @@ class CloudController(object): def allocate_address(self, context, **kwargs): LOG.audit(_("Allocate address"), context=context) public_ip = self.network_api.allocate_floating_ip(context) - return {'addressSet': [{'publicIp': public_ip}]} + return {'publicIp': public_ip} def release_address(self, context, public_ip, **kwargs): LOG.audit(_("Release address %s"), public_ip, context=context) @@ -815,7 +847,7 @@ class CloudController(object): ramdisk = self._get_image(context, kwargs['ramdisk_id']) kwargs['ramdisk_id'] = ramdisk['id'] instances = self.compute_api.create(context, - instance_type=instance_types.get_by_type( + instance_type=instance_types.get_instance_type_by_name( kwargs.get('instance_type', None)), image_id=self._get_image(context, kwargs['image_id'])['id'], min_count=int(kwargs.get('min_count', max_count)), @@ -872,46 +904,70 @@ class CloudController(object): self.compute_api.update(context, instance_id=instance_id, **kwargs) return True - _type_prefix_map = {'machine': 'ami', - 'kernel': 'aki', - 'ramdisk': 'ari'} + @staticmethod + def _image_type(image_type): + """Converts to a three letter image type. 
+ + aki, kernel => aki + ari, ramdisk => ari + anything else => ami - def _image_ec2_id(self, image_id, image_type='machine'): - prefix = self._type_prefix_map[image_type] - template = prefix + '-%08x' + """ + if image_type == 'kernel': + return 'aki' + if image_type == 'ramdisk': + return 'ari' + if image_type not in ['aki', 'ari']: + return 'ami' + return image_type + + @staticmethod + def image_ec2_id(image_id, image_type='ami'): + """Returns image ec2_id using id and three letter type.""" + template = image_type + '-%08x' return ec2utils.id_to_ec2_id(int(image_id), template=template) def _get_image(self, context, ec2_id): try: internal_id = ec2utils.ec2_id_to_id(ec2_id) return self.image_service.show(context, internal_id) - except exception.NotFound: - return self.image_service.show_by_name(context, ec2_id) + except (exception.InvalidEc2Id, exception.ImageNotFound): + try: + return self.image_service.show_by_name(context, ec2_id) + except exception.NotFound: + raise exception.ImageNotFound(image_id=ec2_id) def _format_image(self, image): """Convert from format defined by BaseImageService to S3 format.""" i = {} - image_type = image['properties'].get('type') - ec2_id = self._image_ec2_id(image.get('id'), image_type) + image_type = self._image_type(image.get('container_format')) + ec2_id = self.image_ec2_id(image.get('id'), image_type) name = image.get('name') i['imageId'] = ec2_id kernel_id = image['properties'].get('kernel_id') if kernel_id: - i['kernelId'] = self._image_ec2_id(kernel_id, 'kernel') + i['kernelId'] = self.image_ec2_id(kernel_id, 'aki') ramdisk_id = image['properties'].get('ramdisk_id') if ramdisk_id: - i['ramdiskId'] = self._image_ec2_id(ramdisk_id, 'ramdisk') + i['ramdiskId'] = self.image_ec2_id(ramdisk_id, 'ari') i['imageOwnerId'] = image['properties'].get('owner_id') if name: i['imageLocation'] = "%s (%s)" % (image['properties']. 
get('image_location'), name) else: i['imageLocation'] = image['properties'].get('image_location') - i['imageState'] = image['properties'].get('image_state') + # NOTE(vish): fallback status if image_state isn't set + state = image.get('status') + if state == 'active': + state = 'available' + i['imageState'] = image['properties'].get('image_state', state) i['displayName'] = name i['description'] = image.get('description') - i['imageType'] = image_type - i['isPublic'] = str(image['properties'].get('is_public', '')) == 'True' + display_mapping = {'aki': 'kernel', + 'ari': 'ramdisk', + 'ami': 'machine'} + i['imageType'] = display_mapping.get(image_type) + i['isPublic'] = image.get('is_public') == True i['architecture'] = image['properties'].get('architecture') return i @@ -923,8 +979,7 @@ class CloudController(object): try: image = self._get_image(context, ec2_id) except exception.NotFound: - raise exception.NotFound(_('Image %s not found') % - ec2_id) + raise exception.ImageNotFound(image_id=ec2_id) images.append(image) else: images = self.image_service.detail(context) @@ -943,8 +998,9 @@ class CloudController(object): image_location = kwargs['name'] metadata = {'properties': {'image_location': image_location}} image = self.image_service.create(context, metadata) - image_id = self._image_ec2_id(image['id'], - image['properties']['type']) + image_type = self._image_type(image.get('container_format')) + image_id = self.image_ec2_id(image['id'], + image_type) msg = _("Registered image %(image_location)s with" " id %(image_id)s") % locals() LOG.audit(msg, context=context) @@ -957,9 +1013,9 @@ class CloudController(object): try: image = self._get_image(context, image_id) except exception.NotFound: - raise exception.NotFound(_('Image %s not found') % image_id) + raise exception.ImageNotFound(image_id=image_id) result = {'imageId': image_id, 'launchPermission': []} - if image['properties']['is_public']: + if image['is_public']: result['launchPermission'].append({'group': 
'all'}) return result @@ -980,11 +1036,11 @@ class CloudController(object): try: image = self._get_image(context, image_id) except exception.NotFound: - raise exception.NotFound(_('Image %s not found') % image_id) + raise exception.ImageNotFound(image_id=image_id) internal_id = image['id'] del(image['id']) - image['properties']['is_public'] = (operation_type == 'add') + image['is_public'] = (operation_type == 'add') return self.image_service.update(context, internal_id, image) def update_image(self, context, image_id, **kwargs): diff --git a/nova/api/ec2/ec2utils.py b/nova/api/ec2/ec2utils.py index 3b34f6ea5..163aa4ed2 100644 --- a/nova/api/ec2/ec2utils.py +++ b/nova/api/ec2/ec2utils.py @@ -24,7 +24,7 @@ def ec2_id_to_id(ec2_id): try: return int(ec2_id.split('-')[-1], 16) except ValueError: - raise exception.NotFound(_("Id %s Not Found") % ec2_id) + raise exception.InvalidEc2Id(ec2_id=ec2_id) def id_to_ec2_id(instance_id, template='i-%08x'): diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index 7545eb0c9..5b7f080ad 100644 --- a/nova/api/openstack/__init__.py +++ b/nova/api/openstack/__init__.py @@ -34,6 +34,7 @@ from nova.api.openstack import consoles from nova.api.openstack import flavors from nova.api.openstack import images from nova.api.openstack import image_metadata +from nova.api.openstack import ips from nova.api.openstack import limits from nova.api.openstack import servers from nova.api.openstack import server_metadata @@ -97,7 +98,8 @@ class APIRouter(wsgi.Router): server_members['inject_network_info'] = 'POST' mapper.resource("zone", "zones", controller=zones.Controller(), - collection={'detail': 'GET', 'info': 'GET'}), + collection={'detail': 'GET', 'info': 'GET', + 'select': 'GET'}) mapper.resource("user", "users", controller=users.Controller(), collection={'detail': 'GET'}) @@ -111,9 +113,6 @@ class APIRouter(wsgi.Router): parent_resource=dict(member_name='server', collection_name='servers')) - _limits = 
limits.LimitsController() - mapper.resource("limit", "limits", controller=_limits) - super(APIRouter, self).__init__(mapper) @@ -144,6 +143,14 @@ class APIRouterV10(APIRouter): parent_resource=dict(member_name='server', collection_name='servers')) + mapper.resource("limit", "limits", + controller=limits.LimitsControllerV10()) + + mapper.resource("ip", "ips", controller=ips.Controller(), + collection=dict(public='GET', private='GET'), + parent_resource=dict(member_name='server', + collection_name='servers')) + class APIRouterV11(APIRouter): """Define routes specific to OpenStack API V1.1.""" @@ -172,3 +179,6 @@ class APIRouterV11(APIRouter): mapper.resource("flavor", "flavors", controller=flavors.ControllerV11(), collection={'detail': 'GET'}) + + mapper.resource("limit", "limits", + controller=limits.LimitsControllerV11()) diff --git a/nova/api/openstack/accounts.py b/nova/api/openstack/accounts.py index 6e3763e47..00fdd4540 100644 --- a/nova/api/openstack/accounts.py +++ b/nova/api/openstack/accounts.py @@ -48,7 +48,7 @@ class Controller(common.OpenstackController): """We cannot depend on the db layer to check for admin access for the auth manager, so we do it here""" if not context.is_admin: - raise exception.NotAuthorized(_("Not admin user.")) + raise exception.AdminRequired() def index(self, req): raise faults.Fault(webob.exc.HTTPNotImplemented()) diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py index f3a9bdeca..6c6ee22a2 100644 --- a/nova/api/openstack/auth.py +++ b/nova/api/openstack/auth.py @@ -17,7 +17,6 @@ import datetime import hashlib -import json import time import webob.exc @@ -25,11 +24,9 @@ import webob.dec from nova import auth from nova import context -from nova import db from nova import exception from nova import flags from nova import log as logging -from nova import manager from nova import utils from nova import wsgi from nova.api.openstack import faults @@ -55,6 +52,9 @@ class AuthMiddleware(wsgi.Middleware): user = 
self.get_user_by_authentication(req) accounts = self.auth.get_projects(user=user) if not user: + token = req.headers["X-Auth-Token"] + msg = _("%(user)s could not be found with token '%(token)s'") + LOG.warn(msg % locals()) return faults.Fault(webob.exc.HTTPUnauthorized()) if accounts: @@ -66,6 +66,8 @@ class AuthMiddleware(wsgi.Middleware): if not self.auth.is_admin(user) and \ not self.auth.is_project_member(user, account): + msg = _("%(user)s must be an admin or a member of %(account)s") + LOG.warn(msg % locals()) return faults.Fault(webob.exc.HTTPUnauthorized()) req.environ['nova.context'] = context.RequestContext(user, account) @@ -82,24 +84,29 @@ class AuthMiddleware(wsgi.Middleware): # honor it path_info = req.path_info if len(path_info) > 1: - return faults.Fault(webob.exc.HTTPUnauthorized()) + msg = _("Authentication requests must be made against a version " + "root (e.g. /v1.0 or /v1.1).") + LOG.warn(msg) + return faults.Fault(webob.exc.HTTPUnauthorized(explanation=msg)) try: username = req.headers['X-Auth-User'] key = req.headers['X-Auth-Key'] - except KeyError: + except KeyError as ex: + LOG.warn(_("Could not find %s in request.") % ex) return faults.Fault(webob.exc.HTTPUnauthorized()) token, user = self._authorize_user(username, key, req) if user and token: res = webob.Response() - res.headers['X-Auth-Token'] = token.token_hash + res.headers['X-Auth-Token'] = token['token_hash'] res.headers['X-Server-Management-Url'] = \ - token.server_management_url - res.headers['X-Storage-Url'] = token.storage_url - res.headers['X-CDN-Management-Url'] = token.cdn_management_url + token['server_management_url'] + res.headers['X-Storage-Url'] = token['storage_url'] + res.headers['X-CDN-Management-Url'] = token['cdn_management_url'] res.content_type = 'text/plain' res.status = '204' + LOG.debug(_("Successfully authenticated '%s'") % username) return res else: return faults.Fault(webob.exc.HTTPUnauthorized()) @@ -120,11 +127,11 @@ class AuthMiddleware(wsgi.Middleware): 
except exception.NotFound: return None if token: - delta = datetime.datetime.now() - token.created_at + delta = datetime.datetime.utcnow() - token['created_at'] if delta.days >= 2: - self.db.auth_token_destroy(ctxt, token.token_hash) + self.db.auth_token_destroy(ctxt, token['token_hash']) else: - return self.auth.get_user(token.user_id) + return self.auth.get_user(token['user_id']) return None def _authorize_user(self, username, key, req): @@ -139,6 +146,7 @@ class AuthMiddleware(wsgi.Middleware): try: user = self.auth.get_user_from_access_key(key) except exception.NotFound: + LOG.warn(_("User not found with provided API key.")) user = None if user and user.name == username: @@ -153,4 +161,9 @@ class AuthMiddleware(wsgi.Middleware): token_dict['user_id'] = user.id token = self.db.auth_token_create(ctxt, token_dict) return token, user + elif user and user.name != username: + msg = _("Provided API key is valid, but not for user " + "'%(username)s'") % locals() + LOG.warn(msg) + return None, None diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py index 234f921ab..32cd689ca 100644 --- a/nova/api/openstack/common.py +++ b/nova/api/openstack/common.py @@ -15,6 +15,7 @@ # License for the specific language governing permissions and limitations # under the License. 
+import re from urlparse import urlparse import webob @@ -25,7 +26,7 @@ from nova import log as logging from nova import wsgi -LOG = logging.getLogger('common') +LOG = logging.getLogger('nova.api.openstack.common') FLAGS = flags.FLAGS @@ -116,18 +117,30 @@ def get_image_id_from_image_hash(image_service, context, image_hash): items = image_service.index(context) for image in items: image_id = image['id'] - if abs(hash(image_id)) == int(image_hash): - return image_id - raise exception.NotFound(image_hash) + try: + if abs(hash(image_id)) == int(image_hash): + return image_id + except ValueError: + msg = _("Requested image_id has wrong format: %s," + "should have numerical format") % image_id + LOG.error(msg) + raise Exception(msg) + raise exception.ImageNotFound(image_id=image_hash) def get_id_from_href(href): """Return the id portion of a url as an int. - Given: http://www.foo.com/bar/123?q=4 + Given: 'http://www.foo.com/bar/123?q=4' + Returns: 123 + + In order to support local hrefs, the href argument can be just an id: + Given: '123' Returns: 123 """ + if re.match(r'\d+$', str(href)): + return int(href) try: return int(urlparse(href).path.split('/')[-1]) except: diff --git a/nova/api/openstack/contrib/volumes.py b/nova/api/openstack/contrib/volumes.py index 6efacce52..18de2ec71 100644 --- a/nova/api/openstack/contrib/volumes.py +++ b/nova/api/openstack/contrib/volumes.py @@ -322,8 +322,7 @@ class Volumes(extensions.ExtensionDescriptor): # Does this matter? 
res = extensions.ResourceExtension('volumes', VolumeController(), - collection_actions={'detail': 'GET'} - ) + collection_actions={'detail': 'GET'}) resources.append(res) res = extensions.ResourceExtension('volume_attachments', diff --git a/nova/api/openstack/extensions.py b/nova/api/openstack/extensions.py index 7ea7afef6..8e77b25fb 100644 --- a/nova/api/openstack/extensions.py +++ b/nova/api/openstack/extensions.py @@ -105,15 +105,14 @@ class ExtensionDescriptor(object): actions = [] return actions - def get_response_extensions(self): - """List of extensions.ResponseExtension extension objects. + def get_request_extensions(self): + """List of extensions.RequestException extension objects. - Response extensions are used to insert information into existing - response data. + Request extensions are used to handle custom request data. """ - response_exts = [] - return response_exts + request_exts = [] + return request_exts class ActionExtensionController(common.OpenstackController): @@ -137,7 +136,7 @@ class ActionExtensionController(common.OpenstackController): return res -class ResponseExtensionController(common.OpenstackController): +class RequestExtensionController(common.OpenstackController): def __init__(self, application): self.application = application @@ -148,20 +147,9 @@ class ResponseExtensionController(common.OpenstackController): def process(self, req, *args, **kwargs): res = req.get_response(self.application) - content_type = req.best_match_content_type() - # currently response handlers are un-ordered + # currently request handlers are un-ordered for handler in self.handlers: - res = handler(res) - try: - body = res.body - headers = res.headers - except AttributeError: - default_xmlns = None - body = self._serialize(res, content_type, default_xmlns) - headers = {"Content-Type": content_type} - res = webob.Response() - res.body = body - res.headers = headers + res = handler(req, res) return res @@ -226,24 +214,24 @@ class 
ExtensionMiddleware(wsgi.Middleware): return action_controllers - def _response_ext_controllers(self, application, ext_mgr, mapper): - """Returns a dict of ResponseExtensionController-s by collection.""" - response_ext_controllers = {} - for resp_ext in ext_mgr.get_response_extensions(): - if not resp_ext.key in response_ext_controllers.keys(): - controller = ResponseExtensionController(application) - mapper.connect(resp_ext.url_route + '.:(format)', + def _request_ext_controllers(self, application, ext_mgr, mapper): + """Returns a dict of RequestExtensionController-s by collection.""" + request_ext_controllers = {} + for req_ext in ext_mgr.get_request_extensions(): + if not req_ext.key in request_ext_controllers.keys(): + controller = RequestExtensionController(application) + mapper.connect(req_ext.url_route + '.:(format)', action='process', controller=controller, - conditions=resp_ext.conditions) + conditions=req_ext.conditions) - mapper.connect(resp_ext.url_route, + mapper.connect(req_ext.url_route, action='process', controller=controller, - conditions=resp_ext.conditions) - response_ext_controllers[resp_ext.key] = controller + conditions=req_ext.conditions) + request_ext_controllers[req_ext.key] = controller - return response_ext_controllers + return request_ext_controllers def __init__(self, application, ext_mgr=None): @@ -271,13 +259,13 @@ class ExtensionMiddleware(wsgi.Middleware): controller = action_controllers[action.collection] controller.add_action(action.action_name, action.handler) - # extended responses - resp_controllers = self._response_ext_controllers(application, ext_mgr, + # extended requests + req_controllers = self._request_ext_controllers(application, ext_mgr, mapper) - for response_ext in ext_mgr.get_response_extensions(): - LOG.debug(_('Extended response: %s'), response_ext.key) - controller = resp_controllers[response_ext.key] - controller.add_handler(response_ext.handler) + for request_ext in ext_mgr.get_request_extensions(): + 
LOG.debug(_('Extended request: %s'), request_ext.key) + controller = req_controllers[request_ext.key] + controller.add_handler(request_ext.handler) self._router = routes.middleware.RoutesMiddleware(self._dispatch, mapper) @@ -347,17 +335,17 @@ class ExtensionManager(object): pass return actions - def get_response_extensions(self): - """Returns a list of ResponseExtension objects.""" - response_exts = [] + def get_request_extensions(self): + """Returns a list of RequestExtension objects.""" + request_exts = [] for alias, ext in self.extensions.iteritems(): try: - response_exts.extend(ext.get_response_extensions()) + request_exts.extend(ext.get_request_extensions()) except AttributeError: - # NOTE(dprince): Extension aren't required to have response + # NOTE(dprince): Extension aren't required to have request # extensions pass - return response_exts + return request_exts def _check_extension(self, extension): """Checks for required methods in extension objects.""" @@ -421,9 +409,13 @@ class ExtensionManager(object): self.extensions[alias] = ext -class ResponseExtension(object): - """Add data to responses from core nova OpenStack API controllers.""" +class RequestExtension(object): + """Extend requests and responses of core nova OpenStack API controllers. + Provide a way to add data to responses and handle custom request data + that is sent to core nova OpenStack API controllers. + + """ def __init__(self, method, url_route, handler): self.url_route = url_route self.handler = handler diff --git a/nova/api/openstack/faults.py b/nova/api/openstack/faults.py index bc97639a0..87118ce19 100644 --- a/nova/api/openstack/faults.py +++ b/nova/api/openstack/faults.py @@ -47,7 +47,7 @@ class Fault(webob.exc.HTTPException): """Generate a WSGI response based on the exception passed to ctor.""" # Replace the body with fault details. 
code = self.wrapped_exc.status_int - fault_name = self._fault_names.get(code, "computeFault") + fault_name = self._fault_names.get(code, "cloudServersFault") fault_data = { fault_name: { 'code': code, diff --git a/nova/api/openstack/flavors.py b/nova/api/openstack/flavors.py index 40787bd17..4c5971cf6 100644 --- a/nova/api/openstack/flavors.py +++ b/nova/api/openstack/flavors.py @@ -45,6 +45,9 @@ class Controller(common.OpenstackController): items = self._get_flavors(req, is_detail=True) return dict(flavors=items) + def _get_view_builder(self, req): + raise NotImplementedError() + def _get_flavors(self, req, is_detail=True): """Helper function that returns a list of flavor dicts.""" ctxt = req.environ['nova.context'] diff --git a/nova/api/openstack/image_metadata.py b/nova/api/openstack/image_metadata.py index e673e5f7b..1eccc0174 100644 --- a/nova/api/openstack/image_metadata.py +++ b/nova/api/openstack/image_metadata.py @@ -18,6 +18,7 @@ from webob import exc from nova import flags +from nova import quota from nova import utils from nova import wsgi from nova.api.openstack import common @@ -40,6 +41,15 @@ class Controller(common.OpenstackController): metadata = image.get('properties', {}) return metadata + def _check_quota_limit(self, context, metadata): + if metadata is None: + return + num_metadata = len(metadata) + quota_metadata = quota.allowed_metadata_items(context, num_metadata) + if quota_metadata < num_metadata: + expl = _("Image metadata limit exceeded") + raise exc.HTTPBadRequest(explanation=expl) + def index(self, req, image_id): """Returns the list of metadata for a given instance""" context = req.environ['nova.context'] @@ -62,6 +72,7 @@ class Controller(common.OpenstackController): if 'metadata' in body: for key, value in body['metadata'].iteritems(): metadata[key] = value + self._check_quota_limit(context, metadata) img['properties'] = metadata self.image_service.update(context, image_id, img, None) return dict(metadata=metadata) @@ -78,6 +89,7 @@ 
class Controller(common.OpenstackController): img = self.image_service.show(context, image_id) metadata = self._get_metadata(context, image_id, img) metadata[id] = body[id] + self._check_quota_limit(context, metadata) img['properties'] = metadata self.image_service.update(context, image_id, img, None) diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index 77baf5947..34d4c27fc 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -127,7 +127,7 @@ class Controller(common.OpenstackController): raise webob.exc.HTTPBadRequest() image = self._compute_service.snapshot(context, server_id, image_name) - return self.get_builder(req).build(image, detail=True) + return dict(image=self.get_builder(req).build(image, detail=True)) def get_builder(self, request): """Indicates that you must use a Controller subclass.""" diff --git a/nova/api/openstack/ips.py b/nova/api/openstack/ips.py new file mode 100644 index 000000000..778e9ba1a --- /dev/null +++ b/nova/api/openstack/ips.py @@ -0,0 +1,72 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import time + +from webob import exc + +import nova +import nova.api.openstack.views.addresses +from nova.api.openstack import common +from nova.api.openstack import faults + + +class Controller(common.OpenstackController): + """The servers addresses API controller for the Openstack API.""" + + _serialization_metadata = { + 'application/xml': { + 'list_collections': { + 'public': {'item_name': 'ip', 'item_key': 'addr'}, + 'private': {'item_name': 'ip', 'item_key': 'addr'}, + }, + }, + } + + def __init__(self): + self.compute_api = nova.compute.API() + self.builder = nova.api.openstack.views.addresses.ViewBuilderV10() + + def index(self, req, server_id): + try: + instance = self.compute_api.get(req.environ['nova.context'], id) + except nova.exception.NotFound: + return faults.Fault(exc.HTTPNotFound()) + return {'addresses': self.builder.build(instance)} + + def public(self, req, server_id): + try: + instance = self.compute_api.get(req.environ['nova.context'], id) + except nova.exception.NotFound: + return faults.Fault(exc.HTTPNotFound()) + return {'public': self.builder.build_public_parts(instance)} + + def private(self, req, server_id): + try: + instance = self.compute_api.get(req.environ['nova.context'], id) + except nova.exception.NotFound: + return faults.Fault(exc.HTTPNotFound()) + return {'private': self.builder.build_private_parts(instance)} + + def show(self, req, server_id, id): + return faults.Fault(exc.HTTPNotImplemented()) + + def create(self, req, server_id): + return faults.Fault(exc.HTTPNotImplemented()) + + def delete(self, req, server_id, id): + return faults.Fault(exc.HTTPNotImplemented()) diff --git a/nova/api/openstack/limits.py b/nova/api/openstack/limits.py index 9877af191..bd0250a7f 100644 --- a/nova/api/openstack/limits.py +++ b/nova/api/openstack/limits.py @@ -30,10 +30,11 @@ from collections import defaultdict from webob.dec import wsgify +from nova import quota from nova import wsgi from nova.api.openstack import common from 
nova.api.openstack import faults -from nova.wsgi import Middleware +from nova.api.openstack.views import limits as limits_views # Convenience constants for the limits dictionary passed to Limiter(). @@ -51,8 +52,8 @@ class LimitsController(common.OpenstackController): _serialization_metadata = { "application/xml": { "attributes": { - "limit": ["verb", "URI", "regex", "value", "unit", - "resetTime", "remaining", "name"], + "limit": ["verb", "URI", "uri", "regex", "value", "unit", + "resetTime", "next-available", "remaining", "name"], }, "plurals": { "rate": "limit", @@ -64,15 +65,25 @@ class LimitsController(common.OpenstackController): """ Return all global and rate limit information. """ - abs_limits = {} + context = req.environ['nova.context'] + abs_limits = quota.get_project_quotas(context, context.project_id) rate_limits = req.environ.get("nova.limits", []) - return { - "limits": { - "rate": rate_limits, - "absolute": abs_limits, - }, - } + builder = self._get_view_builder(req) + return builder.build(rate_limits, abs_limits) + + def _get_view_builder(self, req): + raise NotImplementedError() + + +class LimitsControllerV10(LimitsController): + def _get_view_builder(self, req): + return limits_views.ViewBuilderV10() + + +class LimitsControllerV11(LimitsController): + def _get_view_builder(self, req): + return limits_views.ViewBuilderV11() class Limit(object): @@ -186,7 +197,7 @@ DEFAULT_LIMITS = [ ] -class RateLimitingMiddleware(Middleware): +class RateLimitingMiddleware(wsgi.Middleware): """ Rate-limits requests passing through this middleware. All limit information is stored in memory for this implementation. 
@@ -200,7 +211,7 @@ class RateLimitingMiddleware(Middleware): @param application: WSGI application to wrap @param limits: List of dictionaries describing limits """ - Middleware.__init__(self, application) + wsgi.Middleware.__init__(self, application) self._limiter = Limiter(limits or DEFAULT_LIMITS) @wsgify(RequestClass=wsgi.Request) diff --git a/nova/api/openstack/server_metadata.py b/nova/api/openstack/server_metadata.py index 5c1390b9c..fd64ee4fb 100644 --- a/nova/api/openstack/server_metadata.py +++ b/nova/api/openstack/server_metadata.py @@ -18,6 +18,7 @@ from webob import exc from nova import compute +from nova import quota from nova import wsgi from nova.api.openstack import common from nova.api.openstack import faults @@ -44,10 +45,14 @@ class Controller(common.OpenstackController): def create(self, req, server_id): context = req.environ['nova.context'] - body = self._deserialize(req.body, req.get_content_type()) - self.compute_api.update_or_create_instance_metadata(context, - server_id, - body['metadata']) + data = self._deserialize(req.body, req.get_content_type()) + metadata = data.get('metadata') + try: + self.compute_api.update_or_create_instance_metadata(context, + server_id, + metadata) + except quota.QuotaError as error: + self._handle_quota_error(error) return req.body def update(self, req, server_id, id): @@ -59,9 +64,13 @@ class Controller(common.OpenstackController): if len(body) > 1: expl = _('Request body contains too many items') raise exc.HTTPBadRequest(explanation=expl) - self.compute_api.update_or_create_instance_metadata(context, - server_id, - body) + try: + self.compute_api.update_or_create_instance_metadata(context, + server_id, + body) + except quota.QuotaError as error: + self._handle_quota_error(error) + return req.body def show(self, req, server_id, id): @@ -77,3 +86,9 @@ class Controller(common.OpenstackController): """ Deletes an existing metadata """ context = req.environ['nova.context'] 
self.compute_api.delete_instance_metadata(context, server_id, id) + + def _handle_quota_error(self, error): + """Reraise quota errors as api-specific http exceptions.""" + if error.code == "MetadataLimitExceeded": + raise exc.HTTPBadRequest(explanation=error.message) + raise error diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index f224825cc..5c10fc916 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -14,33 +14,30 @@ # under the License. import base64 -import hashlib import traceback from webob import exc from xml.dom import minidom from nova import compute -from nova import context from nova import exception from nova import flags from nova import log as logging from nova import quota from nova import utils -from nova import wsgi from nova.api.openstack import common from nova.api.openstack import faults import nova.api.openstack.views.addresses import nova.api.openstack.views.flavors +import nova.api.openstack.views.images import nova.api.openstack.views.servers from nova.auth import manager as auth_manager from nova.compute import instance_types -from nova.compute import power_state import nova.api.openstack from nova.scheduler import api as scheduler_api -LOG = logging.getLogger('server') +LOG = logging.getLogger('nova.api.openstack.servers') FLAGS = flags.FLAGS @@ -55,6 +52,13 @@ class Controller(common.OpenstackController): "imageRef"], "link": ["rel", "type", "href"], }, + "dict_collections": { + "metadata": {"item_name": "meta", "item_key": "key"}, + }, + "list_collections": { + "public": {"item_name": "ip", "item_key": "addr"}, + "private": {"item_name": "ip", "item_key": "addr"}, + }, }, } @@ -63,15 +67,6 @@ class Controller(common.OpenstackController): self._image_service = utils.import_object(FLAGS.image_service) super(Controller, self).__init__() - def ips(self, req, id): - try: - instance = self.compute_api.get(req.environ['nova.context'], id) - except exception.NotFound: - return 
faults.Fault(exc.HTTPNotFound()) - - builder = self._get_addresses_view_builder(req) - return builder.build(instance) - def index(self, req): """ Returns a list of server names and ids for a given user """ return self._items(req, is_detail=False) @@ -80,6 +75,21 @@ class Controller(common.OpenstackController): """ Returns a list of server details for a given user """ return self._items(req, is_detail=True) + def _image_id_from_req_data(self, data): + raise NotImplementedError() + + def _flavor_id_from_req_data(self, data): + raise NotImplementedError() + + def _get_view_builder(self, req): + raise NotImplementedError() + + def _limit_items(self, items, req): + raise NotImplementedError() + + def _action_rebuild(self, info, request, instance_id): + raise NotImplementedError() + def _items(self, req, is_detail): """Returns a list of servers for a given user. @@ -120,6 +130,8 @@ class Controller(common.OpenstackController): context = req.environ['nova.context'] + password = self._get_server_admin_password(env['server']) + key_name = None key_data = None key_pairs = auth_manager.AuthManager.get_key_pairs(context) @@ -129,21 +141,16 @@ class Controller(common.OpenstackController): key_data = key_pair['public_key'] requested_image_id = self._image_id_from_req_data(env) - image_id = common.get_image_id_from_image_hash(self._image_service, - context, requested_image_id) + try: + image_id = common.get_image_id_from_image_hash(self._image_service, + context, requested_image_id) + except: + msg = _("Can not find requested image") + return faults.Fault(exc.HTTPBadRequest(msg)) + kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image( req, image_id) - # Metadata is a list, not a Dictionary, because we allow duplicate keys - # (even though JSON can't encode this) - # In future, we may not allow duplicate keys. - # However, the CloudServers API is not definitive on this front, - # and we want to be compatible. 
- metadata = [] - if env['server'].get('metadata'): - for k, v in env['server']['metadata'].items(): - metadata.append({'key': k, 'value': v}) - personality = env['server'].get('personality') injected_files = [] if personality: @@ -160,9 +167,11 @@ class Controller(common.OpenstackController): name = name.strip() try: + inst_type = \ + instance_types.get_instance_type_by_flavor_id(flavor_id) (inst,) = self.compute_api.create( context, - instance_types.get_by_flavor_id(flavor_id), + inst_type, image_id, kernel_id=kernel_id, ramdisk_id=ramdisk_id, @@ -170,20 +179,18 @@ class Controller(common.OpenstackController): display_description=name, key_name=key_name, key_data=key_data, - metadata=metadata, - injected_files=injected_files) + metadata=env['server'].get('metadata', {}), + injected_files=injected_files, + admin_password=password) except quota.QuotaError as error: self._handle_quota_error(error) - inst['instance_type'] = flavor_id + inst['instance_type'] = inst_type inst['image_id'] = requested_image_id builder = self._get_view_builder(req) server = builder.build(inst, is_detail=True) - password = utils.generate_password(16) server['server']['adminPass'] = password - self.compute_api.set_admin_password(context, server['server']['id'], - password) return server def _deserialize_create(self, request): @@ -242,6 +249,10 @@ class Controller(common.OpenstackController): # if the original error is okay, just reraise it raise error + def _get_server_admin_password(self, server): + """ Determine the admin password for a server on creation """ + return utils.generate_password(16) + @scheduler_api.redirect_handler def update(self, req, id): """ Updates the server name or password """ @@ -320,9 +331,6 @@ class Controller(common.OpenstackController): return faults.Fault(exc.HTTPBadRequest()) return exc.HTTPAccepted() - def _action_rebuild(self, input_dict, req, id): - return faults.Fault(exc.HTTPNotImplemented()) - def _action_resize(self, input_dict, req, id): """ Resizes a 
given instance to the flavor size requested """ try: @@ -336,18 +344,20 @@ class Controller(common.OpenstackController): except Exception, e: LOG.exception(_("Error in resize %s"), e) return faults.Fault(exc.HTTPBadRequest()) - return faults.Fault(exc.HTTPAccepted()) + return exc.HTTPAccepted() def _action_reboot(self, input_dict, req, id): - try: + if 'reboot' in input_dict and 'type' in input_dict['reboot']: reboot_type = input_dict['reboot']['type'] - except Exception: - raise faults.Fault(exc.HTTPNotImplemented()) + else: + LOG.exception(_("Missing argument 'type' for reboot")) + return faults.Fault(exc.HTTPUnprocessableEntity()) try: # TODO(gundlach): pass reboot_type, support soft reboot in # virt driver self.compute_api.reboot(req.environ['nova.context'], id) - except: + except Exception, e: + LOG.exception(_("Error in reboot %s"), e) return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() @@ -561,24 +571,21 @@ class Controller(common.OpenstackController): """ image_id = image_meta['id'] if image_meta['status'] != 'active': - raise exception.Invalid( - _("Cannot build from image %(image_id)s, status not active") % - locals()) + raise exception.ImageUnacceptable(image_id=image_id, + reason=_("status is not active")) - if image_meta['properties']['disk_format'] != 'ami': + if image_meta.get('container_format') != 'ami': return None, None try: kernel_id = image_meta['properties']['kernel_id'] except KeyError: - raise exception.NotFound( - _("Kernel not found for image %(image_id)s") % locals()) + raise exception.KernelNotFoundForImage(image_id=image_id) try: ramdisk_id = image_meta['properties']['ramdisk_id'] except KeyError: - raise exception.NotFound( - _("Ramdisk not found for image %(image_id)s") % locals()) + raise exception.RamdiskNotFoundForImage(image_id=image_id) return kernel_id, ramdisk_id @@ -595,19 +602,35 @@ class ControllerV10(Controller): return nova.api.openstack.views.servers.ViewBuilderV10( addresses_builder) - def 
_get_addresses_view_builder(self, req): - return nova.api.openstack.views.addresses.ViewBuilderV10(req) - def _limit_items(self, items, req): return common.limited(items, req) def _parse_update(self, context, server_id, inst_dict, update_dict): if 'adminPass' in inst_dict['server']: - update_dict['admin_pass'] = inst_dict['server']['adminPass'] - try: - self.compute_api.set_admin_password(context, server_id) - except exception.TimeoutException: - return exc.HTTPRequestTimeout() + self.compute_api.set_admin_password(context, server_id, + inst_dict['server']['adminPass']) + + def _action_rebuild(self, info, request, instance_id): + context = request.environ['nova.context'] + instance_id = int(instance_id) + + try: + image_id = info["rebuild"]["imageId"] + except (KeyError, TypeError): + msg = _("Could not parse imageId from request.") + LOG.debug(msg) + return faults.Fault(exc.HTTPBadRequest(explanation=msg)) + + try: + self.compute_api.rebuild(context, instance_id, image_id) + except exception.BuildInProgress: + msg = _("Instance %d is currently being rebuilt.") % instance_id + LOG.debug(msg) + return faults.Fault(exc.HTTPConflict(explanation=msg)) + + response = exc.HTTPAccepted() + response.empty_body = True + return response class ControllerV11(Controller): @@ -629,9 +652,6 @@ class ControllerV11(Controller): return nova.api.openstack.views.servers.ViewBuilderV11( addresses_builder, flavor_builder, image_builder, base_url) - def _get_addresses_view_builder(self, req): - return nova.api.openstack.views.addresses.ViewBuilderV11(req) - def _action_change_password(self, input_dict, req, id): context = req.environ['nova.context'] if (not 'changePassword' in input_dict @@ -648,6 +668,73 @@ class ControllerV11(Controller): def _limit_items(self, items, req): return common.limited_by_marker(items, req) + def _validate_metadata(self, metadata): + """Ensure that we can work with the metadata given.""" + try: + metadata.iteritems() + except AttributeError as ex: + msg = 
_("Unable to parse metadata key/value pairs.") + LOG.debug(msg) + raise faults.Fault(exc.HTTPBadRequest(explanation=msg)) + + def _decode_personalities(self, personalities): + """Decode the Base64-encoded personalities.""" + for personality in personalities: + try: + path = personality["path"] + contents = personality["contents"] + except (KeyError, TypeError): + msg = _("Unable to parse personality path/contents.") + LOG.info(msg) + raise faults.Fault(exc.HTTPBadRequest(explanation=msg)) + + try: + personality["contents"] = base64.b64decode(contents) + except TypeError: + msg = _("Personality content could not be Base64 decoded.") + LOG.info(msg) + raise faults.Fault(exc.HTTPBadRequest(explanation=msg)) + + def _action_rebuild(self, info, request, instance_id): + context = request.environ['nova.context'] + instance_id = int(instance_id) + + try: + image_ref = info["rebuild"]["imageRef"] + except (KeyError, TypeError): + msg = _("Could not parse imageRef from request.") + LOG.debug(msg) + return faults.Fault(exc.HTTPBadRequest(explanation=msg)) + + image_id = common.get_id_from_href(image_ref) + personalities = info["rebuild"].get("personality", []) + metadata = info["rebuild"].get("metadata", {}) + + self._validate_metadata(metadata) + self._decode_personalities(personalities) + + try: + self.compute_api.rebuild(context, instance_id, image_id, metadata, + personalities) + except exception.BuildInProgress: + msg = _("Instance %d is currently being rebuilt.") % instance_id + LOG.debug(msg) + return faults.Fault(exc.HTTPConflict(explanation=msg)) + + response = exc.HTTPAccepted() + response.empty_body = True + return response + + def _get_server_admin_password(self, server): + """ Determine the admin password for a server on creation """ + password = server.get('adminPass') + if password is None: + return utils.generate_password(16) + if not isinstance(password, basestring) or password == '': + msg = _("Invalid adminPass") + raise exc.HTTPBadRequest(msg) + return 
password + def get_default_xmlns(self, req): return common.XML_NS_V11 @@ -670,8 +757,9 @@ class ServerCreateRequestXMLDeserializer(object): """Marshal the server attribute of a parsed request""" server = {} server_node = self._find_first_child_named(node, 'server') - for attr in ["name", "imageId", "flavorId"]: - server[attr] = server_node.getAttribute(attr) + for attr in ["name", "imageId", "flavorId", "imageRef", "flavorRef"]: + if server_node.getAttribute(attr): + server[attr] = server_node.getAttribute(attr) metadata = self._extract_metadata(server_node) if metadata is not None: server["metadata"] = metadata diff --git a/nova/api/openstack/users.py b/nova/api/openstack/users.py index 077ccfc79..7ae4c3232 100644 --- a/nova/api/openstack/users.py +++ b/nova/api/openstack/users.py @@ -48,7 +48,7 @@ class Controller(common.OpenstackController): """We cannot depend on the db layer to check for admin access for the auth manager, so we do it here""" if not context.is_admin: - raise exception.NotAuthorized(_("Not admin user")) + raise exception.AdminRequired() def index(self, req): """Return all users in brief""" diff --git a/nova/api/openstack/views/addresses.py b/nova/api/openstack/views/addresses.py index 90c77855b..2810cce39 100644 --- a/nova/api/openstack/views/addresses.py +++ b/nova/api/openstack/views/addresses.py @@ -28,10 +28,16 @@ class ViewBuilder(object): class ViewBuilderV10(ViewBuilder): def build(self, inst): - private_ips = utils.get_from_path(inst, 'fixed_ip/address') - public_ips = utils.get_from_path(inst, 'fixed_ip/floating_ips/address') + private_ips = self.build_private_parts(inst) + public_ips = self.build_public_parts(inst) return dict(public=public_ips, private=private_ips) + def build_public_parts(self, inst): + return utils.get_from_path(inst, 'fixed_ip/floating_ips/address') + + def build_private_parts(self, inst): + return utils.get_from_path(inst, 'fixed_ip/address') + class ViewBuilderV11(ViewBuilder): def build(self, inst): diff --git 
a/nova/api/openstack/views/images.py b/nova/api/openstack/views/images.py index 16195b050..2773c9c13 100644 --- a/nova/api/openstack/views/images.py +++ b/nova/api/openstack/views/images.py @@ -34,11 +34,11 @@ class ViewBuilder(object): def _format_status(self, image): """Update the status field to standardize format.""" status_mapping = { - 'pending': 'queued', - 'decrypting': 'preparing', - 'untarring': 'saving', - 'available': 'active', - 'killed': 'failed', + 'pending': 'QUEUED', + 'decrypting': 'PREPARING', + 'untarring': 'SAVING', + 'available': 'ACTIVE', + 'killed': 'FAILED', } try: @@ -46,6 +46,14 @@ class ViewBuilder(object): except KeyError: image['status'] = image['status'].upper() + def _build_server(self, image, instance_id): + """Indicates that you must use a ViewBuilder subclass.""" + raise NotImplementedError + + def generate_server_ref(self, server_id): + """Return an href string pointing to this server.""" + return os.path.join(self._url, "servers", str(server_id)) + def generate_href(self, image_id): """Return an href string pointing to this object.""" return os.path.join(self._url, "images", str(image_id)) @@ -66,7 +74,7 @@ class ViewBuilder(object): if "instance_id" in properties: try: - image["serverId"] = int(properties["instance_id"]) + self._build_server(image, int(properties["instance_id"])) except ValueError: pass @@ -85,12 +93,17 @@ class ViewBuilder(object): class ViewBuilderV10(ViewBuilder): """OpenStack API v1.0 Image Builder""" - pass + + def _build_server(self, image, instance_id): + image["serverId"] = instance_id class ViewBuilderV11(ViewBuilder): """OpenStack API v1.1 Image Builder""" + def _build_server(self, image, instance_id): + image["serverRef"] = self.generate_server_ref(instance_id) + def build(self, image_obj, detail=False): """Return a standardized image structure for display by the API.""" image = ViewBuilder.build(self, image_obj, detail) diff --git a/nova/api/openstack/views/limits.py 
b/nova/api/openstack/views/limits.py new file mode 100644 index 000000000..e21c9f2fd --- /dev/null +++ b/nova/api/openstack/views/limits.py @@ -0,0 +1,131 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010-2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import time + +from nova.api.openstack import common + + +class ViewBuilder(object): + """Openstack API base limits view builder.""" + + def _build_rate_limits(self, rate_limits): + raise NotImplementedError() + + def _build_rate_limit(self, rate_limit): + raise NotImplementedError() + + def _build_absolute_limits(self, absolute_limit): + raise NotImplementedError() + + def build(self, rate_limits, absolute_limits): + rate_limits = self._build_rate_limits(rate_limits) + absolute_limits = self._build_absolute_limits(absolute_limits) + + output = { + "limits": { + "rate": rate_limits, + "absolute": absolute_limits, + }, + } + + return output + + def _build_absolute_limits(self, absolute_limits): + """Builder for absolute limits + + absolute_limits should be given as a dict of limits. + For example: {"ram": 512, "gigabytes": 1024}. 
+ + """ + limit_names = { + "ram": ["maxTotalRAMSize"], + "instances": ["maxTotalInstances"], + "cores": ["maxTotalCores"], + "metadata_items": ["maxServerMeta", "maxImageMeta"], + "injected_files": ["maxPersonality"], + "injected_file_content_bytes": ["maxPersonalitySize"], + } + limits = {} + for name, value in absolute_limits.iteritems(): + if name in limit_names and value is not None: + for name in limit_names[name]: + limits[name] = value + return limits + + def _build_rate_limits(self, rate_limits): + raise NotImplementedError() + + def _build_rate_limit(self, rate_limit): + raise NotImplementedError() + + +class ViewBuilderV10(ViewBuilder): + """Openstack API v1.0 limits view builder.""" + + def _build_rate_limits(self, rate_limits): + return [self._build_rate_limit(r) for r in rate_limits] + + def _build_rate_limit(self, rate_limit): + return { + "verb": rate_limit["verb"], + "URI": rate_limit["URI"], + "regex": rate_limit["regex"], + "value": rate_limit["value"], + "remaining": int(rate_limit["remaining"]), + "unit": rate_limit["unit"], + "resetTime": rate_limit["resetTime"], + } + + +class ViewBuilderV11(ViewBuilder): + """Openstack API v1.1 limits view builder.""" + + def _build_rate_limits(self, rate_limits): + limits = [] + for rate_limit in rate_limits: + _rate_limit_key = None + _rate_limit = self._build_rate_limit(rate_limit) + + # check for existing key + for limit in limits: + if limit["uri"] == rate_limit["URI"] and \ + limit["regex"] == rate_limit["regex"]: + _rate_limit_key = limit + break + + # ensure we have a key if we didn't find one + if not _rate_limit_key: + _rate_limit_key = { + "uri": rate_limit["URI"], + "regex": rate_limit["regex"], + "limit": [], + } + limits.append(_rate_limit_key) + + _rate_limit_key["limit"].append(_rate_limit) + + return limits + + def _build_rate_limit(self, rate_limit): + return { + "verb": rate_limit["verb"], + "value": rate_limit["value"], + "remaining": int(rate_limit["remaining"]), + "unit": 
rate_limit["unit"], + "next-available": rate_limit["resetTime"], + } diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py index d24c025be..0be468edc 100644 --- a/nova/api/openstack/views/servers.py +++ b/nova/api/openstack/views/servers.py @@ -63,10 +63,12 @@ class ViewBuilder(object): power_state.BLOCKED: 'ACTIVE', power_state.SUSPENDED: 'SUSPENDED', power_state.PAUSED: 'PAUSED', - power_state.SHUTDOWN: 'ACTIVE', - power_state.SHUTOFF: 'ACTIVE', + power_state.SHUTDOWN: 'SHUTDOWN', + power_state.SHUTOFF: 'SHUTOFF', power_state.CRASHED: 'ERROR', - power_state.FAILED: 'ERROR'} + power_state.FAILED: 'ERROR', + power_state.BUILDING: 'BUILD', + } inst_dict = { 'id': int(inst['id']), @@ -82,7 +84,7 @@ class ViewBuilder(object): # Return the metadata as a dictionary metadata = {} for item in inst.get('metadata', []): - metadata[item['key']] = item['value'] + metadata[item['key']] = str(item['value']) inst_dict['metadata'] = metadata inst_dict['hostId'] = '' @@ -115,7 +117,7 @@ class ViewBuilderV10(ViewBuilder): def _build_flavor(self, response, inst): if 'instance_type' in dict(inst): - response['flavorId'] = inst['instance_type'] + response['flavorId'] = inst['instance_type']['flavorid'] class ViewBuilderV11(ViewBuilder): @@ -134,7 +136,7 @@ class ViewBuilderV11(ViewBuilder): def _build_flavor(self, response, inst): if "instance_type" in dict(inst): - flavor_id = inst["instance_type"] + flavor_id = inst["instance_type"]['flavorid'] flavor_ref = self.flavor_builder.generate_href(flavor_id) response["flavorRef"] = flavor_ref diff --git a/nova/api/openstack/zones.py b/nova/api/openstack/zones.py index 227ffecdc..af73d8f6d 100644 --- a/nova/api/openstack/zones.py +++ b/nova/api/openstack/zones.py @@ -13,7 +13,12 @@ # License for the specific language governing permissions and limitations # under the License. 
+import json +import urlparse + +from nova import crypto from nova import db +from nova import exception from nova import flags from nova import log as logging from nova.api.openstack import common @@ -21,6 +26,12 @@ from nova.scheduler import api FLAGS = flags.FLAGS +flags.DEFINE_string('build_plan_encryption_key', + None, + '128bit (hex) encryption key for scheduler build plans.') + + +LOG = logging.getLogger('nova.api.openstack.zones') def _filter_keys(item, keys): @@ -97,3 +108,35 @@ class Controller(common.OpenstackController): zone_id = int(id) zone = api.zone_update(context, zone_id, env["zone"]) return dict(zone=_scrub_zone(zone)) + + def select(self, req): + """Returns a weighted list of costs to create instances + of desired capabilities.""" + ctx = req.environ['nova.context'] + qs = req.environ['QUERY_STRING'] + param_dict = urlparse.parse_qs(qs) + param_dict.pop("fresh", None) + # parse_qs returns a dict where the values are lists, + # since query strings can have multiple values for the + # same key. We need to convert that to single values. + for key in param_dict: + param_dict[key] = param_dict[key][0] + build_plan = api.select(ctx, specs=param_dict) + cooked = self._scrub_build_plan(build_plan) + return {"weights": cooked} + + def _scrub_build_plan(self, build_plan): + """Remove all the confidential data and return a sanitized + version of the build plan. 
Include an encrypted full version + of the weighting entry so we can get back to it later.""" + if not FLAGS.build_plan_encryption_key: + raise exception.FlagNotSet(flag='build_plan_encryption_key') + + encryptor = crypto.encryptor(FLAGS.build_plan_encryption_key) + cooked = [] + for entry in build_plan: + json_entry = json.dumps(entry) + cipher_text = encryptor(json_entry) + cooked.append(dict(weight=entry['weight'], + blob=cipher_text)) + return cooked diff --git a/nova/auth/dbdriver.py b/nova/auth/dbdriver.py index d1e3f2ed5..a429b7812 100644 --- a/nova/auth/dbdriver.py +++ b/nova/auth/dbdriver.py @@ -81,7 +81,7 @@ class DbDriver(object): user_ref = db.user_create(context.get_admin_context(), values) return self._db_user_to_auth_user(user_ref) except exception.Duplicate, e: - raise exception.Duplicate(_('User %s already exists') % name) + raise exception.UserExists(user=name) def _db_user_to_auth_user(self, user_ref): return {'id': user_ref['id'], @@ -103,9 +103,7 @@ class DbDriver(object): """Create a project""" manager = db.user_get(context.get_admin_context(), manager_uid) if not manager: - raise exception.NotFound(_("Project can't be created because " - "manager %s doesn't exist") - % manager_uid) + raise exception.UserNotFound(user_id=manager_uid) # description is a required attribute if description is None: @@ -115,13 +113,11 @@ class DbDriver(object): # on to create the project. This way we won't have to destroy # the project again because a user turns out to be invalid. 
members = set([manager]) - if member_uids != None: + if member_uids is not None: for member_uid in member_uids: member = db.user_get(context.get_admin_context(), member_uid) if not member: - raise exception.NotFound(_("Project can't be created " - "because user %s doesn't exist") - % member_uid) + raise exception.UserNotFound(user_id=member_uid) members.add(member) values = {'id': name, @@ -132,8 +128,7 @@ class DbDriver(object): try: project = db.project_create(context.get_admin_context(), values) except exception.Duplicate: - raise exception.Duplicate(_("Project can't be created because " - "project %s already exists") % name) + raise exception.ProjectExists(project=name) for member in members: db.project_add_member(context.get_admin_context(), @@ -154,9 +149,7 @@ class DbDriver(object): if manager_uid: manager = db.user_get(context.get_admin_context(), manager_uid) if not manager: - raise exception.NotFound(_("Project can't be modified because " - "manager %s doesn't exist") % - manager_uid) + raise exception.UserNotFound(user_id=manager_uid) values['project_manager'] = manager['id'] if description: values['description'] = description @@ -244,8 +237,8 @@ class DbDriver(object): def _validate_user_and_project(self, user_id, project_id): user = db.user_get(context.get_admin_context(), user_id) if not user: - raise exception.NotFound(_('User "%s" not found') % user_id) + raise exception.UserNotFound(user_id=user_id) project = db.project_get(context.get_admin_context(), project_id) if not project: - raise exception.NotFound(_('Project "%s" not found') % project_id) + raise exception.ProjectNotFound(project_id=project_id) return user, project diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index fcac55510..3f8432851 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -171,7 +171,7 @@ class LdapDriver(object): def create_user(self, name, access_key, secret_key, is_admin): """Create a user""" if self.__user_exists(name): - raise 
exception.Duplicate(_("LDAP user %s already exists") % name) + raise exception.LDAPUserExists(user=name) if FLAGS.ldap_user_modify_only: if self.__ldap_user_exists(name): # Retrieve user by name @@ -202,8 +202,7 @@ class LdapDriver(object): self.conn.modify_s(self.__uid_to_dn(name), attr) return self.get_user(name) else: - raise exception.NotFound(_("LDAP object for %s doesn't exist") - % name) + raise exception.LDAPUserNotFound(user_id=name) else: attr = [ ('objectclass', ['person', @@ -226,12 +225,9 @@ class LdapDriver(object): description=None, member_uids=None): """Create a project""" if self.__project_exists(name): - raise exception.Duplicate(_("Project can't be created because " - "project %s already exists") % name) + raise exception.ProjectExists(project=name) if not self.__user_exists(manager_uid): - raise exception.NotFound(_("Project can't be created because " - "manager %s doesn't exist") - % manager_uid) + raise exception.LDAPUserNotFound(user_id=manager_uid) manager_dn = self.__uid_to_dn(manager_uid) # description is a required attribute if description is None: @@ -240,9 +236,7 @@ class LdapDriver(object): if member_uids is not None: for member_uid in member_uids: if not self.__user_exists(member_uid): - raise exception.NotFound(_("Project can't be created " - "because user %s doesn't exist") - % member_uid) + raise exception.LDAPUserNotFound(user_id=member_uid) members.append(self.__uid_to_dn(member_uid)) # always add the manager as a member because members is required if not manager_dn in members: @@ -265,9 +259,7 @@ class LdapDriver(object): attr = [] if manager_uid: if not self.__user_exists(manager_uid): - raise exception.NotFound(_("Project can't be modified because " - "manager %s doesn't exist") - % manager_uid) + raise exception.LDAPUserNotFound(user_id=manager_uid) manager_dn = self.__uid_to_dn(manager_uid) attr.append((self.ldap.MOD_REPLACE, LdapDriver.project_attribute, manager_dn)) @@ -347,7 +339,7 @@ class LdapDriver(object): def 
delete_user(self, uid): """Delete a user""" if not self.__user_exists(uid): - raise exception.NotFound(_("User %s doesn't exist") % uid) + raise exception.LDAPUserNotFound(user_id=uid) self.__remove_from_all(uid) if FLAGS.ldap_user_modify_only: # Delete attributes @@ -471,15 +463,12 @@ class LdapDriver(object): description, member_uids=None): """Create a group""" if self.__group_exists(group_dn): - raise exception.Duplicate(_("Group can't be created because " - "group %s already exists") % name) + raise exception.LDAPGroupExists(group=name) members = [] if member_uids is not None: for member_uid in member_uids: if not self.__user_exists(member_uid): - raise exception.NotFound(_("Group can't be created " - "because user %s doesn't exist") - % member_uid) + raise exception.LDAPUserNotFound(user_id=member_uid) members.append(self.__uid_to_dn(member_uid)) dn = self.__uid_to_dn(uid) if not dn in members: @@ -494,8 +483,7 @@ class LdapDriver(object): def __is_in_group(self, uid, group_dn): """Check if user is in group""" if not self.__user_exists(uid): - raise exception.NotFound(_("User %s can't be searched in group " - "because the user doesn't exist") % uid) + raise exception.LDAPUserNotFound(user_id=uid) if not self.__group_exists(group_dn): return False res = self.__find_object(group_dn, @@ -506,29 +494,23 @@ class LdapDriver(object): def __add_to_group(self, uid, group_dn): """Add user to group""" if not self.__user_exists(uid): - raise exception.NotFound(_("User %s can't be added to the group " - "because the user doesn't exist") % uid) + raise exception.LDAPUserNotFound(user_id=uid) if not self.__group_exists(group_dn): - raise exception.NotFound(_("The group at dn %s doesn't exist") % - group_dn) + raise exception.LDAPGroupNotFound(group_id=group_dn) if self.__is_in_group(uid, group_dn): - raise exception.Duplicate(_("User %(uid)s is already a member of " - "the group %(group_dn)s") % locals()) + raise exception.LDAPMembershipExists(uid=uid, group_dn=group_dn) 
attr = [(self.ldap.MOD_ADD, 'member', self.__uid_to_dn(uid))] self.conn.modify_s(group_dn, attr) def __remove_from_group(self, uid, group_dn): """Remove user from group""" if not self.__group_exists(group_dn): - raise exception.NotFound(_("The group at dn %s doesn't exist") - % group_dn) + raise exception.LDAPGroupNotFound(group_id=group_dn) if not self.__user_exists(uid): - raise exception.NotFound(_("User %s can't be removed from the " - "group because the user doesn't exist") - % uid) + raise exception.LDAPUserNotFound(user_id=uid) if not self.__is_in_group(uid, group_dn): - raise exception.NotFound(_("User %s is not a member of the group") - % uid) + raise exception.LDAPGroupMembershipNotFound(user_id=uid, + group_id=group_dn) # NOTE(vish): remove user from group and any sub_groups sub_dns = self.__find_group_dns_with_member(group_dn, uid) for sub_dn in sub_dns: @@ -548,9 +530,7 @@ class LdapDriver(object): def __remove_from_all(self, uid): """Remove user from all roles and projects""" if not self.__user_exists(uid): - raise exception.NotFound(_("User %s can't be removed from all " - "because the user doesn't exist") - % uid) + raise exception.LDAPUserNotFound(user_id=uid) role_dns = self.__find_group_dns_with_member( FLAGS.role_project_subtree, uid) for role_dn in role_dns: @@ -563,8 +543,7 @@ class LdapDriver(object): def __delete_group(self, group_dn): """Delete Group""" if not self.__group_exists(group_dn): - raise exception.NotFound(_("Group at dn %s doesn't exist") - % group_dn) + raise exception.LDAPGroupNotFound(group_id=group_dn) self.conn.delete_s(group_dn) def __delete_roles(self, project_dn): diff --git a/nova/auth/manager.py b/nova/auth/manager.py index 486845399..07235a2a7 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -223,6 +223,13 @@ class AuthManager(object): if driver or not getattr(self, 'driver', None): self.driver = utils.import_class(driver or FLAGS.auth_driver) + if FLAGS.memcached_servers: + import memcache + else: + 
from nova import fakememcache as memcache + self.mc = memcache.Client(FLAGS.memcached_servers, + debug=0) + def authenticate(self, access, signature, params, verb='GET', server_string='127.0.0.1:8773', path='/', check_type='ec2', headers=None): @@ -268,10 +275,9 @@ class AuthManager(object): LOG.debug(_('Looking up user: %r'), access_key) user = self.get_user_from_access_key(access_key) LOG.debug('user: %r', user) - if user == None: + if user is None: LOG.audit(_("Failed authorization for access key %s"), access_key) - raise exception.NotFound(_('No user found for access key %s') - % access_key) + raise exception.AccessKeyNotFound(access_key=access_key) # NOTE(vish): if we stop using project name as id we need better # logic to find a default project for user @@ -280,13 +286,12 @@ class AuthManager(object): project_id = user.name project = self.get_project(project_id) - if project == None: + if project is None: pjid = project_id uname = user.name LOG.audit(_("failed authorization: no project named %(pjid)s" " (user=%(uname)s)") % locals()) - raise exception.NotFound(_('No project called %s could be found') - % project_id) + raise exception.ProjectNotFound(project_id=project_id) if not self.is_admin(user) and not self.is_project_member(user, project): uname = user.name @@ -295,28 +300,40 @@ class AuthManager(object): pjid = project.id LOG.audit(_("Failed authorization: user %(uname)s not admin" " and not member of project %(pjname)s") % locals()) - raise exception.NotFound(_('User %(uid)s is not a member of' - ' project %(pjid)s') % locals()) + raise exception.ProjectMembershipNotFound(project_id=pjid, + user_id=uid) if check_type == 's3': sign = signer.Signer(user.secret.encode()) expected_signature = sign.s3_authorization(headers, verb, path) - LOG.debug('user.secret: %s', user.secret) - LOG.debug('expected_signature: %s', expected_signature) - LOG.debug('signature: %s', signature) + LOG.debug(_('user.secret: %s'), user.secret) + LOG.debug(_('expected_signature: 
%s'), expected_signature) + LOG.debug(_('signature: %s'), signature) if signature != expected_signature: LOG.audit(_("Invalid signature for user %s"), user.name) - raise exception.NotAuthorized(_('Signature does not match')) + raise exception.InvalidSignature(signature=signature, + user=user) elif check_type == 'ec2': # NOTE(vish): hmac can't handle unicode, so encode ensures that # secret isn't unicode expected_signature = signer.Signer(user.secret.encode()).generate( params, verb, server_string, path) - LOG.debug('user.secret: %s', user.secret) - LOG.debug('expected_signature: %s', expected_signature) - LOG.debug('signature: %s', signature) + LOG.debug(_('user.secret: %s'), user.secret) + LOG.debug(_('expected_signature: %s'), expected_signature) + LOG.debug(_('signature: %s'), signature) if signature != expected_signature: + (addr_str, port_str) = utils.parse_server_string(server_string) + # If the given server_string contains port num, try without it. + if port_str != '': + host_only_signature = signer.Signer( + user.secret.encode()).generate(params, verb, + addr_str, path) + LOG.debug(_('host_only_signature: %s'), + host_only_signature) + if signature == host_only_signature: + return (user, project) LOG.audit(_("Invalid signature for user %s"), user.name) - raise exception.NotAuthorized(_('Signature does not match')) + raise exception.InvalidSignature(signature=signature, + user=user) return (user, project) def get_access_key(self, user, project): @@ -360,6 +377,27 @@ class AuthManager(object): if self.has_role(user, role): return True + def _build_mc_key(self, user, role, project=None): + key_parts = ['rolecache', User.safe_id(user), str(role)] + if project: + key_parts.append(Project.safe_id(project)) + return '-'.join(key_parts) + + def _clear_mc_key(self, user, role, project=None): + # NOTE(anthony): it would be better to delete the key + self.mc.set(self._build_mc_key(user, role, project), None) + + def _has_role(self, user, role, project=None): + mc_key 
= self._build_mc_key(user, role, project) + rslt = self.mc.get(mc_key) + if rslt is None: + with self.driver() as drv: + rslt = drv.has_role(user, role, project) + self.mc.set(mc_key, rslt) + return rslt + else: + return rslt + def has_role(self, user, role, project=None): """Checks existence of role for user @@ -383,24 +421,24 @@ class AuthManager(object): @rtype: bool @return: True if the user has the role. """ - with self.driver() as drv: - if role == 'projectmanager': - if not project: - raise exception.Error(_("Must specify project")) - return self.is_project_manager(user, project) + if role == 'projectmanager': + if not project: + raise exception.Error(_("Must specify project")) + return self.is_project_manager(user, project) + + global_role = self._has_role(User.safe_id(user), + role, + None) - global_role = drv.has_role(User.safe_id(user), - role, - None) - if not global_role: - return global_role + if not global_role: + return global_role - if not project or role in FLAGS.global_roles: - return global_role + if not project or role in FLAGS.global_roles: + return global_role - return drv.has_role(User.safe_id(user), - role, - Project.safe_id(project)) + return self._has_role(User.safe_id(user), + role, + Project.safe_id(project)) def add_role(self, user, role, project=None): """Adds role for user @@ -420,9 +458,9 @@ class AuthManager(object): @param project: Project in which to add local role. 
""" if role not in FLAGS.allowed_roles: - raise exception.NotFound(_("The %s role can not be found") % role) + raise exception.UserRoleNotFound(role_id=role) if project is not None and role in FLAGS.global_roles: - raise exception.NotFound(_("The %s role is global only") % role) + raise exception.GlobalRoleNotAllowed(role_id=role) uid = User.safe_id(user) pid = Project.safe_id(project) if project: @@ -432,6 +470,7 @@ class AuthManager(object): LOG.audit(_("Adding sitewide role %(role)s to user %(uid)s") % locals()) with self.driver() as drv: + self._clear_mc_key(uid, role, pid) drv.add_role(uid, role, pid) def remove_role(self, user, role, project=None): @@ -460,6 +499,7 @@ class AuthManager(object): LOG.audit(_("Removing sitewide role %(role)s" " from user %(uid)s") % locals()) with self.driver() as drv: + self._clear_mc_key(uid, role, pid) drv.remove_role(uid, role, pid) @staticmethod @@ -646,9 +686,9 @@ class AuthManager(object): @rtype: User @return: The new user. """ - if access == None: + if access is None: access = str(uuid.uuid4()) - if secret == None: + if secret is None: secret = str(uuid.uuid4()) with self.driver() as drv: user_dict = drv.create_user(name, access, secret, admin) diff --git a/nova/cloudpipe/pipelib.py b/nova/cloudpipe/pipelib.py index dc6f55af2..7844d31e1 100644 --- a/nova/cloudpipe/pipelib.py +++ b/nova/cloudpipe/pipelib.py @@ -101,12 +101,13 @@ class CloudPipe(object): key_name = self.setup_key_pair(ctxt) group_name = self.setup_security_group(ctxt) + ec2_id = self.controller.image_ec2_id(FLAGS.vpn_image_id) reservation = self.controller.run_instances(ctxt, user_data=self.get_encoded_zip(project_id), max_count=1, min_count=1, instance_type='m1.tiny', - image_id=FLAGS.vpn_image_id, + image_id=ec2_id, key_name=key_name, security_group=[group_name]) diff --git a/nova/compute/api.py b/nova/compute/api.py index bca6863d6..e9e8e10e7 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -16,11 +16,10 @@ # License for the specific 
language governing permissions and limitations # under the License. -""" -Handles all requests relating to instances (guest vms). -""" +"""Handles all requests relating to instances (guest vms).""" import datetime +import eventlet import re import time @@ -34,6 +33,7 @@ from nova import rpc from nova import utils from nova import volume from nova.compute import instance_types +from nova.compute import power_state from nova.scheduler import api as scheduler_api from nova.db import base @@ -43,6 +43,8 @@ LOG = logging.getLogger('nova.compute.api') FLAGS = flags.FLAGS flags.DECLARE('vncproxy_topic', 'nova.vnc') +flags.DEFINE_integer('find_host_timeout', 30, + 'Timeout after NN seconds when looking for a host.') def generate_default_hostname(instance_id): @@ -86,68 +88,78 @@ class API(base.Base): {"method": "get_network_topic", "args": {'fake': 1}}) def _check_injected_file_quota(self, context, injected_files): - """ - Enforce quota limits on injected files + """Enforce quota limits on injected files. + + Raises a QuotaError if any limit is exceeded. 
- Raises a QuotaError if any limit is exceeded """ if injected_files is None: return - limit = quota.allowed_injected_files(context) + limit = quota.allowed_injected_files(context, len(injected_files)) if len(injected_files) > limit: raise quota.QuotaError(code="OnsetFileLimitExceeded") path_limit = quota.allowed_injected_file_path_bytes(context) - content_limit = quota.allowed_injected_file_content_bytes(context) for path, content in injected_files: if len(path) > path_limit: raise quota.QuotaError(code="OnsetFilePathLimitExceeded") + content_limit = quota.allowed_injected_file_content_bytes( + context, len(content)) if len(content) > content_limit: raise quota.QuotaError(code="OnsetFileContentLimitExceeded") - def create(self, context, instance_type, - image_id, kernel_id=None, ramdisk_id=None, - min_count=1, max_count=1, - display_name='', display_description='', - key_name=None, key_data=None, security_group='default', - availability_zone=None, user_data=None, metadata=[], - injected_files=None): - """Create the number of instances requested if quota and - other arguments check out ok.""" - - type_data = instance_types.get_instance_type(instance_type) - num_instances = quota.allowed_instances(context, max_count, type_data) - if num_instances < min_count: - pid = context.project_id - LOG.warn(_("Quota exceeeded for %(pid)s," - " tried to run %(min_count)s instances") % locals()) - raise quota.QuotaError(_("Instance quota exceeded. 
You can only " - "run %s more instances of this type.") % - num_instances, "InstanceLimitExceeded") - + def _check_metadata_properties_quota(self, context, metadata={}): + """Enforce quota limits on metadata properties.""" num_metadata = len(metadata) quota_metadata = quota.allowed_metadata_items(context, num_metadata) if quota_metadata < num_metadata: pid = context.project_id - msg = (_("Quota exceeeded for %(pid)s," - " tried to set %(num_metadata)s metadata properties") - % locals()) + msg = _("Quota exceeeded for %(pid)s, tried to set " + "%(num_metadata)s metadata properties") % locals() LOG.warn(msg) raise quota.QuotaError(msg, "MetadataLimitExceeded") # Because metadata is stored in the DB, we hard-code the size limits # In future, we may support more variable length strings, so we act # as if this is quota-controlled for forwards compatibility - for metadata_item in metadata: - k = metadata_item['key'] - v = metadata_item['value'] + for k, v in metadata.iteritems(): if len(k) > 255 or len(v) > 255: pid = context.project_id - msg = (_("Quota exceeeded for %(pid)s," - " metadata property key or value too long") - % locals()) + msg = _("Quota exceeeded for %(pid)s, metadata property " + "key or value too long") % locals() LOG.warn(msg) raise quota.QuotaError(msg, "MetadataLimitExceeded") + def create(self, context, instance_type, + image_id, kernel_id=None, ramdisk_id=None, + min_count=1, max_count=1, + display_name='', display_description='', + key_name=None, key_data=None, security_group='default', + availability_zone=None, user_data=None, metadata={}, + injected_files=None, + admin_password=None): + """Create the number and type of instances requested. + + Verifies that quota and other arguments are valid. 
+ + """ + if not instance_type: + instance_type = instance_types.get_default_instance_type() + + num_instances = quota.allowed_instances(context, max_count, + instance_type) + if num_instances < min_count: + pid = context.project_id + LOG.warn(_("Quota exceeeded for %(pid)s," + " tried to run %(min_count)s instances") % locals()) + if num_instances <= 0: + message = _("Instance quota exceeded. You cannot run any " + "more instances of this type.") + else: + message = _("Instance quota exceeded. You can only run %s " + "more instances of this type.") % num_instances + raise quota.QuotaError(message, "InstanceLimitExceeded") + + self._check_metadata_properties_quota(context, metadata) self._check_injected_file_quota(context, injected_files) image = self.image_service.show(context, image_id) @@ -201,10 +213,10 @@ class API(base.Base): 'user_id': context.user_id, 'project_id': context.project_id, 'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()), - 'instance_type': instance_type, - 'memory_mb': type_data['memory_mb'], - 'vcpus': type_data['vcpus'], - 'local_gb': type_data['local_gb'], + 'instance_type_id': instance_type['id'], + 'memory_mb': instance_type['memory_mb'], + 'vcpus': instance_type['vcpus'], + 'local_gb': instance_type['local_gb'], 'display_name': display_name, 'display_description': display_description, 'user_data': user_data or '', @@ -235,7 +247,7 @@ class API(base.Base): # Set sane defaults if not specified updates = dict(hostname=self.hostname_factory(instance_id)) if (not hasattr(instance, 'display_name') or - instance.display_name == None): + instance.display_name is None): updates['display_name'] = "Server %s" % instance_id instance = self.update(context, instance_id, **updates) @@ -245,13 +257,21 @@ class API(base.Base): uid = context.user_id LOG.debug(_("Casting to scheduler for %(pid)s/%(uid)s's" " instance %(instance_id)s") % locals()) + + # NOTE(sandy): For now we're just going to pass in the + # instance_type record to the 
scheduler. In a later phase + # we'll be ripping this whole for-loop out and deferring the + # creation of the Instance record. At that point all this will + # change. rpc.cast(context, FLAGS.scheduler_topic, {"method": "run_instance", "args": {"topic": FLAGS.compute_topic, "instance_id": instance_id, + "instance_type": instance_type, "availability_zone": availability_zone, - "injected_files": injected_files}}) + "injected_files": injected_files, + "admin_password": admin_password}}) for group_id in security_groups: self.trigger_security_group_members_refresh(elevated, group_id) @@ -259,8 +279,7 @@ class API(base.Base): return [dict(x.iteritems()) for x in instances] def has_finished_migration(self, context, instance_id): - """Retrieves whether or not a finished migration exists for - an instance""" + """Returns true if an instance has a finished migration.""" try: db.migration_get_by_instance_and_status(context, instance_id, 'finished') @@ -269,8 +288,10 @@ class API(base.Base): return False def ensure_default_security_group(self, context): - """ Create security group for the security context if it - does not already exist + """Ensure that a context has a security group. + + Creates a security group for the security context if it does not + already exist. :param context: the security context @@ -286,7 +307,7 @@ class API(base.Base): db.security_group_create(context, values) def trigger_security_group_rules_refresh(self, context, security_group_id): - """Called when a rule is added to or removed from a security_group""" + """Called when a rule is added to or removed from a security_group.""" security_group = self.db.security_group_get(context, security_group_id) @@ -302,11 +323,12 @@ class API(base.Base): "args": {"security_group_id": security_group.id}}) def trigger_security_group_members_refresh(self, context, group_id): - """Called when a security group gains a new or loses a member + """Called when a security group gains a new or loses a member. 
Sends an update request to each compute node for whom this is - relevant.""" + relevant. + """ # First, we get the security group rules that reference this group as # the grantee.. security_group_rules = \ @@ -361,7 +383,7 @@ class API(base.Base): as data fields of the instance to be updated - :retval None + :returns: None """ rv = self.db.instance_update(context, instance_id, kwargs) @@ -369,6 +391,7 @@ class API(base.Base): @scheduler_api.reroute_compute("delete") def delete(self, context, instance_id): + """Terminate an instance.""" LOG.debug(_("Going to try to terminate %s"), instance_id) try: instance = self.get(context, instance_id) @@ -377,11 +400,15 @@ class API(base.Base): instance_id) raise - if (instance['state_description'] == 'terminating'): + if instance['state_description'] == 'terminating': LOG.warning(_("Instance %s is already being terminated"), instance_id) return + if instance['state_description'] == 'migrating': + LOG.warning(_("Instance %s is being migrated"), instance_id) + return + self.update(context, instance['id'], state_description='terminating', @@ -396,22 +423,28 @@ class API(base.Base): self.db.instance_destroy(context, instance_id) def get(self, context, instance_id): - """Get a single instance with the given ID.""" + """Get a single instance with the given instance_id.""" rv = self.db.instance_get(context, instance_id) return dict(rv.iteritems()) @scheduler_api.reroute_compute("get") def routing_get(self, context, instance_id): - """Use this method instead of get() if this is the only - operation you intend to to. It will route to novaclient.get - if the instance is not found.""" + """A version of get with special routing characteristics. + + Use this method instead of get() if this is the only operation you + intend to to. It will route to novaclient.get if the instance is not + found. 
+ + """ return self.get(context, instance_id) def get_all(self, context, project_id=None, reservation_id=None, fixed_ip=None): - """Get all instances, possibly filtered by one of the - given parameters. If there is no filter and the context is - an admin, it will retreive all instances in the system. + """Get all instances filtered by one of the given parameters. + + If there is no filter and the context is an admin, it will retreive + all instances in the system. + """ if reservation_id is not None: return self.db.instance_get_all_by_reservation( @@ -440,7 +473,8 @@ class API(base.Base): :param params: Optional dictionary of arguments to be passed to the compute worker - :retval None + :returns: None + """ if not params: params = {} @@ -459,7 +493,7 @@ class API(base.Base): :param params: Optional dictionary of arguments to be passed to the compute worker - :retval: Result returned by compute worker + :returns: Result returned by compute worker """ if not params: params = {} @@ -472,13 +506,25 @@ class API(base.Base): return rpc.call(context, queue, kwargs) def _cast_scheduler_message(self, context, args): - """Generic handler for RPC calls to the scheduler""" + """Generic handler for RPC calls to the scheduler.""" rpc.cast(context, FLAGS.scheduler_topic, args) + def _find_host(self, context, instance_id): + """Find the host associated with an instance.""" + for attempts in xrange(FLAGS.find_host_timeout): + instance = self.get(context, instance_id) + host = instance["host"] + if host: + return host + time.sleep(1) + raise exception.Error(_("Unable to find host for Instance %s") + % instance_id) + def snapshot(self, context, instance_id, name): """Snapshot the given instance. 
- :retval: A dict containing image metadata + :returns: A dict containing image metadata + """ properties = {'instance_id': str(instance_id), 'user_id': str(context.user_id)} @@ -494,14 +540,41 @@ class API(base.Base): """Reboot the given instance.""" self._cast_compute_message('reboot_instance', context, instance_id) + def rebuild(self, context, instance_id, image_id, metadata=None, + files_to_inject=None): + """Rebuild the given instance with the provided metadata.""" + instance = db.api.instance_get(context, instance_id) + + if instance["state"] == power_state.BUILDING: + msg = _("Instance already building") + raise exception.BuildInProgress(msg) + + metadata = metadata or {} + self._check_metadata_properties_quota(context, metadata) + + files_to_inject = files_to_inject or [] + self._check_injected_file_quota(context, files_to_inject) + + self.db.instance_update(context, instance_id, {"metadata": metadata}) + + rebuild_params = { + "image_id": image_id, + "injected_files": files_to_inject, + } + + self._cast_compute_message('rebuild_instance', + context, + instance_id, + params=rebuild_params) + def revert_resize(self, context, instance_id): - """Reverts a resize, deleting the 'new' instance in the process""" + """Reverts a resize, deleting the 'new' instance in the process.""" context = context.elevated() migration_ref = self.db.migration_get_by_instance_and_status(context, instance_id, 'finished') if not migration_ref: - raise exception.NotFound(_("No finished migrations found for " - "instance")) + raise exception.MigrationNotFoundByStatus(instance_id=instance_id, + status='finished') params = {'migration_id': migration_ref['id']} self._cast_compute_message('revert_resize', context, instance_id, @@ -510,14 +583,13 @@ class API(base.Base): {'status': 'reverted'}) def confirm_resize(self, context, instance_id): - """Confirms a migration/resize, deleting the 'old' instance in the - process.""" + """Confirms a migration/resize and deletes the 'old' instance.""" 
context = context.elevated() migration_ref = self.db.migration_get_by_instance_and_status(context, instance_id, 'finished') if not migration_ref: - raise exception.NotFound(_("No finished migrations found for " - "instance")) + raise exception.MigrationNotFoundByStatus(instance_id=instance_id, + status='finished') instance_ref = self.db.instance_get(context, instance_id) params = {'migration_id': migration_ref['id']} self._cast_compute_message('confirm_resize', context, instance_id, @@ -531,8 +603,7 @@ class API(base.Base): def resize(self, context, instance_id, flavor_id): """Resize a running instance.""" instance = self.db.instance_get(context, instance_id) - current_instance_type = self.db.instance_type_get_by_name( - context, instance['instance_type']) + current_instance_type = instance['instance_type'] new_instance_type = self.db.instance_type_get_by_flavor_id( context, flavor_id) @@ -572,10 +643,9 @@ class API(base.Base): @scheduler_api.reroute_compute("diagnostics") def get_diagnostics(self, context, instance_id): """Retrieve diagnostics for the given instance.""" - return self._call_compute_message( - "get_diagnostics", - context, - instance_id) + return self._call_compute_message("get_diagnostics", + context, + instance_id) def get_actions(self, context, instance_id): """Retrieve actions for the given instance.""" @@ -583,12 +653,12 @@ class API(base.Base): @scheduler_api.reroute_compute("suspend") def suspend(self, context, instance_id): - """suspend the instance with instance_id""" + """Suspend the given instance.""" self._cast_compute_message('suspend_instance', context, instance_id) @scheduler_api.reroute_compute("resume") def resume(self, context, instance_id): - """resume the instance with instance_id""" + """Resume the given instance.""" self._cast_compute_message('resume_instance', context, instance_id) @scheduler_api.reroute_compute("rescue") @@ -603,15 +673,19 @@ class API(base.Base): def set_admin_password(self, context, instance_id, 
password=None): """Set the root/admin password for the given instance.""" - self._cast_compute_message('set_admin_password', context, instance_id, - password) + host = self._find_host(context, instance_id) + + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "set_admin_password", + "args": {"instance_id": instance_id, "new_pass": password}}) def inject_file(self, context, instance_id): """Write a file to the given instance.""" self._cast_compute_message('inject_file', context, instance_id) def get_ajax_console(self, context, instance_id): - """Get a url to an AJAX Console""" + """Get a url to an AJAX Console.""" output = self._call_compute_message('get_ajax_console', context, instance_id) @@ -620,7 +694,7 @@ class API(base.Base): 'args': {'token': output['token'], 'host': output['host'], 'port': output['port']}}) return {'url': '%s/?token=%s' % (FLAGS.ajax_console_proxy_url, - output['token'])} + output['token'])} def get_vnc_console(self, context, instance_id): """Get a url to a VNC Console.""" @@ -642,39 +716,34 @@ class API(base.Base): 'portignore')} def get_console_output(self, context, instance_id): - """Get console output for an an instance""" + """Get console output for an an instance.""" return self._call_compute_message('get_console_output', context, instance_id) def lock(self, context, instance_id): - """lock the instance with instance_id""" + """Lock the given instance.""" self._cast_compute_message('lock_instance', context, instance_id) def unlock(self, context, instance_id): - """unlock the instance with instance_id""" + """Unlock the given instance.""" self._cast_compute_message('unlock_instance', context, instance_id) def get_lock(self, context, instance_id): - """return the boolean state of (instance with instance_id)'s lock""" + """Return the boolean state of given instance's lock.""" instance = self.get(context, instance_id) return instance['locked'] def reset_network(self, context, instance_id): - """ - 
Reset networking on the instance. - - """ + """Reset networking on the instance.""" self._cast_compute_message('reset_network', context, instance_id) def inject_network_info(self, context, instance_id): - """ - Inject network info for the instance. - - """ + """Inject network info for the instance.""" self._cast_compute_message('inject_network_info', context, instance_id) def attach_volume(self, context, instance_id, volume_id, device): + """Attach an existing volume to an existing instance.""" if not re.match("^/dev/[a-z]d[a-z]+$", device): raise exception.ApiError(_("Invalid device specified: %s. " "Example device: /dev/vdb") % device) @@ -689,6 +758,7 @@ class API(base.Base): "mountpoint": device}}) def detach_volume(self, context, volume_id): + """Detach a volume from an instance.""" instance = self.db.volume_get_instance(context.elevated(), volume_id) if not instance: raise exception.ApiError(_("Volume isn't attached to anything!")) @@ -702,6 +772,7 @@ class API(base.Base): return instance def associate_floating_ip(self, context, instance_id, address): + """Associate a floating ip with an instance.""" instance = self.get(context, instance_id) self.network_api.associate_floating_ip(context, floating_ip=address, @@ -713,11 +784,14 @@ class API(base.Base): return dict(rv.iteritems()) def delete_instance_metadata(self, context, instance_id, key): - """Delete the given metadata item""" + """Delete the given metadata item from an instance.""" self.db.instance_metadata_delete(context, instance_id, key) def update_or_create_instance_metadata(self, context, instance_id, metadata): - """Updates or creates instance metadata""" + """Updates or creates instance metadata.""" + combined_metadata = self.get_instance_metadata(context, instance_id) + combined_metadata.update(metadata) + self._check_metadata_properties_quota(context, combined_metadata) self.db.instance_metadata_update_or_create(context, instance_id, metadata) diff --git a/nova/compute/instance_types.py 
b/nova/compute/instance_types.py index fa02a5dfa..1275a6fdd 100644 --- a/nova/compute/instance_types.py +++ b/nova/compute/instance_types.py @@ -18,9 +18,7 @@ # License for the specific language governing permissions and limitations # under the License. -""" -The built-in instance properties. -""" +"""Built-in instance properties.""" from nova import context from nova import db @@ -34,18 +32,16 @@ LOG = logging.getLogger('nova.instance_types') def create(name, memory, vcpus, local_gb, flavorid, swap=0, rxtx_quota=0, rxtx_cap=0): - """Creates instance types / flavors - arguments: name memory vcpus local_gb flavorid swap rxtx_quota rxtx_cap - """ + """Creates instance types.""" for option in [memory, vcpus, local_gb, flavorid]: try: int(option) except ValueError: - raise exception.InvalidInputException( - _("create arguments must be positive integers")) + raise exception.InvalidInput(reason=_("create arguments must " + "be positive integers")) if (int(memory) <= 0) or (int(vcpus) <= 0) or (int(local_gb) < 0): - raise exception.InvalidInputException( - _("create arguments must be positive integers")) + raise exception.InvalidInput(reason=_("create arguments must " + "be positive integers")) try: db.instance_type_create( @@ -59,83 +55,88 @@ def create(name, memory, vcpus, local_gb, flavorid, swap=0, rxtx_quota=rxtx_quota, rxtx_cap=rxtx_cap)) except exception.DBError, e: - LOG.exception(_('DB error: %s' % e)) - raise exception.ApiError(_("Cannot create instance type: %s" % name)) + LOG.exception(_('DB error: %s') % e) + raise exception.ApiError(_("Cannot create instance_type with " + "name %(name)s and flavorid %(flavorid)s") + % locals()) def destroy(name): - """Marks instance types / flavors as deleted - arguments: name""" - if name == None: - raise exception.InvalidInputException(_("No instance type specified")) + """Marks instance types as deleted.""" + if name is None: + raise exception.InvalidInstanceType(instance_type=name) else: try: 
db.instance_type_destroy(context.get_admin_context(), name) except exception.NotFound: - LOG.exception(_('Instance type %s not found for deletion' % name)) - raise exception.ApiError(_("Unknown instance type: %s" % name)) + LOG.exception(_('Instance type %s not found for deletion') % name) + raise exception.ApiError(_("Unknown instance type: %s") % name) def purge(name): - """Removes instance types / flavors from database - arguments: name""" - if name == None: - raise exception.InvalidInputException(_("No instance type specified")) + """Removes instance types from database.""" + if name is None: + raise exception.InvalidInstanceType(instance_type=name) else: try: db.instance_type_purge(context.get_admin_context(), name) except exception.NotFound: - LOG.exception(_('Instance type %s not found for purge' % name)) - raise exception.ApiError(_("Unknown instance type: %s" % name)) + LOG.exception(_('Instance type %s not found for purge') % name) + raise exception.ApiError(_("Unknown instance type: %s") % name) def get_all_types(inactive=0): - """Retrieves non-deleted instance_types. - Pass true as argument if you want deleted instance types returned also.""" + """Get all non-deleted instance_types. + + Pass true as argument if you want deleted instance types returned also. + + """ return db.instance_type_get_all(context.get_admin_context(), inactive) -def get_all_flavors(): - """retrieves non-deleted flavors. alias for instance_types.get_all_types(). 
- Pass true as argument if you want deleted instance types returned also.""" - return get_all_types(context.get_admin_context()) +get_all_flavors = get_all_types -def get_instance_type(name): - """Retrieves single instance type by name""" - if name is None: - return FLAGS.default_instance_type +def get_default_instance_type(): + """Get the default instance type.""" + name = FLAGS.default_instance_type try: - ctxt = context.get_admin_context() - inst_type = db.instance_type_get_by_name(ctxt, name) - return inst_type + return get_instance_type_by_name(name) except exception.DBError: - raise exception.ApiError(_("Unknown instance type: %s" % name)) + raise exception.ApiError(_("Unknown instance type: %s") % name) -def get_by_type(instance_type): - """retrieve instance type name""" - if instance_type is None: - return FLAGS.default_instance_type +def get_instance_type(id): + """Retrieves single instance type by id.""" + if id is None: + return get_default_instance_type() + try: + ctxt = context.get_admin_context() + return db.instance_type_get_by_id(ctxt, id) + except exception.DBError: + raise exception.ApiError(_("Unknown instance type: %s") % name) + +def get_instance_type_by_name(name): + """Retrieves single instance type by name.""" + if name is None: + return get_default_instance_type() try: ctxt = context.get_admin_context() - inst_type = db.instance_type_get_by_name(ctxt, instance_type) - return inst_type['name'] - except exception.DBError, e: - LOG.exception(_('DB error: %s' % e)) - raise exception.ApiError(_("Unknown instance type: %s" %\ - instance_type)) + return db.instance_type_get_by_name(ctxt, name) + except exception.DBError: + raise exception.ApiError(_("Unknown instance type: %s") % name) -def get_by_flavor_id(flavor_id): - """retrieve instance type's name by flavor_id""" +# TODO(termie): flavor-specific code should probably be in the API that uses +# flavors. 
+def get_instance_type_by_flavor_id(flavor_id): + """Retrieve instance type by flavor_id.""" if flavor_id is None: - return FLAGS.default_instance_type + return get_default_instance_type() try: ctxt = context.get_admin_context() - flavor = db.instance_type_get_by_flavor_id(ctxt, flavor_id) - return flavor['name'] + return db.instance_type_get_by_flavor_id(ctxt, flavor_id) except exception.DBError, e: - LOG.exception(_('DB error: %s' % e)) - raise exception.ApiError(_("Unknown flavor: %s" % flavor_id)) + LOG.exception(_('DB error: %s') % e) + raise exception.ApiError(_("Unknown flavor: %s") % flavor_id) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 916c1db5e..ff7aeb053 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -17,8 +17,7 @@ # License for the specific language governing permissions and limitations # under the License. -""" -Handles all processes relating to instances (guest vms). +"""Handles all processes relating to instances (guest vms). The :py:class:`ComputeManager` class is a :py:class:`nova.manager.Manager` that handles RPC calls relating to creating instances. It is responsible for @@ -33,15 +32,15 @@ terminating it. 
by :func:`nova.utils.import_object` :volume_manager: Name of class that handles persistent storage, loaded by :func:`nova.utils.import_object` + """ import datetime import os -import random -import string import socket import sys import tempfile +import time import functools from eventlet import greenthread @@ -50,11 +49,14 @@ from nova import exception from nova import flags from nova import log as logging from nova import manager +from nova import network from nova import rpc from nova import utils +from nova import volume from nova.compute import power_state from nova.virt import driver + FLAGS = flags.FLAGS flags.DEFINE_string('instances_path', '$state_path/instances', 'where instances are stored on disk') @@ -73,20 +75,18 @@ flags.DEFINE_integer('live_migration_retry_count', 30, flags.DEFINE_integer("rescue_timeout", 0, "Automatically unrescue an instance after N seconds." " Set to 0 to disable.") +flags.DEFINE_bool('auto_assign_floating_ip', False, + 'Autoassigning floating ip to VM') +flags.DEFINE_integer('host_state_interval', 120, + 'Interval in seconds for querying the host status') LOG = logging.getLogger('nova.compute.manager') def checks_instance_lock(function): - """ - decorator used for preventing action against locked instances - unless, of course, you happen to be admin - - """ - + """Decorator to prevent action against locked instances for non-admins.""" @functools.wraps(function) def decorated_function(self, context, instance_id, *args, **kwargs): - LOG.info(_("check_instance_lock: decorating: |%s|"), function, context=context) LOG.info(_("check_instance_lock: arguments: |%(self)s| |%(context)s|" @@ -112,7 +112,6 @@ def checks_instance_lock(function): class ComputeManager(manager.SchedulerDependentManager): - """Manages the running instances from creation to destruction.""" def __init__(self, compute_driver=None, *args, **kwargs): @@ -132,37 +131,55 @@ class ComputeManager(manager.SchedulerDependentManager): self.network_manager = 
utils.import_object(FLAGS.network_manager) self.volume_manager = utils.import_object(FLAGS.volume_manager) + self.network_api = network.API() + self._last_host_check = 0 super(ComputeManager, self).__init__(service_name="compute", *args, **kwargs) def init_host(self): - """Do any initialization that needs to be run if this is a - standalone service. - """ + """Initialization for a standalone compute service.""" self.driver.init_host(host=self.host) - def _update_state(self, context, instance_id): + def _update_state(self, context, instance_id, state=None): """Update the state of an instance from the driver info.""" - # FIXME(ja): include other fields from state? instance_ref = self.db.instance_get(context, instance_id) - try: - info = self.driver.get_info(instance_ref['name']) - state = info['state'] - except exception.NotFound: - state = power_state.FAILED + + if state is None: + try: + info = self.driver.get_info(instance_ref['name']) + except exception.NotFound: + info = None + + if info is not None: + state = info['state'] + else: + state = power_state.FAILED + self.db.instance_set_state(context, instance_id, state) + def _update_launched_at(self, context, instance_id, launched_at=None): + """Update the launched_at parameter of the given instance.""" + data = {'launched_at': launched_at or datetime.datetime.utcnow()} + self.db.instance_update(context, instance_id, data) + + def _update_image_id(self, context, instance_id, image_id): + """Update the image_id for the given instance.""" + data = {'image_id': image_id} + self.db.instance_update(context, instance_id, data) + def get_console_topic(self, context, **kwargs): - """Retrieves the console host for a project on this host - Currently this is just set in the flags for each compute - host.""" + """Retrieves the console host for a project on this host. + + Currently this is just set in the flags for each compute host. + + """ #TODO(mdragon): perhaps make this variable by console_type? 
return self.db.queue_get_for(context, FLAGS.console_topic, FLAGS.console_host) def get_network_topic(self, context, **kwargs): - """Retrieves the network host for a project on this host""" + """Retrieves the network host for a project on this host.""" # TODO(vish): This method should be memoized. This will make # the call to get_network_host cheaper, so that # it can pas messages instead of checking the db @@ -179,15 +196,23 @@ class ComputeManager(manager.SchedulerDependentManager): return self.driver.get_console_pool_info(console_type) @exception.wrap_exception - def refresh_security_group_rules(self, context, - security_group_id, **kwargs): - """This call passes straight through to the virtualization driver.""" + def refresh_security_group_rules(self, context, security_group_id, + **kwargs): + """Tell the virtualization driver to refresh security group rules. + + Passes straight through to the virtualization driver. + + """ return self.driver.refresh_security_group_rules(security_group_id) @exception.wrap_exception def refresh_security_group_members(self, context, security_group_id, **kwargs): - """This call passes straight through to the virtualization driver.""" + """Tell the virtualization driver to refresh security group members. + + Passes straight through to the virtualization driver. 
+ + """ return self.driver.refresh_security_group_members(security_group_id) @exception.wrap_exception @@ -201,6 +226,7 @@ class ComputeManager(manager.SchedulerDependentManager): context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) instance_ref.injected_files = kwargs.get('injected_files', []) + instance_ref.admin_pass = kwargs.get('admin_password', None) if instance_ref['name'] in self.driver.list_instances(): raise exception.Error(_("Instance has already been created")) LOG.audit(_("instance %s: starting..."), instance_id, @@ -214,7 +240,7 @@ class ComputeManager(manager.SchedulerDependentManager): power_state.NOSTATE, 'networking') - is_vpn = instance_ref['image_id'] == FLAGS.vpn_image_id + is_vpn = instance_ref['image_id'] == str(FLAGS.vpn_image_id) # NOTE(vish): This could be a cast because we don't do anything # with the address currently, but I'm leaving it as # a call to ensure that network setup completes. We @@ -230,31 +256,36 @@ class ComputeManager(manager.SchedulerDependentManager): instance_id) # TODO(vish) check to make sure the availability zone matches - self.db.instance_set_state(context, - instance_id, - power_state.NOSTATE, - 'spawning') + self._update_state(context, instance_id, power_state.BUILDING) try: self.driver.spawn(instance_ref) - now = datetime.datetime.utcnow() - self.db.instance_update(context, - instance_id, - {'launched_at': now}) - except Exception: # pylint: disable=W0702 - LOG.exception(_("Instance '%s' failed to spawn. Is virtualization" - " enabled in the BIOS?"), instance_id, - context=context) - self.db.instance_set_state(context, - instance_id, - power_state.SHUTDOWN) - + except Exception as ex: # pylint: disable=W0702 + msg = _("Instance '%(instance_id)s' failed to spawn. Is " + "virtualization enabled in the BIOS? 
Details: " + "%(ex)s") % locals() + LOG.exception(msg) + + if not FLAGS.stub_network and FLAGS.auto_assign_floating_ip: + public_ip = self.network_api.allocate_floating_ip(context) + + self.db.floating_ip_set_auto_assigned(context, public_ip) + fixed_ip = self.db.fixed_ip_get_by_address(context, address) + floating_ip = self.db.floating_ip_get_by_address(context, + public_ip) + + self.network_api.associate_floating_ip(context, + floating_ip, + fixed_ip, + affect_auto_assigned=True) + + self._update_launched_at(context, instance_id) self._update_state(context, instance_id) @exception.wrap_exception @checks_instance_lock def terminate_instance(self, context, instance_id): - """Terminate an instance on this machine.""" + """Terminate an instance on this host.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) LOG.audit(_("Terminating instance %s"), instance_id, context=context) @@ -269,13 +300,17 @@ class ComputeManager(manager.SchedulerDependentManager): # NOTE(vish): Right now we don't really care if the ip is # disassociated. We may need to worry about # checking this later. - network_topic = self.db.queue_get_for(context, - FLAGS.network_topic, - floating_ip['host']) - rpc.cast(context, - network_topic, - {"method": "disassociate_floating_ip", - "args": {"floating_address": address}}) + self.network_api.disassociate_floating_ip(context, + address, + True) + if (FLAGS.auto_assign_floating_ip + and floating_ip.get('auto_assigned')): + LOG.debug(_("Deallocating floating ip %s"), + floating_ip['address'], + context=context) + self.network_api.release_floating_ip(context, + address, + True) address = fixed_ip['address'] if address: @@ -301,8 +336,35 @@ class ComputeManager(manager.SchedulerDependentManager): @exception.wrap_exception @checks_instance_lock + def rebuild_instance(self, context, instance_id, image_id): + """Destroy and re-make this instance. 
+ + A 'rebuild' effectively purges all existing data from the system and + remakes the VM with given 'metadata' and 'personalities'. + + :param context: `nova.RequestContext` object + :param instance_id: Instance identifier (integer) + :param image_id: Image identifier (integer) + """ + context = context.elevated() + + instance_ref = self.db.instance_get(context, instance_id) + LOG.audit(_("Rebuilding instance %s"), instance_id, context=context) + + self._update_state(context, instance_id, power_state.BUILDING) + + self.driver.destroy(instance_ref) + instance_ref.image_id = image_id + self.driver.spawn(instance_ref) + + self._update_image_id(context, instance_id, image_id) + self._update_launched_at(context, instance_id) + self._update_state(context, instance_id) + + @exception.wrap_exception + @checks_instance_lock def reboot_instance(self, context, instance_id): - """Reboot an instance on this server.""" + """Reboot an instance on this host.""" context = context.elevated() self._update_state(context, instance_id) instance_ref = self.db.instance_get(context, instance_id) @@ -326,7 +388,7 @@ class ComputeManager(manager.SchedulerDependentManager): @exception.wrap_exception def snapshot_instance(self, context, instance_id, image_id): - """Snapshot an instance on this server.""" + """Snapshot an instance on this host.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) @@ -349,28 +411,55 @@ class ComputeManager(manager.SchedulerDependentManager): @exception.wrap_exception @checks_instance_lock def set_admin_password(self, context, instance_id, new_pass=None): - """Set the root/admin password for an instance on this server.""" + """Set the root/admin password for an instance on this host. + + This is generally only called by API password resets after an + image has been built. 
+ """ + context = context.elevated() - instance_ref = self.db.instance_get(context, instance_id) - instance_id = instance_ref['id'] - instance_state = instance_ref['state'] - expected_state = power_state.RUNNING - if instance_state != expected_state: - LOG.warn(_('trying to reset the password on a non-running ' - 'instance: %(instance_id)s (state: %(instance_state)s ' - 'expected: %(expected_state)s)') % locals()) - LOG.audit(_('instance %s: setting admin password'), - instance_ref['name']) + if new_pass is None: # Generate a random password new_pass = utils.generate_password(FLAGS.password_length) - self.driver.set_admin_password(instance_ref, new_pass) - self._update_state(context, instance_id) + + max_tries = 10 + + for i in xrange(max_tries): + instance_ref = self.db.instance_get(context, instance_id) + instance_id = instance_ref["id"] + instance_state = instance_ref["state"] + expected_state = power_state.RUNNING + + if instance_state != expected_state: + raise exception.Error(_('Instance is not running')) + else: + try: + self.driver.set_admin_password(instance_ref, new_pass) + LOG.audit(_("Instance %s: Root password set"), + instance_ref["name"]) + break + except NotImplementedError: + # NOTE(dprince): if the driver doesn't implement + # set_admin_password we break to avoid a loop + LOG.warn(_('set_admin_password is not implemented ' + 'by this driver.')) + break + except Exception, e: + # Catch all here because this could be anything. + LOG.exception(e) + if i == max_tries - 1: + # At some point this exception may make it back + # to the API caller, and we don't want to reveal + # too much. 
The real exception is logged above + raise exception.Error(_('Internal error')) + time.sleep(1) + continue @exception.wrap_exception @checks_instance_lock def inject_file(self, context, instance_id, path, file_contents): - """Write a file to the specified path on an instance on this server""" + """Write a file to the specified path in an instance on this host.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) instance_id = instance_ref['id'] @@ -388,44 +477,34 @@ class ComputeManager(manager.SchedulerDependentManager): @exception.wrap_exception @checks_instance_lock def rescue_instance(self, context, instance_id): - """Rescue an instance on this server.""" + """Rescue an instance on this host.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) LOG.audit(_('instance %s: rescuing'), instance_id, context=context) - self.db.instance_set_state( - context, - instance_id, - power_state.NOSTATE, - 'rescuing') + self.db.instance_set_state(context, + instance_id, + power_state.NOSTATE, + 'rescuing') self.network_manager.setup_compute_network(context, instance_id) - self.driver.rescue( - instance_ref, - lambda result: self._update_state_callback( - self, - context, - instance_id, - result)) + _update_state = lambda result: self._update_state_callback( + self, context, instance_id, result) + self.driver.rescue(instance_ref, _update_state) self._update_state(context, instance_id) @exception.wrap_exception @checks_instance_lock def unrescue_instance(self, context, instance_id): - """Rescue an instance on this server.""" + """Rescue an instance on this host.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) LOG.audit(_('instance %s: unrescuing'), instance_id, context=context) - self.db.instance_set_state( - context, - instance_id, - power_state.NOSTATE, - 'unrescuing') - self.driver.unrescue( - instance_ref, - lambda result: self._update_state_callback( - self, - 
context, - instance_id, - result)) + self.db.instance_set_state(context, + instance_id, + power_state.NOSTATE, + 'unrescuing') + _update_state = lambda result: self._update_state_callback( + self, context, instance_id, result) + self.driver.unrescue(instance_ref, _update_state) self._update_state(context, instance_id) @staticmethod @@ -436,18 +515,20 @@ class ComputeManager(manager.SchedulerDependentManager): @exception.wrap_exception @checks_instance_lock def confirm_resize(self, context, instance_id, migration_id): - """Destroys the source instance""" + """Destroys the source instance.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - migration_ref = self.db.migration_get(context, migration_id) self.driver.destroy(instance_ref) @exception.wrap_exception @checks_instance_lock def revert_resize(self, context, instance_id, migration_id): - """Destroys the new instance on the destination machine, - reverts the model changes, and powers on the old - instance on the source machine""" + """Destroys the new instance on the destination machine. + + Reverts the model changes, and powers on the old instance on the + source machine. + + """ instance_ref = self.db.instance_get(context, instance_id) migration_ref = self.db.migration_get(context, migration_id) @@ -464,9 +545,12 @@ class ComputeManager(manager.SchedulerDependentManager): @exception.wrap_exception @checks_instance_lock def finish_revert_resize(self, context, instance_id, migration_id): - """Finishes the second half of reverting a resize, powering back on - the source instance and reverting the resized attributes in the - database""" + """Finishes the second half of reverting a resize. + + Power back on the source instance and revert the resized attributes + in the database. 
+ + """ instance_ref = self.db.instance_get(context, instance_id) migration_ref = self.db.migration_get(context, migration_id) instance_type = self.db.instance_type_get_by_flavor_id(context, @@ -486,8 +570,11 @@ class ComputeManager(manager.SchedulerDependentManager): @exception.wrap_exception @checks_instance_lock def prep_resize(self, context, instance_id, flavor_id): - """Initiates the process of moving a running instance to another - host, possibly changing the RAM and disk size in the process""" + """Initiates the process of moving a running instance to another host. + + Possibly changes the RAM and disk size in the process. + + """ context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) if instance_ref['host'] == FLAGS.host: @@ -519,34 +606,38 @@ class ComputeManager(manager.SchedulerDependentManager): @exception.wrap_exception @checks_instance_lock def resize_instance(self, context, instance_id, migration_id): - """Starts the migration of a running instance to another host""" + """Starts the migration of a running instance to another host.""" migration_ref = self.db.migration_get(context, migration_id) instance_ref = self.db.instance_get(context, instance_id) - self.db.migration_update(context, migration_id, - {'status': 'migrating', }) - - disk_info = self.driver.migrate_disk_and_power_off(instance_ref, - migration_ref['dest_host']) - self.db.migration_update(context, migration_id, - {'status': 'post-migrating', }) - - service = self.db.service_get_by_host_and_topic(context, - migration_ref['dest_compute'], FLAGS.compute_topic) - topic = self.db.queue_get_for(context, FLAGS.compute_topic, - migration_ref['dest_compute']) - rpc.cast(context, topic, - {'method': 'finish_resize', - 'args': { - 'migration_id': migration_id, - 'instance_id': instance_id, - 'disk_info': disk_info, }, - }) + self.db.migration_update(context, + migration_id, + {'status': 'migrating'}) + + disk_info = self.driver.migrate_disk_and_power_off( + 
instance_ref, migration_ref['dest_host']) + self.db.migration_update(context, + migration_id, + {'status': 'post-migrating'}) + + service = self.db.service_get_by_host_and_topic( + context, migration_ref['dest_compute'], FLAGS.compute_topic) + topic = self.db.queue_get_for(context, + FLAGS.compute_topic, + migration_ref['dest_compute']) + rpc.cast(context, topic, {'method': 'finish_resize', + 'args': {'migration_id': migration_id, + 'instance_id': instance_id, + 'disk_info': disk_info}}) @exception.wrap_exception @checks_instance_lock def finish_resize(self, context, instance_id, migration_id, disk_info): - """Completes the migration process by setting up the newly transferred - disk and turning on the instance on its new host machine""" + """Completes the migration process. + + Sets up the newly transferred disk and turns on the instance at its + new host machine. + + """ migration_ref = self.db.migration_get(context, migration_id) instance_ref = self.db.instance_get(context, migration_ref['instance_id']) @@ -555,7 +646,7 @@ class ComputeManager(manager.SchedulerDependentManager): instance_type = self.db.instance_type_get_by_flavor_id(context, migration_ref['new_flavor_id']) self.db.instance_update(context, instance_id, - dict(instance_type=instance_type['name'], + dict(instance_type_id=instance_type['id'], memory_mb=instance_type['memory_mb'], vcpus=instance_type['vcpus'], local_gb=instance_type['local_gb'])) @@ -571,7 +662,7 @@ class ComputeManager(manager.SchedulerDependentManager): @exception.wrap_exception @checks_instance_lock def pause_instance(self, context, instance_id): - """Pause an instance on this server.""" + """Pause an instance on this host.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) LOG.audit(_('instance %s: pausing'), instance_id, context=context) @@ -588,7 +679,7 @@ class ComputeManager(manager.SchedulerDependentManager): @exception.wrap_exception @checks_instance_lock def unpause_instance(self, 
context, instance_id): - """Unpause a paused instance on this server.""" + """Unpause a paused instance on this host.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) LOG.audit(_('instance %s: unpausing'), instance_id, context=context) @@ -604,7 +695,7 @@ class ComputeManager(manager.SchedulerDependentManager): @exception.wrap_exception def get_diagnostics(self, context, instance_id): - """Retrieve diagnostics for an instance on this server.""" + """Retrieve diagnostics for an instance on this host.""" instance_ref = self.db.instance_get(context, instance_id) if instance_ref["state"] == power_state.RUNNING: @@ -615,10 +706,7 @@ class ComputeManager(manager.SchedulerDependentManager): @exception.wrap_exception @checks_instance_lock def suspend_instance(self, context, instance_id): - """ - suspend the instance with instance_id - - """ + """Suspend the given instance.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) LOG.audit(_('instance %s: suspending'), instance_id, context=context) @@ -634,10 +722,7 @@ class ComputeManager(manager.SchedulerDependentManager): @exception.wrap_exception @checks_instance_lock def resume_instance(self, context, instance_id): - """ - resume the suspended instance with instance_id - - """ + """Resume the given suspended instance.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) LOG.audit(_('instance %s: resuming'), instance_id, context=context) @@ -652,34 +737,23 @@ class ComputeManager(manager.SchedulerDependentManager): @exception.wrap_exception def lock_instance(self, context, instance_id): - """ - lock the instance with instance_id - - """ + """Lock the given instance.""" context = context.elevated() - instance_ref = self.db.instance_get(context, instance_id) LOG.debug(_('instance %s: locking'), instance_id, context=context) self.db.instance_update(context, instance_id, {'locked': True}) @exception.wrap_exception 
def unlock_instance(self, context, instance_id): - """ - unlock the instance with instance_id - - """ + """Unlock the given instance.""" context = context.elevated() - instance_ref = self.db.instance_get(context, instance_id) LOG.debug(_('instance %s: unlocking'), instance_id, context=context) self.db.instance_update(context, instance_id, {'locked': False}) @exception.wrap_exception def get_lock(self, context, instance_id): - """ - return the boolean state of (instance with instance_id)'s lock - - """ + """Return the boolean state of the given instance's lock.""" context = context.elevated() LOG.debug(_('instance %s: getting locked state'), instance_id, context=context) @@ -688,10 +762,7 @@ class ComputeManager(manager.SchedulerDependentManager): @checks_instance_lock def reset_network(self, context, instance_id): - """ - Reset networking on the instance. - - """ + """Reset networking on the given instance.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) LOG.debug(_('instance %s: reset network'), instance_id, @@ -700,10 +771,7 @@ class ComputeManager(manager.SchedulerDependentManager): @checks_instance_lock def inject_network_info(self, context, instance_id): - """ - Inject network info for the instance. 
- - """ + """Inject network info for the given instance.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) LOG.debug(_('instance %s: inject network info'), instance_id, @@ -712,29 +780,28 @@ class ComputeManager(manager.SchedulerDependentManager): @exception.wrap_exception def get_console_output(self, context, instance_id): - """Send the console output for an instance.""" + """Send the console output for the given instance.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) LOG.audit(_("Get console output for instance %s"), instance_id, context=context) - return self.driver.get_console_output(instance_ref) + output = self.driver.get_console_output(instance_ref) + return output.decode('utf-8', 'replace').encode('ascii', 'replace') @exception.wrap_exception def get_ajax_console(self, context, instance_id): - """Return connection information for an ajax console""" + """Return connection information for an ajax console.""" context = context.elevated() LOG.debug(_("instance %s: getting ajax console"), instance_id) instance_ref = self.db.instance_get(context, instance_id) - return self.driver.get_ajax_console(instance_ref) @exception.wrap_exception def get_vnc_console(self, context, instance_id): - """Return connection information for an vnc console.""" + """Return connection information for a vnc console.""" context = context.elevated() LOG.debug(_("instance %s: getting vnc console"), instance_id) instance_ref = self.db.instance_get(context, instance_id) - return self.driver.get_vnc_console(instance_ref) @checks_instance_lock @@ -786,9 +853,17 @@ class ComputeManager(manager.SchedulerDependentManager): self.db.volume_detached(context, volume_id) return True + def remove_volume(self, context, volume_id): + """Remove volume on compute host. 
+ + :param context: security context + :param volume_id: volume ID + """ + self.volume_manager.remove_compute_volume(context, volume_id) + @exception.wrap_exception def compare_cpu(self, context, cpu_info): - """Checks the host cpu is compatible to a cpu given by xml. + """Checks that the host cpu is compatible with a cpu given by xml. :param context: security context :param cpu_info: json string obtained from virConnect.getCapabilities @@ -809,7 +884,6 @@ class ComputeManager(manager.SchedulerDependentManager): :returns: tmpfile name(basename) """ - dirpath = FLAGS.instances_path fd, tmp_file = tempfile.mkstemp(dir=dirpath) LOG.debug(_("Creating tmpfile %s to notify to other " @@ -826,10 +900,9 @@ class ComputeManager(manager.SchedulerDependentManager): :param filename: confirm existence of FLAGS.instances_path/thisfile """ - tmp_file = os.path.join(FLAGS.instances_path, filename) if not os.path.exists(tmp_file): - raise exception.NotFound(_('%s not found') % tmp_file) + raise exception.FileNotFound(file_path=tmp_file) @exception.wrap_exception def cleanup_shared_storage_test_file(self, context, filename): @@ -839,7 +912,6 @@ class ComputeManager(manager.SchedulerDependentManager): :param filename: remove existence of FLAGS.instances_path/thisfile """ - tmp_file = os.path.join(FLAGS.instances_path, filename) os.remove(tmp_file) @@ -851,7 +923,6 @@ class ComputeManager(manager.SchedulerDependentManager): :returns: See driver.update_available_resource() """ - return self.driver.update_available_resource(context, self.host) def pre_live_migration(self, context, instance_id, time=None): @@ -861,7 +932,6 @@ class ComputeManager(manager.SchedulerDependentManager): :param instance_id: nova.db.sqlalchemy.models.Instance.Id """ - if not time: time = greenthread @@ -872,8 +942,7 @@ class ComputeManager(manager.SchedulerDependentManager): # Getting fixed ips fixed_ip = self.db.instance_get_fixed_address(context, instance_id) if not fixed_ip: - msg = 
_("%(instance_id)s(%(ec2_id)s) does not have fixed_ip.") - raise exception.NotFound(msg % locals()) + raise exception.NoFixedIpsFoundForInstance(instance_id=instance_id) # If any volume is mounted, prepare here. if not instance_ref['volumes']: @@ -920,7 +989,6 @@ class ComputeManager(manager.SchedulerDependentManager): :param dest: destination host """ - # Get instance for error handling. instance_ref = self.db.instance_get(context, instance_id) i_name = instance_ref.name @@ -1011,17 +1079,15 @@ class ComputeManager(manager.SchedulerDependentManager): "Domain not found: no domain with matching name.\" " "This error can be safely ignored.")) - def recover_live_migration(self, ctxt, instance_ref, host=None): + def recover_live_migration(self, ctxt, instance_ref, host=None, dest=None): """Recovers Instance/volume state from migrating -> running. :param ctxt: security context :param instance_id: nova.db.sqlalchemy.models.Instance.Id - :param host: - DB column value is updated by this hostname. - if none, the host instance currently running is selected. + :param host: DB column value is updated by this hostname. + If none, the host instance currently running is selected. 
""" - if not host: host = instance_ref['host'] @@ -1031,8 +1097,13 @@ class ComputeManager(manager.SchedulerDependentManager): 'state': power_state.RUNNING, 'host': host}) - for volume in instance_ref['volumes']: - self.db.volume_update(ctxt, volume['id'], {'status': 'in-use'}) + if dest: + volume_api = volume.API() + for volume_ref in instance_ref['volumes']: + volume_id = volume_ref['id'] + self.db.volume_update(ctxt, volume_id, {'status': 'in-use'}) + if dest: + volume_api.remove_from_compute(ctxt, volume_id, dest) def periodic_tasks(self, context=None): """Tasks to be run at a periodic interval.""" @@ -1049,6 +1120,13 @@ class ComputeManager(manager.SchedulerDependentManager): error_list.append(ex) try: + self._report_driver_status() + except Exception as ex: + LOG.warning(_("Error during report_driver_status(): %s"), + unicode(ex)) + error_list.append(ex) + + try: self._poll_instance_states(context) except Exception as ex: LOG.warning(_("Error during instance poll: %s"), @@ -1057,6 +1135,16 @@ class ComputeManager(manager.SchedulerDependentManager): return error_list + def _report_driver_status(self): + curr_time = time.time() + if curr_time - self._last_host_check > FLAGS.host_state_interval: + self._last_host_check = curr_time + LOG.info(_("Updating host status")) + # This will grab info about the host and queue it + # to be sent to the Schedulers. + self.update_service_capabilities( + self.driver.get_host_stats(refresh=True)) + def _poll_instance_states(self, context): vm_instances = self.driver.list_instances_detail() vm_instances = dict((vm.name, vm) for vm in vm_instances) @@ -1074,8 +1162,7 @@ class ComputeManager(manager.SchedulerDependentManager): if vm_instance is None: # NOTE(justinsb): We have to be very careful here, because a # concurrent operation could be in progress (e.g. 
a spawn) - if db_state == power_state.NOSTATE: - # Assume that NOSTATE => spawning + if db_state == power_state.BUILDING: # TODO(justinsb): This does mean that if we crash during a # spawn, the machine will never leave the spawning state, # but this is just the way nova is; this function isn't @@ -1095,23 +1182,28 @@ class ComputeManager(manager.SchedulerDependentManager): vm_state = vm_instance.state vms_not_found_in_db.remove(name) + if db_instance['state_description'] == 'migrating': + # A situation which db record exists, but no instance" + # sometimes occurs while live-migration at src compute, + # this case should be ignored. + LOG.debug(_("Ignoring %(name)s, as it's currently being " + "migrated.") % locals()) + continue + if vm_state != db_state: LOG.info(_("DB/VM state mismatch. Changing state from " "'%(db_state)s' to '%(vm_state)s'") % locals()) - self.db.instance_set_state(context, - db_instance['id'], - vm_state) + self._update_state(context, db_instance['id'], vm_state) - if vm_state == power_state.SHUTOFF: - # TODO(soren): This is what the compute manager does when you - # terminate an instance. At some point I figure we'll have a - # "terminated" state and some sort of cleanup job that runs - # occasionally, cleaning them out. - self.db.instance_destroy(context, db_instance['id']) + # NOTE(justinsb): We no longer auto-remove SHUTOFF instances + # It's quite hard to get them back when we do. # Are there VMs not in the DB? for vm_not_found_in_db in vms_not_found_in_db: name = vm_not_found_in_db - # TODO(justinsb): What to do here? Adopt it? Shut it down? - LOG.warning(_("Found VM not in DB: '%(name)s'. Ignoring") - % locals()) + + # We only care about instances that compute *should* know about + if name.startswith("instance-"): + # TODO(justinsb): What to do here? Adopt it? Shut it down? + LOG.warning(_("Found VM not in DB: '%(name)s'. 
Ignoring") + % locals()) diff --git a/nova/compute/monitor.py b/nova/compute/monitor.py index 04e08a235..3bb54a382 100644 --- a/nova/compute/monitor.py +++ b/nova/compute/monitor.py @@ -260,7 +260,7 @@ class Instance(object): try: data = self.fetch_cpu_stats() - if data != None: + if data is not None: LOG.debug('CPU: %s', data) update_rrd(self, 'cpu', data) @@ -313,7 +313,7 @@ class Instance(object): LOG.debug('CPU: %d', self.cputime) # Skip calculation on first pass. Need delta to get a meaningful value. - if cputime_last_updated == None: + if cputime_last_updated is None: return None # Calculate the number of seconds between samples. diff --git a/nova/compute/power_state.py b/nova/compute/power_state.py index ef013b2ef..c468fe6b3 100644 --- a/nova/compute/power_state.py +++ b/nova/compute/power_state.py @@ -30,20 +30,23 @@ SHUTOFF = 0x05 CRASHED = 0x06 SUSPENDED = 0x07 FAILED = 0x08 +BUILDING = 0x09 # TODO(justinsb): Power state really needs to be a proper class, # so that we're not locked into the libvirt status codes and can put mapping # logic here rather than spread throughout the code _STATE_MAP = { - NOSTATE: 'pending', - RUNNING: 'running', - BLOCKED: 'blocked', - PAUSED: 'paused', - SHUTDOWN: 'shutdown', - SHUTOFF: 'shutdown', - CRASHED: 'crashed', - SUSPENDED: 'suspended', - FAILED: 'failed to spawn'} + NOSTATE: 'pending', + RUNNING: 'running', + BLOCKED: 'blocked', + PAUSED: 'paused', + SHUTDOWN: 'shutdown', + SHUTOFF: 'shutdown', + CRASHED: 'crashed', + SUSPENDED: 'suspended', + FAILED: 'failed to spawn', + BUILDING: 'building', +} def name(code): diff --git a/nova/console/api.py b/nova/console/api.py index 3850d2c44..137ddcaac 100644 --- a/nova/console/api.py +++ b/nova/console/api.py @@ -15,23 +15,19 @@ # License for the specific language governing permissions and limitations # under the License. 
-""" -Handles ConsoleProxy API requests -""" +"""Handles ConsoleProxy API requests.""" from nova import exception -from nova.db import base - - from nova import flags from nova import rpc +from nova.db import base FLAGS = flags.FLAGS class API(base.Base): - """API for spining up or down console proxy connections""" + """API for spinning up or down console proxy connections.""" def __init__(self, **kwargs): super(API, self).__init__(**kwargs) @@ -51,8 +47,8 @@ class API(base.Base): self.db.queue_get_for(context, FLAGS.console_topic, pool['host']), - {"method": "remove_console", - "args": {"console_id": console['id']}}) + {'method': 'remove_console', + 'args': {'console_id': console['id']}}) def create_console(self, context, instance_id): instance = self.db.instance_get(context, instance_id) @@ -63,13 +59,12 @@ class API(base.Base): # here. rpc.cast(context, self._get_console_topic(context, instance['host']), - {"method": "add_console", - "args": {"instance_id": instance_id}}) + {'method': 'add_console', + 'args': {'instance_id': instance_id}}) def _get_console_topic(self, context, instance_host): topic = self.db.queue_get_for(context, FLAGS.compute_topic, instance_host) - return rpc.call(context, - topic, - {"method": "get_console_topic", "args": {'fake': 1}}) + return rpc.call(context, topic, {'method': 'get_console_topic', + 'args': {'fake': 1}}) diff --git a/nova/console/fake.py b/nova/console/fake.py index 7a90d5221..e2eb886f8 100644 --- a/nova/console/fake.py +++ b/nova/console/fake.py @@ -15,9 +15,7 @@ # License for the specific language governing permissions and limitations # under the License. -""" -Fake ConsoleProxy driver for tests. 
-""" +"""Fake ConsoleProxy driver for tests.""" from nova import exception @@ -27,32 +25,32 @@ class FakeConsoleProxy(object): @property def console_type(self): - return "fake" + return 'fake' def setup_console(self, context, console): - """Sets up actual proxies""" + """Sets up actual proxies.""" pass def teardown_console(self, context, console): - """Tears down actual proxies""" + """Tears down actual proxies.""" pass def init_host(self): - """Start up any config'ed consoles on start""" + """Start up any config'ed consoles on start.""" pass def generate_password(self, length=8): - """Returns random console password""" - return "fakepass" + """Returns random console password.""" + return 'fakepass' def get_port(self, context): - """get available port for consoles that need one""" + """Get available port for consoles that need one.""" return 5999 def fix_pool_password(self, password): - """Trim password to length, and any other massaging""" + """Trim password to length, and any other massaging.""" return password def fix_console_password(self, password): - """Trim password to length, and any other massaging""" + """Trim password to length, and any other massaging.""" return password diff --git a/nova/console/manager.py b/nova/console/manager.py index bfa571ea9..e0db21666 100644 --- a/nova/console/manager.py +++ b/nova/console/manager.py @@ -15,9 +15,7 @@ # License for the specific language governing permissions and limitations # under the License. -""" -Console Proxy Service -""" +"""Console Proxy Service.""" import functools import socket @@ -29,6 +27,7 @@ from nova import manager from nova import rpc from nova import utils + FLAGS = flags.FLAGS flags.DEFINE_string('console_driver', 'nova.console.xvp.XVPConsoleProxy', @@ -41,9 +40,11 @@ flags.DEFINE_string('console_public_hostname', class ConsoleProxyManager(manager.Manager): + """Sets up and tears down any console proxy connections. + + Needed for accessing instance consoles securely. 
- """ Sets up and tears down any proxy connections needed for accessing - instance consoles securely""" + """ def __init__(self, console_driver=None, *args, **kwargs): if not console_driver: @@ -67,7 +68,7 @@ class ConsoleProxyManager(manager.Manager): pool['id'], instance_id) except exception.NotFound: - logging.debug(_("Adding console")) + logging.debug(_('Adding console')) if not password: password = utils.generate_password(8) if not port: @@ -115,8 +116,8 @@ class ConsoleProxyManager(manager.Manager): self.db.queue_get_for(context, FLAGS.compute_topic, instance_host), - {"method": "get_console_pool_info", - "args": {"console_type": console_type}}) + {'method': 'get_console_pool_info', + 'args': {'console_type': console_type}}) pool_info['password'] = self.driver.fix_pool_password( pool_info['password']) pool_info['host'] = self.host diff --git a/nova/console/vmrc.py b/nova/console/vmrc.py index 521da289f..cc8b0cdf5 100644 --- a/nova/console/vmrc.py +++ b/nova/console/vmrc.py @@ -15,9 +15,7 @@ # License for the specific language governing permissions and limitations # under the License. -""" -VMRC console drivers. -""" +"""VMRC console drivers.""" import base64 import json @@ -27,6 +25,8 @@ from nova import flags from nova import log as logging from nova.virt.vmwareapi import vim_util + +FLAGS = flags.FLAGS flags.DEFINE_integer('console_vmrc_port', 443, "port for VMware VMRC connections") @@ -34,8 +34,6 @@ flags.DEFINE_integer('console_vmrc_error_retries', 10, "number of retries for retrieving VMRC information") -FLAGS = flags.FLAGS - class VMRCConsole(object): """VMRC console driver with ESX credentials.""" @@ -69,34 +67,33 @@ class VMRCConsole(object): return password def generate_password(self, vim_session, pool, instance_name): - """ - Returns VMRC Connection credentials. + """Returns VMRC Connection credentials. Return string is of the form '<VM PATH>:<ESX Username>@<ESX Password>'. 
+ """ username, password = pool['username'], pool['password'] - vms = vim_session._call_method(vim_util, "get_objects", - "VirtualMachine", ["name", "config.files.vmPathName"]) + vms = vim_session._call_method(vim_util, 'get_objects', + 'VirtualMachine', ['name', 'config.files.vmPathName']) vm_ds_path_name = None vm_ref = None for vm in vms: vm_name = None ds_path_name = None for prop in vm.propSet: - if prop.name == "name": + if prop.name == 'name': vm_name = prop.val - elif prop.name == "config.files.vmPathName": + elif prop.name == 'config.files.vmPathName': ds_path_name = prop.val if vm_name == instance_name: vm_ref = vm.obj vm_ds_path_name = ds_path_name break if vm_ref is None: - raise exception.NotFound(_("instance - %s not present") % - instance_name) - json_data = json.dumps({"vm_id": vm_ds_path_name, - "username": username, - "password": password}) + raise exception.InstanceNotFound(instance_id=instance_name) + json_data = json.dumps({'vm_id': vm_ds_path_name, + 'username': username, + 'password': password}) return base64.b64encode(json_data) def is_otp(self): @@ -115,28 +112,27 @@ class VMRCSessionConsole(VMRCConsole): return 'vmrc+session' def generate_password(self, vim_session, pool, instance_name): - """ - Returns a VMRC Session. + """Returns a VMRC Session. Return string is of the form '<VM MOID>:<VMRC Ticket>'. 
+ """ - vms = vim_session._call_method(vim_util, "get_objects", - "VirtualMachine", ["name"]) - vm_ref = None + vms = vim_session._call_method(vim_util, 'get_objects', + 'VirtualMachine', ['name']) + vm_ref = None for vm in vms: if vm.propSet[0].val == instance_name: vm_ref = vm.obj if vm_ref is None: - raise exception.NotFound(_("instance - %s not present") % - instance_name) + raise exception.InstanceNotFound(instance_id=instance_name) virtual_machine_ticket = \ vim_session._call_method( vim_session._get_vim(), - "AcquireCloneTicket", + 'AcquireCloneTicket', vim_session._get_vim().get_service_content().sessionManager) - json_data = json.dumps({"vm_id": str(vm_ref.value), - "username": virtual_machine_ticket, - "password": virtual_machine_ticket}) + json_data = json.dumps({'vm_id': str(vm_ref.value), + 'username': virtual_machine_ticket, + 'password': virtual_machine_ticket}) return base64.b64encode(json_data) def is_otp(self): diff --git a/nova/console/vmrc_manager.py b/nova/console/vmrc_manager.py index 09beac7a0..acecc1075 100644 --- a/nova/console/vmrc_manager.py +++ b/nova/console/vmrc_manager.py @@ -15,9 +15,7 @@ # License for the specific language governing permissions and limitations # under the License. -""" -VMRC Console Manager. 
-""" +"""VMRC Console Manager.""" from nova import exception from nova import flags @@ -25,24 +23,21 @@ from nova import log as logging from nova import manager from nova import rpc from nova import utils -from nova.virt.vmwareapi_conn import VMWareAPISession +from nova.virt import vmwareapi_conn + LOG = logging.getLogger("nova.console.vmrc_manager") + FLAGS = flags.FLAGS -flags.DEFINE_string('console_public_hostname', - '', +flags.DEFINE_string('console_public_hostname', '', 'Publicly visible name for this console host') -flags.DEFINE_string('console_driver', - 'nova.console.vmrc.VMRCConsole', +flags.DEFINE_string('console_driver', 'nova.console.vmrc.VMRCConsole', 'Driver to use for the console') class ConsoleVMRCManager(manager.Manager): - - """ - Manager to handle VMRC connections needed for accessing instance consoles. - """ + """Manager to handle VMRC connections for accessing instance consoles.""" def __init__(self, console_driver=None, *args, **kwargs): self.driver = utils.import_object(FLAGS.console_driver) @@ -56,16 +51,17 @@ class ConsoleVMRCManager(manager.Manager): """Get VIM session for the pool specified.""" vim_session = None if pool['id'] not in self.sessions.keys(): - vim_session = VMWareAPISession(pool['address'], - pool['username'], - pool['password'], - FLAGS.console_vmrc_error_retries) + vim_session = vmwareapi_conn.VMWareAPISession( + pool['address'], + pool['username'], + pool['password'], + FLAGS.console_vmrc_error_retries) self.sessions[pool['id']] = vim_session return self.sessions[pool['id']] def _generate_console(self, context, pool, name, instance_id, instance): """Sets up console for the instance.""" - LOG.debug(_("Adding console")) + LOG.debug(_('Adding console')) password = self.driver.generate_password( self._get_vim_session(pool), @@ -84,9 +80,10 @@ class ConsoleVMRCManager(manager.Manager): @exception.wrap_exception def add_console(self, context, instance_id, password=None, port=None, **kwargs): - """ - Adds a console for the 
instance. If it is one time password, then we - generate new console credentials. + """Adds a console for the instance. + + If it is one time password, then we generate new console credentials. + """ instance = self.db.instance_get(context, instance_id) host = instance['host'] @@ -97,19 +94,17 @@ class ConsoleVMRCManager(manager.Manager): pool['id'], instance_id) if self.driver.is_otp(): - console = self._generate_console( - context, - pool, - name, - instance_id, - instance) + console = self._generate_console(context, + pool, + name, + instance_id, + instance) except exception.NotFound: - console = self._generate_console( - context, - pool, - name, - instance_id, - instance) + console = self._generate_console(context, + pool, + name, + instance_id, + instance) return console['id'] @exception.wrap_exception @@ -118,13 +113,11 @@ class ConsoleVMRCManager(manager.Manager): try: console = self.db.console_get(context, console_id) except exception.NotFound: - LOG.debug(_("Tried to remove non-existent console " - "%(console_id)s.") % - {'console_id': console_id}) + LOG.debug(_('Tried to remove non-existent console ' + '%(console_id)s.') % {'console_id': console_id}) return - LOG.debug(_("Removing console " - "%(console_id)s.") % - {'console_id': console_id}) + LOG.debug(_('Removing console ' + '%(console_id)s.') % {'console_id': console_id}) self.db.console_delete(context, console_id) self.driver.teardown_console(context, console) @@ -139,11 +132,11 @@ class ConsoleVMRCManager(manager.Manager): console_type) except exception.NotFound: pool_info = rpc.call(context, - self.db.queue_get_for(context, - FLAGS.compute_topic, - instance_host), - {"method": "get_console_pool_info", - "args": {"console_type": console_type}}) + self.db.queue_get_for(context, + FLAGS.compute_topic, + instance_host), + {'method': 'get_console_pool_info', + 'args': {'console_type': console_type}}) pool_info['password'] = self.driver.fix_pool_password( pool_info['password']) pool_info['host'] = 
self.host diff --git a/nova/console/xvp.py b/nova/console/xvp.py index 0cedfbb13..3cd287183 100644 --- a/nova/console/xvp.py +++ b/nova/console/xvp.py @@ -15,16 +15,14 @@ # License for the specific language governing permissions and limitations # under the License. -""" -XVP (Xenserver VNC Proxy) driver. -""" +"""XVP (Xenserver VNC Proxy) driver.""" import fcntl import os import signal import subprocess -from Cheetah.Template import Template +from Cheetah import Template from nova import context from nova import db @@ -33,6 +31,8 @@ from nova import flags from nova import log as logging from nova import utils + +FLAGS = flags.FLAGS flags.DEFINE_string('console_xvp_conf_template', utils.abspath('console/xvp.conf.template'), 'XVP conf template') @@ -47,12 +47,11 @@ flags.DEFINE_string('console_xvp_log', 'XVP log file') flags.DEFINE_integer('console_xvp_multiplex_port', 5900, - "port for XVP to multiplex VNC connections on") -FLAGS = flags.FLAGS + 'port for XVP to multiplex VNC connections on') class XVPConsoleProxy(object): - """Sets up XVP config, and manages xvp daemon""" + """Sets up XVP config, and manages XVP daemon.""" def __init__(self): self.xvpconf_template = open(FLAGS.console_xvp_conf_template).read() @@ -61,50 +60,51 @@ class XVPConsoleProxy(object): @property def console_type(self): - return "vnc+xvp" + return 'vnc+xvp' def get_port(self, context): - """get available port for consoles that need one""" + """Get available port for consoles that need one.""" #TODO(mdragon): implement port selection for non multiplex ports, # we are not using that, but someone else may want # it. 
return FLAGS.console_xvp_multiplex_port def setup_console(self, context, console): - """Sets up actual proxies""" + """Sets up actual proxies.""" self._rebuild_xvp_conf(context.elevated()) def teardown_console(self, context, console): - """Tears down actual proxies""" + """Tears down actual proxies.""" self._rebuild_xvp_conf(context.elevated()) def init_host(self): - """Start up any config'ed consoles on start""" + """Start up any config'ed consoles on start.""" ctxt = context.get_admin_context() self._rebuild_xvp_conf(ctxt) def fix_pool_password(self, password): - """Trim password to length, and encode""" + """Trim password to length, and encode.""" return self._xvp_encrypt(password, is_pool_password=True) def fix_console_password(self, password): - """Trim password to length, and encode""" + """Trim password to length, and encode.""" return self._xvp_encrypt(password) def _rebuild_xvp_conf(self, context): - logging.debug(_("Rebuilding xvp conf")) + logging.debug(_('Rebuilding xvp conf')) pools = [pool for pool in db.console_pool_get_all_by_host_type(context, self.host, self.console_type) if pool['consoles']] if not pools: - logging.debug("No console pools!") + logging.debug('No console pools!') self._xvp_stop() return conf_data = {'multiplex_port': FLAGS.console_xvp_multiplex_port, 'pools': pools, 'pass_encode': self.fix_console_password} - config = str(Template(self.xvpconf_template, searchList=[conf_data])) + config = str(Template.Template(self.xvpconf_template, + searchList=[conf_data])) self._write_conf(config) self._xvp_restart() @@ -114,7 +114,7 @@ class XVPConsoleProxy(object): cfile.write(config) def _xvp_stop(self): - logging.debug(_("Stopping xvp")) + logging.debug(_('Stopping xvp')) pid = self._xvp_pid() if not pid: return @@ -127,19 +127,19 @@ class XVPConsoleProxy(object): def _xvp_start(self): if self._xvp_check_running(): return - logging.debug(_("Starting xvp")) + logging.debug(_('Starting xvp')) try: utils.execute('xvp', '-p', 
FLAGS.console_xvp_pid, '-c', FLAGS.console_xvp_conf, '-l', FLAGS.console_xvp_log) except exception.ProcessExecutionError, err: - logging.error(_("Error starting xvp: %s") % err) + logging.error(_('Error starting xvp: %s') % err) def _xvp_restart(self): - logging.debug(_("Restarting xvp")) + logging.debug(_('Restarting xvp')) if not self._xvp_check_running(): - logging.debug(_("xvp not running...")) + logging.debug(_('xvp not running...')) self._xvp_start() else: pid = self._xvp_pid() @@ -178,7 +178,9 @@ class XVPConsoleProxy(object): Note that xvp's obfuscation should not be considered 'real' encryption. It simply DES encrypts the passwords with static keys plainly viewable - in the xvp source code.""" + in the xvp source code. + + """ maxlen = 8 flag = '-e' if is_pool_password: diff --git a/nova/context.py b/nova/context.py index 0256bf448..c113f7ea7 100644 --- a/nova/context.py +++ b/nova/context.py @@ -16,9 +16,7 @@ # License for the specific language governing permissions and limitations # under the License. -""" -RequestContext: context for requests that persist through all of nova. -""" +"""RequestContext: context for requests that persist through all of nova.""" import datetime import random @@ -28,6 +26,12 @@ from nova import utils class RequestContext(object): + """Security context and request information. + + Represents the user taking a given action within the system. + + """ + def __init__(self, user, project, is_admin=None, read_deleted=False, remote_address=None, timestamp=None, request_id=None): if hasattr(user, 'id'): diff --git a/nova/crypto.py b/nova/crypto.py index 9b1897926..bdc32482a 100644 --- a/nova/crypto.py +++ b/nova/crypto.py @@ -15,10 +15,11 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -""" -Wrappers around standard crypto data elements. + +"""Wrappers around standard crypto data elements. 
Includes root and intermediate CAs, SSH key_pairs and x509 certificates. + """ import base64 @@ -43,6 +44,8 @@ from nova import log as logging LOG = logging.getLogger("nova.crypto") + + FLAGS = flags.FLAGS flags.DEFINE_string('ca_file', 'cacert.pem', _('Filename of root CA')) flags.DEFINE_string('key_file', @@ -90,13 +93,13 @@ def key_path(project_id=None): def fetch_ca(project_id=None, chain=True): if not FLAGS.use_project_ca: project_id = None - buffer = "" + buffer = '' if project_id: - with open(ca_path(project_id), "r") as cafile: + with open(ca_path(project_id), 'r') as cafile: buffer += cafile.read() if not chain: return buffer - with open(ca_path(None), "r") as cafile: + with open(ca_path(None), 'r') as cafile: buffer += cafile.read() return buffer @@ -143,7 +146,7 @@ def ssl_pub_to_ssh_pub(ssl_public_key, name='root', suffix='nova'): def revoke_cert(project_id, file_name): - """Revoke a cert by file name""" + """Revoke a cert by file name.""" start = os.getcwd() os.chdir(ca_folder(project_id)) # NOTE(vish): potential race condition here @@ -155,14 +158,14 @@ def revoke_cert(project_id, file_name): def revoke_certs_by_user(user_id): - """Revoke all user certs""" + """Revoke all user certs.""" admin = context.get_admin_context() for cert in db.certificate_get_all_by_user(admin, user_id): revoke_cert(cert['project_id'], cert['file_name']) def revoke_certs_by_project(project_id): - """Revoke all project certs""" + """Revoke all project certs.""" # NOTE(vish): This is somewhat useless because we can just shut down # the vpn. 
admin = context.get_admin_context() @@ -171,29 +174,29 @@ def revoke_certs_by_project(project_id): def revoke_certs_by_user_and_project(user_id, project_id): - """Revoke certs for user in project""" + """Revoke certs for user in project.""" admin = context.get_admin_context() for cert in db.certificate_get_all_by_user(admin, user_id, project_id): revoke_cert(cert['project_id'], cert['file_name']) def _project_cert_subject(project_id): - """Helper to generate user cert subject""" + """Helper to generate user cert subject.""" return FLAGS.project_cert_subject % (project_id, utils.isotime()) def _vpn_cert_subject(project_id): - """Helper to generate user cert subject""" + """Helper to generate user cert subject.""" return FLAGS.vpn_cert_subject % (project_id, utils.isotime()) def _user_cert_subject(user_id, project_id): - """Helper to generate user cert subject""" + """Helper to generate user cert subject.""" return FLAGS.user_cert_subject % (project_id, user_id, utils.isotime()) def generate_x509_cert(user_id, project_id, bits=1024): - """Generate and sign a cert for user in project""" + """Generate and sign a cert for user in project.""" subject = _user_cert_subject(user_id, project_id) tmpdir = tempfile.mkdtemp() keyfile = os.path.abspath(os.path.join(tmpdir, 'temp.key')) @@ -205,7 +208,7 @@ def generate_x509_cert(user_id, project_id, bits=1024): csr = open(csrfile).read() shutil.rmtree(tmpdir) (serial, signed_csr) = sign_csr(csr, project_id) - fname = os.path.join(ca_folder(project_id), "newcerts/%s.pem" % serial) + fname = os.path.join(ca_folder(project_id), 'newcerts/%s.pem' % serial) cert = {'user_id': user_id, 'project_id': project_id, 'file_name': fname} @@ -227,12 +230,12 @@ def _ensure_project_folder(project_id): def generate_vpn_files(project_id): project_folder = ca_folder(project_id) - csr_fn = os.path.join(project_folder, "server.csr") - crt_fn = os.path.join(project_folder, "server.crt") + csr_fn = os.path.join(project_folder, 'server.csr') + crt_fn = 
os.path.join(project_folder, 'server.crt') genvpn_sh_path = os.path.join(os.path.dirname(__file__), 'CA', - 'geninter.sh') + 'genvpn.sh') if os.path.exists(crt_fn): return _ensure_project_folder(project_id) @@ -241,10 +244,10 @@ def generate_vpn_files(project_id): # TODO(vish): the shell scripts could all be done in python utils.execute('sh', genvpn_sh_path, project_id, _vpn_cert_subject(project_id)) - with open(csr_fn, "r") as csrfile: + with open(csr_fn, 'r') as csrfile: csr_text = csrfile.read() (serial, signed_csr) = sign_csr(csr_text, project_id) - with open(crt_fn, "w") as crtfile: + with open(crt_fn, 'w') as crtfile: crtfile.write(signed_csr) os.chdir(start) @@ -261,12 +264,12 @@ def sign_csr(csr_text, project_id=None): def _sign_csr(csr_text, ca_folder): tmpfolder = tempfile.mkdtemp() - inbound = os.path.join(tmpfolder, "inbound.csr") - outbound = os.path.join(tmpfolder, "outbound.csr") - csrfile = open(inbound, "w") + inbound = os.path.join(tmpfolder, 'inbound.csr') + outbound = os.path.join(tmpfolder, 'outbound.csr') + csrfile = open(inbound, 'w') csrfile.write(csr_text) csrfile.close() - LOG.debug(_("Flags path: %s"), ca_folder) + LOG.debug(_('Flags path: %s'), ca_folder) start = os.getcwd() # Change working dir to CA if not os.path.exists(ca_folder): @@ -276,13 +279,13 @@ def _sign_csr(csr_text, ca_folder): './openssl.cnf', '-infiles', inbound) out, _err = utils.execute('openssl', 'x509', '-in', outbound, '-serial', '-noout') - serial = string.strip(out.rpartition("=")[2]) + serial = string.strip(out.rpartition('=')[2]) os.chdir(start) - with open(outbound, "r") as crtfile: + with open(outbound, 'r') as crtfile: return (serial, crtfile.read()) -def mkreq(bits, subject="foo", ca=0): +def mkreq(bits, subject='foo', ca=0): pk = M2Crypto.EVP.PKey() req = M2Crypto.X509.Request() rsa = M2Crypto.RSA.gen_key(bits, 65537, callback=lambda: None) @@ -314,7 +317,7 @@ def mkcacert(subject='nova', years=1): cert.set_not_before(now) cert.set_not_after(nowPlusYear) 
issuer = M2Crypto.X509.X509_Name() - issuer.C = "US" + issuer.C = 'US' issuer.CN = subject cert.set_issuer(issuer) cert.set_pubkey(pkey) @@ -329,6 +332,51 @@ def mkcacert(subject='nova', years=1): return cert, pk, pkey +def _build_cipher(key, iv, encode=True): + """Make a 128bit AES CBC encode/decode Cipher object. + Padding is handled internally.""" + operation = 1 if encode else 0 + return M2Crypto.EVP.Cipher(alg='aes_128_cbc', key=key, iv=iv, op=operation) + + +def encryptor(key, iv=None): + """Simple symmetric key encryption.""" + key = base64.b64decode(key) + if iv is None: + iv = '\0' * 16 + else: + iv = base64.b64decode(iv) + + def encrypt(data): + cipher = _build_cipher(key, iv, encode=True) + v = cipher.update(data) + v = v + cipher.final() + del cipher + v = base64.b64encode(v) + return v + + return encrypt + + +def decryptor(key, iv=None): + """Simple symmetric key decryption.""" + key = base64.b64decode(key) + if iv is None: + iv = '\0' * 16 + else: + iv = base64.b64decode(iv) + + def decrypt(data): + data = base64.b64decode(data) + cipher = _build_cipher(key, iv, encode=False) + v = cipher.update(data) + v = v + cipher.final() + del cipher + return v + + return decrypt + + # Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/ # # Permission is hereby granted, free of charge, to any person obtaining a @@ -352,13 +400,15 @@ def mkcacert(subject='nova', years=1): # http://code.google.com/p/boto def compute_md5(fp): - """ + """Compute an md5 hash. + :type fp: file :param fp: File pointer to the file to MD5 hash. The file pointer will be reset to the beginning of the file before the method returns. :rtype: tuple - :return: the hex digest version of the MD5 hash + :returns: the hex digest version of the MD5 hash + """ m = hashlib.md5() fp.seek(0) diff --git a/nova/db/api.py b/nova/db/api.py index 3c4d21941..f5c71019a 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -15,8 +15,8 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. -""" -Defines interface for DB access. + +"""Defines interface for DB access. The underlying driver is loaded as a :class:`LazyPluggable`. @@ -30,6 +30,7 @@ The underlying driver is loaded as a :class:`LazyPluggable`. :enable_new_services: when adding a new service to the database, is it in the pool of available hardware (Default: True) + """ from nova import exception @@ -86,7 +87,7 @@ def service_get(context, service_id): def service_get_by_host_and_topic(context, host, topic): - """Get a service by host it's on and topic it listens to""" + """Get a service by host it's on and topic it listens to.""" return IMPL.service_get_by_host_and_topic(context, host, topic) @@ -113,7 +114,7 @@ def service_get_all_compute_by_host(context, host): def service_get_all_compute_sorted(context): """Get all compute services sorted by instance count. - Returns a list of (Service, instance_count) tuples. + :returns: a list of (Service, instance_count) tuples. """ return IMPL.service_get_all_compute_sorted(context) @@ -122,7 +123,7 @@ def service_get_all_compute_sorted(context): def service_get_all_network_sorted(context): """Get all network services sorted by network count. - Returns a list of (Service, network_count) tuples. + :returns: a list of (Service, network_count) tuples. """ return IMPL.service_get_all_network_sorted(context) @@ -131,7 +132,7 @@ def service_get_all_network_sorted(context): def service_get_all_volume_sorted(context): """Get all volume services sorted by volume count. - Returns a list of (Service, volume_count) tuples. + :returns: a list of (Service, volume_count) tuples. 
""" return IMPL.service_get_all_volume_sorted(context) @@ -241,7 +242,7 @@ def floating_ip_count_by_project(context, project_id): def floating_ip_deallocate(context, address): - """Deallocate an floating ip by address""" + """Deallocate an floating ip by address.""" return IMPL.floating_ip_deallocate(context, address) @@ -253,7 +254,7 @@ def floating_ip_destroy(context, address): def floating_ip_disassociate(context, address): """Disassociate an floating ip from a fixed ip by address. - Returns the address of the existing fixed ip. + :returns: the address of the existing fixed ip. """ return IMPL.floating_ip_disassociate(context, address) @@ -291,25 +292,30 @@ def floating_ip_update(context, address, values): return IMPL.floating_ip_update(context, address, values) +def floating_ip_set_auto_assigned(context, address): + """Set auto_assigned flag to floating ip""" + return IMPL.floating_ip_set_auto_assigned(context, address) + #################### + def migration_update(context, id, values): - """Update a migration instance""" + """Update a migration instance.""" return IMPL.migration_update(context, id, values) def migration_create(context, values): - """Create a migration record""" + """Create a migration record.""" return IMPL.migration_create(context, values) def migration_get(context, migration_id): - """Finds a migration by the id""" + """Finds a migration by the id.""" return IMPL.migration_get(context, migration_id) def migration_get_by_instance_and_status(context, instance_id, status): - """Finds a migration by the instance id its migrating""" + """Finds a migration by the instance id its migrating.""" return IMPL.migration_get_by_instance_and_status(context, instance_id, status) @@ -397,7 +403,7 @@ def instance_create(context, values): def instance_data_get_for_project(context, project_id): - """Get (instance_count, core_count) for project.""" + """Get (instance_count, total_cores, total_ram) for project.""" return 
IMPL.instance_data_get_for_project(context, project_id) @@ -455,11 +461,6 @@ def instance_get_project_vpn(context, project_id): return IMPL.instance_get_project_vpn(context, project_id) -def instance_is_vpn(context, instance_id): - """True if instance is a vpn.""" - return IMPL.instance_is_vpn(context, instance_id) - - def instance_set_state(context, instance_id, state, description=None): """Set the state of an instance.""" return IMPL.instance_set_state(context, instance_id, state, description) @@ -579,7 +580,9 @@ def network_create_safe(context, values): def network_delete_safe(context, network_id): """Delete network with key network_id. + This method assumes that the network is not associated with any project + """ return IMPL.network_delete_safe(context, network_id) @@ -674,7 +677,6 @@ def project_get_network(context, project_id, associate=True): network if one is not found, otherwise it returns None. """ - return IMPL.project_get_network(context, project_id, associate) @@ -722,7 +724,9 @@ def iscsi_target_create_safe(context, values): The device is not returned. If the create violates the unique constraints because the iscsi_target and host already exist, - no exception is raised.""" + no exception is raised. 
+ + """ return IMPL.iscsi_target_create_safe(context, values) @@ -752,24 +756,34 @@ def auth_token_create(context, token): ################### -def quota_create(context, values): - """Create a quota from the values dictionary.""" - return IMPL.quota_create(context, values) +def quota_create(context, project_id, resource, limit): + """Create a quota for the given project and resource.""" + return IMPL.quota_create(context, project_id, resource, limit) -def quota_get(context, project_id): +def quota_get(context, project_id, resource): """Retrieve a quota or raise if it does not exist.""" - return IMPL.quota_get(context, project_id) + return IMPL.quota_get(context, project_id, resource) + + +def quota_get_all_by_project(context, project_id): + """Retrieve all quotas associated with a given project.""" + return IMPL.quota_get_all_by_project(context, project_id) -def quota_update(context, project_id, values): - """Update a quota from the values dictionary.""" - return IMPL.quota_update(context, project_id, values) +def quota_update(context, project_id, resource, limit): + """Update a quota or raise if it does not exist.""" + return IMPL.quota_update(context, project_id, resource, limit) -def quota_destroy(context, project_id): +def quota_destroy(context, project_id, resource): """Destroy the quota or raise if it does not exist.""" - return IMPL.quota_destroy(context, project_id) + return IMPL.quota_destroy(context, project_id, resource) + + +def quota_destroy_all_by_project(context, project_id): + """Destroy all quotas associated with a given project.""" + return IMPL.quota_get_all_by_project(context, project_id) ################### @@ -1063,10 +1077,7 @@ def project_delete(context, project_id): def host_get_networks(context, host): - """Return all networks for which the given host is the designated - network host. 
- - """ + """All networks for which the given host is the network host.""" return IMPL.host_get_networks(context, host) @@ -1128,33 +1139,40 @@ def console_get(context, console_id, instance_id=None): def instance_type_create(context, values): - """Create a new instance type""" + """Create a new instance type.""" return IMPL.instance_type_create(context, values) def instance_type_get_all(context, inactive=False): - """Get all instance types""" + """Get all instance types.""" return IMPL.instance_type_get_all(context, inactive) +def instance_type_get_by_id(context, id): + """Get instance type by id.""" + return IMPL.instance_type_get_by_id(context, id) + + def instance_type_get_by_name(context, name): - """Get instance type by name""" + """Get instance type by name.""" return IMPL.instance_type_get_by_name(context, name) def instance_type_get_by_flavor_id(context, id): - """Get instance type by name""" + """Get instance type by name.""" return IMPL.instance_type_get_by_flavor_id(context, id) def instance_type_destroy(context, name): - """Delete a instance type""" + """Delete a instance type.""" return IMPL.instance_type_destroy(context, name) def instance_type_purge(context, name): - """Purges (removes) an instance type from DB - Use instance_type_destroy for most cases + """Purges (removes) an instance type from DB. 
+ + Use instance_type_destroy for most cases + """ return IMPL.instance_type_purge(context, name) @@ -1191,15 +1209,15 @@ def zone_get_all(context): def instance_metadata_get(context, instance_id): - """Get all metadata for an instance""" + """Get all metadata for an instance.""" return IMPL.instance_metadata_get(context, instance_id) def instance_metadata_delete(context, instance_id, key): - """Delete the given metadata item""" + """Delete the given metadata item.""" IMPL.instance_metadata_delete(context, instance_id, key) def instance_metadata_update_or_create(context, instance_id, metadata): - """Create or update instance metadata""" + """Create or update instance metadata.""" IMPL.instance_metadata_update_or_create(context, instance_id, metadata) diff --git a/nova/db/base.py b/nova/db/base.py index a0f2180c6..a0d055d5b 100644 --- a/nova/db/base.py +++ b/nova/db/base.py @@ -16,20 +16,20 @@ # License for the specific language governing permissions and limitations # under the License. -""" -Base class for classes that need modular database access. -""" +"""Base class for classes that need modular database access.""" from nova import utils from nova import flags + FLAGS = flags.FLAGS flags.DEFINE_string('db_driver', 'nova.db.api', 'driver to use for database access') class Base(object): - """DB driver is injected in the init method""" + """DB driver is injected in the init method.""" + def __init__(self, db_driver=None): if not db_driver: db_driver = FLAGS.db_driver diff --git a/nova/db/migration.py b/nova/db/migration.py index e54b90cd8..ccd06cffe 100644 --- a/nova/db/migration.py +++ b/nova/db/migration.py @@ -15,11 +15,13 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
+ """Database setup and migration commands.""" from nova import flags from nova import utils + FLAGS = flags.FLAGS flags.DECLARE('db_backend', 'nova.db.api') diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 10bbe715c..939e02a84 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -25,6 +25,7 @@ import warnings from nova import db from nova import exception from nova import flags +from nova import ipv6 from nova import utils from nova.db.sqlalchemy import models from nova.db.sqlalchemy.session import get_session @@ -94,7 +95,7 @@ def require_admin_context(f): """ def wrapper(*args, **kwargs): if not is_admin_context(args[0]): - raise exception.NotAuthorized() + raise exception.AdminRequired() return f(*args, **kwargs) return wrapper @@ -105,7 +106,7 @@ def require_context(f): """ def wrapper(*args, **kwargs): if not is_admin_context(args[0]) and not is_user_context(args[0]): - raise exception.NotAuthorized() + raise exception.AdminRequired() return f(*args, **kwargs) return wrapper @@ -137,7 +138,7 @@ def service_get(context, service_id, session=None): first() if not result: - raise exception.NotFound(_('No service for id %s') % service_id) + raise exception.ServiceNotFound(service_id=service_id) return result @@ -196,8 +197,7 @@ def service_get_all_compute_by_host(context, host): all() if not result: - raise exception.NotFound(_("%s does not exist or is not " - "a compute node.") % host) + raise exception.ComputeHostNotFound(host=host) return result @@ -284,8 +284,7 @@ def service_get_by_args(context, host, binary): filter_by(deleted=can_read_deleted(context)).\ first() if not result: - raise exception.NotFound(_('No service for %(host)s, %(binary)s') - % locals()) + raise exception.HostBinaryNotFound(host=host, binary=binary) return result @@ -323,7 +322,7 @@ def compute_node_get(context, compute_id, session=None): first() if not result: - raise exception.NotFound(_('No computeNode for id %s') % compute_id) + raise 
exception.ComputeHostNotFound(host=compute_id) return result @@ -359,7 +358,7 @@ def certificate_get(context, certificate_id, session=None): first() if not result: - raise exception.NotFound('No certificate for id %s' % certificate_id) + raise exception.CertificateNotFound(certificate_id=certificate_id) return result @@ -461,6 +460,7 @@ def floating_ip_count_by_project(context, project_id): session = get_session() return session.query(models.FloatingIp).\ filter_by(project_id=project_id).\ + filter_by(auto_assigned=False).\ filter_by(deleted=False).\ count() @@ -489,6 +489,7 @@ def floating_ip_deallocate(context, address): address, session=session) floating_ip_ref['project_id'] = None + floating_ip_ref['auto_assigned'] = False floating_ip_ref.save(session=session) @@ -522,6 +523,17 @@ def floating_ip_disassociate(context, address): return fixed_ip_address +@require_context +def floating_ip_set_auto_assigned(context, address): + session = get_session() + with session.begin(): + floating_ip_ref = floating_ip_get_by_address(context, + address, + session=session) + floating_ip_ref.auto_assigned = True + floating_ip_ref.save(session=session) + + @require_admin_context def floating_ip_get_all(context): session = get_session() @@ -548,6 +560,7 @@ def floating_ip_get_all_by_project(context, project_id): return session.query(models.FloatingIp).\ options(joinedload_all('fixed_ip.instance')).\ filter_by(project_id=project_id).\ + filter_by(auto_assigned=False).\ filter_by(deleted=False).\ all() @@ -564,7 +577,7 @@ def floating_ip_get_by_address(context, address, session=None): filter_by(deleted=can_read_deleted(context)).\ first() if not result: - raise exception.NotFound('No floating ip for address %s' % address) + raise exception.FloatingIpNotFound(fixed_ip=address) return result @@ -672,7 +685,7 @@ def fixed_ip_get_all(context, session=None): session = get_session() result = session.query(models.FixedIp).all() if not result: - raise exception.NotFound(_('No fixed ips 
defined')) + raise exception.NoFloatingIpsDefined() return result @@ -688,7 +701,7 @@ def fixed_ip_get_all_by_host(context, host=None): all() if not result: - raise exception.NotFound(_('No fixed ips for this host defined')) + raise exception.NoFloatingIpsDefinedForHost(host=host) return result @@ -704,7 +717,7 @@ def fixed_ip_get_by_address(context, address, session=None): options(joinedload('instance')).\ first() if not result: - raise exception.NotFound(_('No floating ip for address %s') % address) + raise exception.FloatingIpNotFound(fixed_ip=address) if is_user_context(context): authorize_project_context(context, result.instance.project_id) @@ -725,14 +738,14 @@ def fixed_ip_get_all_by_instance(context, instance_id): filter_by(instance_id=instance_id).\ filter_by(deleted=False) if not rv: - raise exception.NotFound(_('No address for instance %s') % instance_id) + raise exception.NoFloatingIpsFoundForInstance(instance_id=instance_id) return rv @require_context def fixed_ip_get_instance_v6(context, address): session = get_session() - mac = utils.to_mac(address) + mac = ipv6.to_mac(address) result = session.query(models.Instance).\ filter_by(mac_address=mac).\ @@ -770,9 +783,10 @@ def instance_create(context, values): metadata = values.get('metadata') metadata_refs = [] if metadata: - for metadata_item in metadata: + for k, v in metadata.iteritems(): metadata_ref = models.InstanceMetadata() - metadata_ref.update(metadata_item) + metadata_ref['key'] = k + metadata_ref['value'] = v metadata_refs.append(metadata_ref) values['metadata'] = metadata_refs @@ -789,12 +803,13 @@ def instance_create(context, values): def instance_data_get_for_project(context, project_id): session = get_session() result = session.query(func.count(models.Instance.id), - func.sum(models.Instance.vcpus)).\ + func.sum(models.Instance.vcpus), + func.sum(models.Instance.memory_mb)).\ filter_by(project_id=project_id).\ filter_by(deleted=False).\ first() # NOTE(vish): convert None to 0 - return 
(result[0] or 0, result[1] or 0) + return (result[0] or 0, result[1] or 0, result[2] or 0) @require_context @@ -803,17 +818,17 @@ def instance_destroy(context, instance_id): with session.begin(): session.query(models.Instance).\ filter_by(id=instance_id).\ - update({'deleted': 1, + update({'deleted': True, 'deleted_at': datetime.datetime.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.SecurityGroupInstanceAssociation).\ filter_by(instance_id=instance_id).\ - update({'deleted': 1, + update({'deleted': True, 'deleted_at': datetime.datetime.utcnow(), 'updated_at': literal_column('updated_at')}) session.query(models.InstanceMetadata).\ filter_by(instance_id=instance_id).\ - update({'deleted': 1, + update({'deleted': True, 'deleted_at': datetime.datetime.utcnow(), 'updated_at': literal_column('updated_at')}) @@ -831,6 +846,7 @@ def instance_get(context, instance_id, session=None): options(joinedload('volumes')).\ options(joinedload_all('fixed_ip.network')).\ options(joinedload('metadata')).\ + options(joinedload('instance_type')).\ filter_by(id=instance_id).\ filter_by(deleted=can_read_deleted(context)).\ first() @@ -840,14 +856,13 @@ def instance_get(context, instance_id, session=None): options(joinedload_all('security_groups.rules')).\ options(joinedload('volumes')).\ options(joinedload('metadata')).\ + options(joinedload('instance_type')).\ filter_by(project_id=context.project_id).\ filter_by(id=instance_id).\ filter_by(deleted=False).\ first() if not result: - raise exception.InstanceNotFound(_('Instance %s not found') - % instance_id, - instance_id) + raise exception.InstanceNotFound(instance_id=instance_id) return result @@ -859,6 +874,8 @@ def instance_get_all(context): options(joinedload_all('fixed_ip.floating_ips')).\ options(joinedload('security_groups')).\ options(joinedload_all('fixed_ip.network')).\ + options(joinedload('metadata')).\ + options(joinedload('instance_type')).\ filter_by(deleted=can_read_deleted(context)).\ all() @@ 
-870,6 +887,8 @@ def instance_get_all_by_user(context, user_id): options(joinedload_all('fixed_ip.floating_ips')).\ options(joinedload('security_groups')).\ options(joinedload_all('fixed_ip.network')).\ + options(joinedload('metadata')).\ + options(joinedload('instance_type')).\ filter_by(deleted=can_read_deleted(context)).\ filter_by(user_id=user_id).\ all() @@ -882,6 +901,7 @@ def instance_get_all_by_host(context, host): options(joinedload_all('fixed_ip.floating_ips')).\ options(joinedload('security_groups')).\ options(joinedload_all('fixed_ip.network')).\ + options(joinedload('instance_type')).\ filter_by(host=host).\ filter_by(deleted=can_read_deleted(context)).\ all() @@ -896,6 +916,7 @@ def instance_get_all_by_project(context, project_id): options(joinedload_all('fixed_ip.floating_ips')).\ options(joinedload('security_groups')).\ options(joinedload_all('fixed_ip.network')).\ + options(joinedload('instance_type')).\ filter_by(project_id=project_id).\ filter_by(deleted=can_read_deleted(context)).\ all() @@ -910,6 +931,7 @@ def instance_get_all_by_reservation(context, reservation_id): options(joinedload_all('fixed_ip.floating_ips')).\ options(joinedload('security_groups')).\ options(joinedload_all('fixed_ip.network')).\ + options(joinedload('instance_type')).\ filter_by(reservation_id=reservation_id).\ filter_by(deleted=can_read_deleted(context)).\ all() @@ -918,6 +940,7 @@ def instance_get_all_by_reservation(context, reservation_id): options(joinedload_all('fixed_ip.floating_ips')).\ options(joinedload('security_groups')).\ options(joinedload_all('fixed_ip.network')).\ + options(joinedload('instance_type')).\ filter_by(project_id=context.project_id).\ filter_by(reservation_id=reservation_id).\ filter_by(deleted=False).\ @@ -930,8 +953,9 @@ def instance_get_project_vpn(context, project_id): return session.query(models.Instance).\ options(joinedload_all('fixed_ip.floating_ips')).\ options(joinedload('security_groups')).\ + options(joinedload('instance_type')).\ 
filter_by(project_id=project_id).\ - filter_by(image_id=FLAGS.vpn_image_id).\ + filter_by(image_id=str(FLAGS.vpn_image_id)).\ filter_by(deleted=can_read_deleted(context)).\ first() @@ -954,7 +978,8 @@ def instance_get_fixed_address_v6(context, instance_id): network_ref = network_get_by_instance(context, instance_id) prefix = network_ref.cidr_v6 mac = instance_ref.mac_address - return utils.to_global_ipv6(prefix, mac) + project_id = instance_ref.project_id + return ipv6.to_global(prefix, mac, project_id) @require_context @@ -971,13 +996,6 @@ def instance_get_floating_address(context, instance_id): @require_admin_context -def instance_is_vpn(context, instance_id): - # TODO(vish): Move this into image code somewhere - instance_ref = instance_get(context, instance_id) - return instance_ref['image_id'] == FLAGS.vpn_image_id - - -@require_admin_context def instance_set_state(context, instance_id, state, description=None): # TODO(devcamcar): Move this out of models and into driver from nova.compute import power_state @@ -1116,8 +1134,7 @@ def key_pair_get(context, user_id, name, session=None): filter_by(deleted=can_read_deleted(context)).\ first() if not result: - raise exception.NotFound(_('no keypair for user %(user_id)s,' - ' name %(name)s') % locals()) + raise exception.KeypairNotFound(user_id=user_id, name=name) return result @@ -1243,7 +1260,7 @@ def network_get(context, network_id, session=None): filter_by(deleted=False).\ first() if not result: - raise exception.NotFound(_('No network for id %s') % network_id) + raise exception.NetworkNotFound(network_id=network_id) return result @@ -1253,7 +1270,7 @@ def network_get_all(context): session = get_session() result = session.query(models.Network) if not result: - raise exception.NotFound(_('No networks defined')) + raise exception.NoNetworksFound() return result @@ -1282,7 +1299,7 @@ def network_get_by_bridge(context, bridge): first() if not result: - raise exception.NotFound(_('No network for bridge %s') % bridge) + 
raise exception.NetworkNotFoundForBridge(bridge=bridge) return result @@ -1293,8 +1310,7 @@ def network_get_by_cidr(context, cidr): filter_by(cidr=cidr).first() if not result: - raise exception.NotFound(_('Network with cidr %s does not exist') % - cidr) + raise exception.NetworkNotFoundForCidr(cidr=cidr) return result @@ -1308,7 +1324,7 @@ def network_get_by_instance(_context, instance_id): filter_by(deleted=False).\ first() if not rv: - raise exception.NotFound(_('No network for instance %s') % instance_id) + raise exception.NetworkNotFoundForInstance(instance_id=instance_id) return rv @@ -1321,7 +1337,7 @@ def network_get_all_by_instance(_context, instance_id): filter_by(instance_id=instance_id).\ filter_by(deleted=False) if not rv: - raise exception.NotFound(_('No network for instance %s') % instance_id) + raise exception.NetworkNotFoundForInstance(instance_id=instance_id) return rv @@ -1335,7 +1351,7 @@ def network_set_host(context, network_id, host_id): with_lockmode('update').\ first() if not network_ref: - raise exception.NotFound(_('No network for id %s') % network_id) + raise exception.NetworkNotFound(network_id=network_id) # NOTE(vish): if with_lockmode isn't supported, as in sqlite, # then this has concurrency issues @@ -1460,7 +1476,7 @@ def auth_token_get(context, token_hash, session=None): filter_by(deleted=can_read_deleted(context)).\ first() if not tk: - raise exception.NotFound(_('Token %s does not exist') % token_hash) + raise exception.AuthTokenNotFound(token=token_hash) return tk @@ -1484,46 +1500,72 @@ def auth_token_create(_context, token): ################### -@require_admin_context -def quota_get(context, project_id, session=None): +@require_context +def quota_get(context, project_id, resource, session=None): if not session: session = get_session() - result = session.query(models.Quota).\ filter_by(project_id=project_id).\ - filter_by(deleted=can_read_deleted(context)).\ + filter_by(resource=resource).\ + filter_by(deleted=False).\ first() 
if not result: - raise exception.NotFound(_('No quota for project_id %s') % project_id) + raise exception.ProjectQuotaNotFound(project_id=project_id) + return result + +@require_context +def quota_get_all_by_project(context, project_id): + session = get_session() + result = {'project_id': project_id} + rows = session.query(models.Quota).\ + filter_by(project_id=project_id).\ + filter_by(deleted=False).\ + all() + for row in rows: + result[row.resource] = row.hard_limit return result @require_admin_context -def quota_create(context, values): +def quota_create(context, project_id, resource, limit): quota_ref = models.Quota() - quota_ref.update(values) + quota_ref.project_id = project_id + quota_ref.resource = resource + quota_ref.hard_limit = limit quota_ref.save() return quota_ref @require_admin_context -def quota_update(context, project_id, values): +def quota_update(context, project_id, resource, limit): session = get_session() with session.begin(): - quota_ref = quota_get(context, project_id, session=session) - quota_ref.update(values) + quota_ref = quota_get(context, project_id, resource, session=session) + quota_ref.hard_limit = limit quota_ref.save(session=session) @require_admin_context -def quota_destroy(context, project_id): +def quota_destroy(context, project_id, resource): session = get_session() with session.begin(): - quota_ref = quota_get(context, project_id, session=session) + quota_ref = quota_get(context, project_id, resource, session=session) quota_ref.delete(session=session) +@require_admin_context +def quota_destroy_all_by_project(context, project_id): + session = get_session() + with session.begin(): + quotas = session.query(models.Quota).\ + filter_by(project_id=project_id).\ + filter_by(deleted=False).\ + all() + for quota_ref in quotas: + quota_ref.delete(session=session) + + ################### @@ -1649,8 +1691,7 @@ def volume_get(context, volume_id, session=None): filter_by(deleted=False).\ first() if not result: - raise 
exception.VolumeNotFound(_('Volume %s not found') % volume_id, - volume_id) + raise exception.VolumeNotFound(volume_id=volume_id) return result @@ -1682,7 +1723,7 @@ def volume_get_all_by_instance(context, instance_id): filter_by(deleted=False).\ all() if not result: - raise exception.NotFound(_('No volume for instance %s') % instance_id) + raise exception.VolumeNotFoundForInstance(instance_id=instance_id) return result @@ -1707,8 +1748,7 @@ def volume_get_instance(context, volume_id): options(joinedload('instance')).\ first() if not result: - raise exception.VolumeNotFound(_('Volume %s not found') % volume_id, - volume_id) + raise exception.VolumeNotFound(volume_id=volume_id) return result.instance @@ -1720,8 +1760,7 @@ def volume_get_shelf_and_blade(context, volume_id): filter_by(volume_id=volume_id).\ first() if not result: - raise exception.NotFound(_('No export device found for volume %s') % - volume_id) + raise exception.ExportDeviceNotFoundForVolume(volume_id=volume_id) return (result.shelf_id, result.blade_id) @@ -1733,8 +1772,7 @@ def volume_get_iscsi_target_num(context, volume_id): filter_by(volume_id=volume_id).\ first() if not result: - raise exception.NotFound(_('No target id found for volume %s') % - volume_id) + raise exception.ISCSITargetNotFoundForVolume(volume_id=volume_id) return result.target_num @@ -1778,8 +1816,8 @@ def security_group_get(context, security_group_id, session=None): options(joinedload_all('rules')).\ first() if not result: - raise exception.NotFound(_("No security group with id %s") % - security_group_id) + raise exception.SecurityGroupNotFound( + security_group_id=security_group_id) return result @@ -1794,9 +1832,8 @@ def security_group_get_by_name(context, project_id, group_name): options(joinedload_all('instances')).\ first() if not result: - raise exception.NotFound( - _('No security group named %(group_name)s' - ' for project: %(project_id)s') % locals()) + raise 
exception.SecurityGroupNotFoundForProject(project_id=project_id, + security_group_id=group_name) return result @@ -1826,7 +1863,7 @@ def security_group_get_by_instance(context, instance_id): def security_group_exists(context, project_id, group_name): try: group = security_group_get_by_name(context, project_id, group_name) - return group != None + return group is not None except exception.NotFound: return False @@ -1897,8 +1934,8 @@ def security_group_rule_get(context, security_group_rule_id, session=None): filter_by(id=security_group_rule_id).\ first() if not result: - raise exception.NotFound(_("No secuity group rule with id %s") % - security_group_rule_id) + raise exception.SecurityGroupNotFoundForRule( + rule_id=security_group_rule_id) return result @@ -1994,7 +2031,7 @@ def user_get(context, id, session=None): first() if not result: - raise exception.NotFound(_('No user for id %s') % id) + raise exception.UserNotFound(user_id=id) return result @@ -2010,7 +2047,7 @@ def user_get_by_access_key(context, access_key, session=None): first() if not result: - raise exception.NotFound(_('No user for access key %s') % access_key) + raise exception.AccessKeyNotFound(access_key=access_key) return result @@ -2075,7 +2112,7 @@ def project_get(context, id, session=None): first() if not result: - raise exception.NotFound(_("No project with id %s") % id) + raise exception.ProjectNotFound(project_id=id) return result @@ -2096,7 +2133,7 @@ def project_get_by_user(context, user_id): options(joinedload_all('projects')).\ first() if not user: - raise exception.NotFound(_('Invalid user_id %s') % user_id) + raise exception.UserNotFound(user_id=user_id) return user.projects @@ -2236,8 +2273,7 @@ def migration_get(context, id, session=None): result = session.query(models.Migration).\ filter_by(id=id).first() if not result: - raise exception.NotFound(_("No migration found with id %s") - % id) + raise exception.MigrationNotFound(migration_id=id) return result @@ -2248,8 +2284,8 @@ def 
migration_get_by_instance_and_status(context, instance_id, status): filter_by(instance_id=instance_id).\ filter_by(status=status).first() if not result: - raise exception.NotFound(_("No migration found for instance " - "%(instance_id)s with status %(status)s") % locals()) + raise exception.MigrationNotFoundByStatus(instance_id=instance_id, + status=status) return result @@ -2270,8 +2306,7 @@ def console_pool_get(context, pool_id): filter_by(id=pool_id).\ first() if not result: - raise exception.NotFound(_("No console pool with id %(pool_id)s") - % locals()) + raise exception.ConsolePoolNotFound(pool_id=pool_id) return result @@ -2287,9 +2322,9 @@ def console_pool_get_by_host_type(context, compute_host, host, options(joinedload('consoles')).\ first() if not result: - raise exception.NotFound(_('No console pool of type %(console_type)s ' - 'for compute host %(compute_host)s ' - 'on proxy host %(host)s') % locals()) + raise exception.ConsolePoolNotFoundForHostType(host=host, + console_type=console_type, + compute_host=compute_host) return result @@ -2327,8 +2362,8 @@ def console_get_by_pool_instance(context, pool_id, instance_id): options(joinedload('pool')).\ first() if not result: - raise exception.NotFound(_('No console for instance %(instance_id)s ' - 'in pool %(pool_id)s') % locals()) + raise exception.ConsoleNotFoundInPoolForInstance(pool_id=pool_id, + instance_id=instance_id) return result @@ -2349,9 +2384,11 @@ def console_get(context, console_id, instance_id=None): query = query.filter_by(instance_id=instance_id) result = query.options(joinedload('pool')).first() if not result: - idesc = (_("on instance %s") % instance_id) if instance_id else "" - raise exception.NotFound(_("No console with id %(console_id)s" - " %(idesc)s") % locals()) + if instance_id: + raise exception.ConsoleNotFoundForInstance(console_id=console_id, + instance_id=instance_id) + else: + raise exception.ConsoleNotFound(console_id=console_id) return result @@ -2390,7 +2427,20 @@ def 
instance_type_get_all(context, inactive=False): inst_dict[i['name']] = dict(i) return inst_dict else: - raise exception.NotFound + raise exception.NoInstanceTypesFound() + + +@require_context +def instance_type_get_by_id(context, id): + """Returns a dict describing specific instance_type""" + session = get_session() + inst_type = session.query(models.InstanceTypes).\ + filter_by(id=id).\ + first() + if not inst_type: + raise exception.InstanceTypeNotFound(instance_type=id) + else: + return dict(inst_type) @require_context @@ -2401,7 +2451,7 @@ def instance_type_get_by_name(context, name): filter_by(name=name).\ first() if not inst_type: - raise exception.NotFound(_("No instance type with name %s") % name) + raise exception.InstanceTypeNotFoundByName(instance_type_name=name) else: return dict(inst_type) @@ -2414,7 +2464,7 @@ def instance_type_get_by_flavor_id(context, id): filter_by(flavorid=int(id)).\ first() if not inst_type: - raise exception.NotFound(_("No flavor with flavorid %s") % id) + raise exception.FlavorNotFound(flavor_id=id) else: return dict(inst_type) @@ -2427,7 +2477,7 @@ def instance_type_destroy(context, name): filter_by(name=name) records = instance_type_ref.update(dict(deleted=True)) if records == 0: - raise exception.NotFound + raise exception.InstanceTypeNotFoundByName(instance_type_name=name) else: return instance_type_ref @@ -2442,7 +2492,7 @@ def instance_type_purge(context, name): filter_by(name=name) records = instance_type_ref.delete() if records == 0: - raise exception.NotFound + raise exception.InstanceTypeNotFoundByName(instance_type_name=name) else: return instance_type_ref @@ -2463,7 +2513,7 @@ def zone_update(context, zone_id, values): session = get_session() zone = session.query(models.Zone).filter_by(id=zone_id).first() if not zone: - raise exception.NotFound(_("No zone with id %(zone_id)s") % locals()) + raise exception.ZoneNotFound(zone_id=zone_id) zone.update(values) zone.save() return zone @@ -2483,7 +2533,7 @@ def 
zone_get(context, zone_id): session = get_session() result = session.query(models.Zone).filter_by(id=zone_id).first() if not result: - raise exception.NotFound(_("No zone with id %(zone_id)s") % locals()) + raise exception.ZoneNotFound(zone_id=zone_id) return result @@ -2517,7 +2567,7 @@ def instance_metadata_delete(context, instance_id, key): filter_by(instance_id=instance_id).\ filter_by(key=key).\ filter_by(deleted=False).\ - update({'deleted': 1, + update({'deleted': True, 'deleted_at': datetime.datetime.utcnow(), 'updated_at': literal_column('updated_at')}) @@ -2533,8 +2583,8 @@ def instance_metadata_get_item(context, instance_id, key): first() if not meta_result: - raise exception.NotFound(_('Invalid metadata key for instance %s') % - instance_id) + raise exception.InstanceMetadataNotFound(metadata_key=key, + instance_id=instance_id) return meta_result diff --git a/nova/db/sqlalchemy/migrate_repo/versions/001_austin.py b/nova/db/sqlalchemy/migrate_repo/versions/001_austin.py index 9e7ab3554..63bbaccc1 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/001_austin.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/001_austin.py @@ -17,15 +17,13 @@ # under the License. 
## Table code mostly autogenerated by genmodel.py -from sqlalchemy import * -from migrate import * - +from sqlalchemy import Boolean, Column, DateTime, ForeignKey +from sqlalchemy import ForeignKeyConstraint, Integer, MetaData, String +from sqlalchemy import Table, Text from nova import log as logging - meta = MetaData() - auth_tokens = Table('auth_tokens', meta, Column('created_at', DateTime(timezone=False)), Column('updated_at', DateTime(timezone=False)), diff --git a/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py b/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py index 413536a59..9bb8a8ada 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py @@ -16,15 +16,12 @@ # License for the specific language governing permissions and limitations # under the License. -from sqlalchemy import * -from migrate import * - +from sqlalchemy import Boolean, Column, DateTime, ForeignKey +from sqlalchemy import Integer, MetaData, String, Table, Text from nova import log as logging - meta = MetaData() - # Just for the ForeignKey and column creation to succeed, these are not the # actual definitions of instances or services. instances = Table('instances', meta, diff --git a/nova/db/sqlalchemy/migrate_repo/versions/003_add_label_to_networks.py b/nova/db/sqlalchemy/migrate_repo/versions/003_add_label_to_networks.py index 5ba7910f1..8e0de4d2b 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/003_add_label_to_networks.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/003_add_label_to_networks.py @@ -15,15 +15,10 @@ # License for the specific language governing permissions and limitations # under the License. 
-from sqlalchemy import * -from migrate import * - -from nova import log as logging - +from sqlalchemy import Column, Integer, MetaData, String, Table meta = MetaData() - networks = Table('networks', meta, Column('id', Integer(), primary_key=True, nullable=False), ) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/004_add_zone_tables.py b/nova/db/sqlalchemy/migrate_repo/versions/004_add_zone_tables.py index ade981687..0abea374c 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/004_add_zone_tables.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/004_add_zone_tables.py @@ -13,15 +13,12 @@ # License for the specific language governing permissions and limitations # under the License. -from sqlalchemy import * -from migrate import * - +from sqlalchemy import Boolean, Column, DateTime, Integer +from sqlalchemy import MetaData, String, Table from nova import log as logging - meta = MetaData() - # # New Tables # diff --git a/nova/db/sqlalchemy/migrate_repo/versions/005_add_instance_metadata.py b/nova/db/sqlalchemy/migrate_repo/versions/005_add_instance_metadata.py index 4cb07e0d8..a1a86e3b4 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/005_add_instance_metadata.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/005_add_instance_metadata.py @@ -15,15 +15,12 @@ # License for the specific language governing permissions and limitations # under the License. -from sqlalchemy import * -from migrate import * - +from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer +from sqlalchemy import MetaData, String, Table from nova import log as logging - meta = MetaData() - # Just for the ForeignKey and column creation to succeed, these are not the # actual definitions of instances or services. 
instances = Table('instances', meta, diff --git a/nova/db/sqlalchemy/migrate_repo/versions/006_add_provider_data_to_volumes.py b/nova/db/sqlalchemy/migrate_repo/versions/006_add_provider_data_to_volumes.py index 705fc8ff3..4627d3332 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/006_add_provider_data_to_volumes.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/006_add_provider_data_to_volumes.py @@ -15,11 +15,7 @@ # License for the specific language governing permissions and limitations # under the License. -from sqlalchemy import * -from migrate import * - -from nova import log as logging - +from sqlalchemy import Column, Integer, MetaData, String, Table meta = MetaData() diff --git a/nova/db/sqlalchemy/migrate_repo/versions/007_add_ipv6_to_fixed_ips.py b/nova/db/sqlalchemy/migrate_repo/versions/007_add_ipv6_to_fixed_ips.py index 427934d53..6f2668040 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/007_add_ipv6_to_fixed_ips.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/007_add_ipv6_to_fixed_ips.py @@ -13,15 +13,10 @@ # License for the specific language governing permissions and limitations # under the License. -from sqlalchemy import * -from migrate import * - -from nova import log as logging - +from sqlalchemy import Column, Integer, MetaData, String, Table meta = MetaData() - # Table stub-definitions # Just for the ForeignKey and column creation to succeed, these are not the # actual definitions of instances or services. diff --git a/nova/db/sqlalchemy/migrate_repo/versions/008_add_instance_types.py b/nova/db/sqlalchemy/migrate_repo/versions/008_add_instance_types.py index 5e2cb69d9..63999f6ff 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/008_add_instance_types.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/008_add_instance_types.py @@ -13,15 +13,10 @@ # License for the specific language governing permissions and limitations # under the License. 
-from sqlalchemy import * -from migrate import * - -from nova import api -from nova import db +from sqlalchemy import Boolean, Column, DateTime, Integer +from sqlalchemy import MetaData, String, Table from nova import log as logging -import datetime - meta = MetaData() diff --git a/nova/db/sqlalchemy/migrate_repo/versions/009_add_instance_migrations.py b/nova/db/sqlalchemy/migrate_repo/versions/009_add_instance_migrations.py index 4fda525f1..0f2d0079a 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/009_add_instance_migrations.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/009_add_instance_migrations.py @@ -15,12 +15,10 @@ # License for the specific language governing permissions and limitations # under the License.from sqlalchemy import * -from sqlalchemy import * -from migrate import * - +from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer +from sqlalchemy import MetaData, String, Table from nova import log as logging - meta = MetaData() # Just for the ForeignKey and column creation to succeed, these are not the diff --git a/nova/db/sqlalchemy/migrate_repo/versions/010_add_os_type_to_instances.py b/nova/db/sqlalchemy/migrate_repo/versions/010_add_os_type_to_instances.py index eb3066894..a5b80586e 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/010_add_os_type_to_instances.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/010_add_os_type_to_instances.py @@ -14,12 +14,7 @@ # License for the specific language governing permissions and limitations # under the License. 
-from sqlalchemy import * -from sqlalchemy.sql import text -from migrate import * - -from nova import log as logging - +from sqlalchemy import Column, Integer, MetaData, String, Table meta = MetaData() diff --git a/nova/db/sqlalchemy/migrate_repo/versions/011_live_migration.py b/nova/db/sqlalchemy/migrate_repo/versions/011_live_migration.py index 23ccccb4e..b2b0256d2 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/011_live_migration.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/011_live_migration.py @@ -16,10 +16,9 @@ # License for the specific language governing permissions and limitations # under the License. -from migrate import * +from sqlalchemy import Boolean, Column, DateTime, Integer, MetaData +from sqlalchemy import Table, Text from nova import log as logging -from sqlalchemy import * - meta = MetaData() diff --git a/nova/db/sqlalchemy/migrate_repo/versions/012_add_ipv6_flatmanager.py b/nova/db/sqlalchemy/migrate_repo/versions/012_add_ipv6_flatmanager.py index e87085668..10d250522 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/012_add_ipv6_flatmanager.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/012_add_ipv6_flatmanager.py @@ -13,15 +13,11 @@ # License for the specific language governing permissions and limitations # under the License. -from sqlalchemy import * -from migrate import * - -from nova import log as logging - +from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer +from sqlalchemy import MetaData, String, Table meta = MetaData() - # Table stub-definitions # Just for the ForeignKey and column creation to succeed, these are not the # actual definitions of instances or services. 
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/013_add_flavors_to_migrations.py b/nova/db/sqlalchemy/migrate_repo/versions/013_add_flavors_to_migrations.py index 3fb92e85c..7246839b7 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/013_add_flavors_to_migrations.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/013_add_flavors_to_migrations.py @@ -15,11 +15,7 @@ # License for the specific language governing permissions and limitations # under the License.from sqlalchemy import * -from sqlalchemy import * -from migrate import * - -from nova import log as logging - +from sqlalchemy import Column, Integer, MetaData, Table meta = MetaData() diff --git a/nova/db/sqlalchemy/migrate_repo/versions/014_add_instance_type_id_to_instances.py b/nova/db/sqlalchemy/migrate_repo/versions/014_add_instance_type_id_to_instances.py new file mode 100644 index 000000000..62216be12 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/014_add_instance_type_id_to_instances.py @@ -0,0 +1,81 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from sqlalchemy import Column, Integer, MetaData, String, Table +#from nova import log as logging + +meta = MetaData() + +c_instance_type = Column('instance_type', + String(length=255, convert_unicode=False, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False), + nullable=True) + +c_instance_type_id = Column('instance_type_id', + String(length=255, convert_unicode=False, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False), + nullable=True) + +instance_types = Table('instance_types', meta, + Column('id', Integer(), primary_key=True, nullable=False), + Column('name', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + unique=True)) + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True, + autoload_with=migrate_engine) + + instances.create_column(c_instance_type_id) + + type_names = {} + recs = migrate_engine.execute(instance_types.select()) + for row in recs: + type_names[row[0]] = row[1] + + for type_id, type_name in type_names.iteritems(): + migrate_engine.execute(instances.update()\ + .where(instances.c.instance_type == type_name)\ + .values(instance_type_id=type_id)) + + instances.c.instance_type.drop() + + +def downgrade(migrate_engine): + meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True, + autoload_with=migrate_engine) + + instances.create_column(c_instance_type) + + recs = migrate_engine.execute(instance_types.select()) + for row in recs: + type_id = row[0] + type_name = row[1] + migrate_engine.execute(instances.update()\ + .where(instances.c.instance_type_id == type_id)\ + .values(instance_type=type_name)) + + instances.c.instance_type_id.drop() diff --git a/nova/db/sqlalchemy/migrate_repo/versions/015_add_auto_assign_to_floating_ips.py 
b/nova/db/sqlalchemy/migrate_repo/versions/015_add_auto_assign_to_floating_ips.py
new file mode 100644
index 000000000..375760c84
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/015_add_auto_assign_to_floating_ips.py
@@ -0,0 +1,48 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# Copyright 2011 Grid Dynamics
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Boolean, Column, MetaData, Table
+
+meta = MetaData()
+
+c_auto_assigned = Column('auto_assigned', Boolean, default=False)
+
+
+def upgrade(migrate_engine):
+    # Upgrade operations go here. Don't create your own engine;
+    # bind migrate_engine to your metadata
+    meta.bind = migrate_engine
+
+    floating_ips = Table('floating_ips',
+                         meta,
+                         autoload=True,
+                         autoload_with=migrate_engine)
+
+    floating_ips.create_column(c_auto_assigned)
+
+
+def downgrade(migrate_engine):
+    # Reverse the upgrade: drop the auto_assigned column so this
+    # migration can be rolled back like its siblings.
+    meta.bind = migrate_engine
+
+    floating_ips = Table('floating_ips',
+                         meta,
+                         autoload=True,
+                         autoload_with=migrate_engine)
+
+    floating_ips.c.auto_assigned.drop()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py b/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py
new file mode 100644
index 000000000..a2d8192ca
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/016_make_quotas_key_and_value.py
@@ -0,0 +1,203 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Boolean, Column, DateTime, Integer +from sqlalchemy import MetaData, String, Table + +import datetime + +meta = MetaData() + +resources = [ + 'instances', + 'cores', + 'volumes', + 'gigabytes', + 'floating_ips', + 'metadata_items', +] + + +def old_style_quotas_table(name): + return Table(name, meta, + Column('id', Integer(), primary_key=True), + Column('created_at', DateTime(), + default=datetime.datetime.utcnow), + Column('updated_at', DateTime(), + onupdate=datetime.datetime.utcnow), + Column('deleted_at', DateTime()), + Column('deleted', Boolean(), default=False), + Column('project_id', + String(length=255, convert_unicode=False, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False)), + Column('instances', Integer()), + Column('cores', Integer()), + Column('volumes', Integer()), + Column('gigabytes', Integer()), + Column('floating_ips', Integer()), + Column('metadata_items', Integer()), + ) + + +def new_style_quotas_table(name): + return Table(name, meta, + Column('id', Integer(), primary_key=True), + Column('created_at', DateTime(), + default=datetime.datetime.utcnow), + Column('updated_at', DateTime(), + onupdate=datetime.datetime.utcnow), + Column('deleted_at', DateTime()), + Column('deleted', Boolean(), default=False), + Column('project_id', + String(length=255, convert_unicode=False, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False)), + Column('resource', + String(length=255, convert_unicode=False, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False), + 
nullable=False), + Column('hard_limit', Integer(), nullable=True), + ) + + +def existing_quotas_table(migrate_engine): + return Table('quotas', meta, autoload=True, autoload_with=migrate_engine) + + +def _assert_no_duplicate_project_ids(quotas): + project_ids = set() + message = ('There are multiple active quotas for project "%s" ' + '(among others, possibly). ' + 'Please resolve all ambiguous quotas before ' + 'reattempting the migration.') + for quota in quotas: + assert quota.project_id not in project_ids, message % quota.project_id + project_ids.add(quota.project_id) + + +def assert_old_quotas_have_no_active_duplicates(migrate_engine, quotas): + """Ensure that there are no duplicate non-deleted quota entries.""" + select = quotas.select().where(quotas.c.deleted == False) + results = migrate_engine.execute(select) + _assert_no_duplicate_project_ids(list(results)) + + +def assert_new_quotas_have_no_active_duplicates(migrate_engine, quotas): + """Ensure that there are no duplicate non-deleted quota entries.""" + for resource in resources: + select = quotas.select().\ + where(quotas.c.deleted == False).\ + where(quotas.c.resource == resource) + results = migrate_engine.execute(select) + _assert_no_duplicate_project_ids(list(results)) + + +def convert_forward(migrate_engine, old_quotas, new_quotas): + quotas = list(migrate_engine.execute(old_quotas.select())) + for quota in quotas: + for resource in resources: + hard_limit = getattr(quota, resource) + if hard_limit is None: + continue + insert = new_quotas.insert().values( + created_at=quota.created_at, + updated_at=quota.updated_at, + deleted_at=quota.deleted_at, + deleted=quota.deleted, + project_id=quota.project_id, + resource=resource, + hard_limit=hard_limit) + migrate_engine.execute(insert) + + +def earliest(date1, date2): + if date1 is None and date2 is None: + return None + if date1 is None: + return date2 + if date2 is None: + return date1 + if date1 < date2: + return date1 + return date2 + + +def 
latest(date1, date2): + if date1 is None and date2 is None: + return None + if date1 is None: + return date2 + if date2 is None: + return date1 + if date1 > date2: + return date1 + return date2 + + +def convert_backward(migrate_engine, old_quotas, new_quotas): + quotas = {} + for quota in migrate_engine.execute(new_quotas.select()): + if (quota.resource not in resources + or quota.hard_limit is None or quota.deleted): + continue + if not quota.project_id in quotas: + quotas[quota.project_id] = { + 'project_id': quota.project_id, + 'created_at': quota.created_at, + 'updated_at': quota.updated_at, + quota.resource: quota.hard_limit + } + else: + quotas[quota.project_id]['created_at'] = earliest( + quota.created_at, quotas[quota.project_id]['created_at']) + quotas[quota.project_id]['updated_at'] = latest( + quota.updated_at, quotas[quota.project_id]['updated_at']) + quotas[quota.project_id][quota.resource] = quota.hard_limit + + for quota in quotas.itervalues(): + insert = old_quotas.insert().values(**quota) + migrate_engine.execute(insert) + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + + old_quotas = existing_quotas_table(migrate_engine) + assert_old_quotas_have_no_active_duplicates(migrate_engine, old_quotas) + + new_quotas = new_style_quotas_table('quotas_new') + new_quotas.create() + convert_forward(migrate_engine, old_quotas, new_quotas) + old_quotas.drop() + new_quotas.rename('quotas') + + +def downgrade(migrate_engine): + # Operations to reverse the above upgrade go here. 
+ meta.bind = migrate_engine + + new_quotas = existing_quotas_table(migrate_engine) + assert_new_quotas_have_no_active_duplicates(migrate_engine, new_quotas) + + old_quotas = old_style_quotas_table('quotas_old') + old_quotas.create() + convert_backward(migrate_engine, old_quotas, new_quotas) + new_quotas.drop() + old_quotas.rename('quotas') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/017_make_instance_type_id_an_integer.py b/nova/db/sqlalchemy/migrate_repo/versions/017_make_instance_type_id_an_integer.py new file mode 100644 index 000000000..cda890c94 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/017_make_instance_type_id_an_integer.py @@ -0,0 +1,68 @@ +from sqlalchemy import Column, Integer, MetaData, String, Table +from nova import log as logging + +meta = MetaData() + + +def upgrade(migrate_engine): + meta.bind = migrate_engine + instances = Table('instances', meta, autoload=True, + autoload_with=migrate_engine) + + types = {} + for instance in migrate_engine.execute(instances.select()): + if instance.instance_type_id is None: + types[instance.id] = None + continue + try: + types[instance.id] = int(instance.instance_type_id) + except ValueError: + logging.warn("Instance %s did not have instance_type_id " + "converted to an integer because its value is %s" % + (instance.id, instance.instance_type_id)) + types[instance.id] = None + + integer_column = Column('instance_type_id_int', Integer(), nullable=True) + string_column = instances.c.instance_type_id + + integer_column.create(instances) + for instance_id, instance_type_id in types.iteritems(): + update = instances.update().\ + where(instances.c.id == instance_id).\ + values(instance_type_id_int=instance_type_id) + migrate_engine.execute(update) + + string_column.alter(name='instance_type_id_str') + integer_column.alter(name='instance_type_id') + string_column.drop() + + +def downgrade(migrate_engine): + meta.bind = migrate_engine + instances = Table('instances', meta, autoload=True, + 
autoload_with=migrate_engine) + + integer_column = instances.c.instance_type_id + string_column = Column('instance_type_id_str', + String(length=255, convert_unicode=False, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False), + nullable=True) + + types = {} + for instance in migrate_engine.execute(instances.select()): + if instance.instance_type_id is None: + types[instance.id] = None + else: + types[instance.id] = str(instance.instance_type_id) + + string_column.create(instances) + for instance_id, instance_type_id in types.iteritems(): + update = instances.update().\ + where(instances.c.id == instance_id).\ + values(instance_type_id_str=instance_type_id) + migrate_engine.execute(update) + + integer_column.alter(name='instance_type_id_int') + string_column.alter(name='instance_type_id') + integer_column.drop() diff --git a/nova/db/sqlalchemy/migrate_repo/versions/018_rename_server_management_url.py b/nova/db/sqlalchemy/migrate_repo/versions/018_rename_server_management_url.py new file mode 100644 index 000000000..a169afb40 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/018_rename_server_management_url.py @@ -0,0 +1,60 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from sqlalchemy import Column, Integer, MetaData, String, Table +#from nova import log as logging + +meta = MetaData() + +c_manageent = Column('server_manageent_url', + String(length=255, convert_unicode=False, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False), + nullable=True) + +c_management = Column('server_management_url', + String(length=255, convert_unicode=False, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False), + nullable=True) + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + + tokens = Table('auth_tokens', meta, autoload=True, + autoload_with=migrate_engine) + + tokens.create_column(c_management) + migrate_engine.execute(tokens.update() + .values(server_management_url=tokens.c.server_manageent_url)) + + tokens.c.server_manageent_url.drop() + + +def downgrade(migrate_engine): + meta.bind = migrate_engine + + tokens = Table('auth_tokens', meta, autoload=True, + autoload_with=migrate_engine) + + tokens.create_column(c_manageent) + migrate_engine.execute(tokens.update() + .values(server_manageent_url=tokens.c.server_management_url)) + + tokens.c.server_management_url.drop() diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index dda41086a..dc10bcf00 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -209,7 +209,7 @@ class Instance(BASE, NovaBase): hostname = Column(String(255)) host = Column(String(255)) # , ForeignKey('hosts.id')) - instance_type = Column(String(255)) + instance_type_id = Column(Integer) user_data = Column(Text) @@ -268,6 +268,12 @@ class InstanceTypes(BASE, NovaBase): rxtx_quota = Column(Integer, nullable=False, default=0) rxtx_cap = Column(Integer, nullable=False, default=0) + instances = relationship(Instance, + backref=backref('instance_type', uselist=False), + foreign_keys=id, + primaryjoin='and_(Instance.instance_type_id 
== ' + 'InstanceTypes.id)') + class Volume(BASE, NovaBase): """Represents a block storage device that can be attached to a vm.""" @@ -307,18 +313,20 @@ class Volume(BASE, NovaBase): class Quota(BASE, NovaBase): - """Represents quota overrides for a project.""" + """Represents a single quota override for a project. + + If there is no row for a given project id and resource, then + the default for the deployment is used. If the row is present + but the hard limit is Null, then the resource is unlimited. + """ + __tablename__ = 'quotas' id = Column(Integer, primary_key=True) - project_id = Column(String(255)) + project_id = Column(String(255), index=True) - instances = Column(Integer) - cores = Column(Integer) - volumes = Column(Integer) - gigabytes = Column(Integer) - floating_ips = Column(Integer) - metadata_items = Column(Integer) + resource = Column(String(255)) + hard_limit = Column(Integer, nullable=True) class ExportDevice(BASE, NovaBase): @@ -498,7 +506,7 @@ class AuthToken(BASE, NovaBase): __tablename__ = 'auth_tokens' token_hash = Column(String(255), primary_key=True) user_id = Column(String(255)) - server_manageent_url = Column(String(255)) + server_management_url = Column(String(255)) storage_url = Column(String(255)) cdn_management_url = Column(String(255)) @@ -597,6 +605,7 @@ class FloatingIp(BASE, NovaBase): 'FloatingIp.deleted == False)') project_id = Column(String(255)) host = Column(String(255)) # , ForeignKey('hosts.id')) + auto_assigned = Column(Boolean, default=False, nullable=False) class ConsolePool(BASE, NovaBase): diff --git a/nova/exception.py b/nova/exception.py index 4e2bbdbaf..56c20d111 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -16,88 +16,55 @@ # License for the specific language governing permissions and limitations # under the License. -""" -Nova base exception handling, including decorator for re-raising -Nova-type exceptions. SHOULD include dedicated exception logging. +"""Nova base exception handling. 
+ +Includes decorator for re-raising Nova-type exceptions. + +SHOULD include dedicated exception logging. + """ from nova import log as logging + + LOG = logging.getLogger('nova.exception') class ProcessExecutionError(IOError): - def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None, description=None): if description is None: - description = _("Unexpected error while running command.") + description = _('Unexpected error while running command.') if exit_code is None: exit_code = '-' - message = _("%(description)s\nCommand: %(cmd)s\n" - "Exit code: %(exit_code)s\nStdout: %(stdout)r\n" - "Stderr: %(stderr)r") % locals() + message = _('%(description)s\nCommand: %(cmd)s\n' + 'Exit code: %(exit_code)s\nStdout: %(stdout)r\n' + 'Stderr: %(stderr)r') % locals() IOError.__init__(self, message) class Error(Exception): - def __init__(self, message=None): super(Error, self).__init__(message) class ApiError(Error): - def __init__(self, message='Unknown', code='ApiError'): - self.message = message + def __init__(self, message='Unknown', code=None): + self.msg = message self.code = code - super(ApiError, self).__init__('%s: %s' % (code, message)) - - -class NotFound(Error): - pass - - -class InstanceNotFound(NotFound): - def __init__(self, message, instance_id): - self.instance_id = instance_id - super(InstanceNotFound, self).__init__(message) - - -class VolumeNotFound(NotFound): - def __init__(self, message, volume_id): - self.volume_id = volume_id - super(VolumeNotFound, self).__init__(message) - - -class Duplicate(Error): - pass - - -class NotAuthorized(Error): - pass - - -class NotEmpty(Error): - pass - - -class Invalid(Error): - pass - - -class InvalidInputException(Error): - pass + if code: + outstr = '%s: %s' % (code, message) + else: + outstr = '%s' % message + super(ApiError, self).__init__(outstr) -class InvalidContentType(Error): - pass - - -class TimeoutException(Error): +class BuildInProgress(Error): pass class DBError(Error): - """Wraps an 
 implementation specific exception"""
+    """Wraps an implementation specific exception."""
     def __init__(self, inner_exception):
         self.inner_exception = inner_exception
         super(DBError, self).__init__(str(inner_exception))
@@ -108,7 +75,7 @@ def wrap_db_error(f):
         try:
             return f(*args, **kwargs)
         except Exception, e:
-            LOG.exception(_('DB exception wrapped'))
+            LOG.exception(_('DB exception wrapped.'))
             raise DBError(e)
     return _wrap
     _wrap.func_name = f.func_name
@@ -127,3 +94,465 @@ def wrap_exception(f):
             raise
     _wrap.func_name = f.func_name
     return _wrap
+
+
+class NovaException(Exception):
+    """Base Nova Exception
+
+    To correctly use this class, inherit from it and define
+    a 'message' property. That message will get printf'd
+    with the keyword arguments provided to the constructor.
+
+    """
+    message = _("An unknown exception occurred.")
+
+    def __init__(self, **kwargs):
+        try:
+            self._error_string = self.message % kwargs
+
+        except Exception:
+            # at least get the core message out if something happened
+            self._error_string = self.message
+
+    def __str__(self):
+        return self._error_string
+
+
+class NotAuthorized(NovaException):
+    message = _("Not authorized.")
+
+    def __init__(self, *args, **kwargs):
+        super(NotAuthorized, self).__init__(**kwargs)
+
+
+class AdminRequired(NotAuthorized):
+    message = _("User does not have admin privileges")
+
+
+class Invalid(NovaException):
+    message = _("Unacceptable parameters.")
+
+
+class InvalidSignature(Invalid):
+    message = _("Invalid signature %(signature)s for user %(user)s.")
+
+
+class InvalidInput(Invalid):
+    message = _("Invalid input received") + ": %(reason)s"
+
+
+class InvalidInstanceType(Invalid):
+    message = _("Invalid instance type %(instance_type)s.")
+
+
+class InvalidPortRange(Invalid):
+    message = _("Invalid port range %(from_port)s:%(to_port)s.")
+
+
+class InvalidIpProtocol(Invalid):
+    message = _("Invalid IP protocol %(protocol)s.")
+
+
+class InvalidContentType(Invalid):
+    message = _("Invalid content type 
%(content_type)s.") + + +class InstanceNotRunning(Invalid): + message = _("Instance %(instance_id)s is not running.") + + +class InstanceNotSuspended(Invalid): + message = _("Instance %(instance_id)s is not suspended.") + + +class InstanceNotInRescueMode(Invalid): + message = _("Instance %(instance_id)s is not in rescue mode") + + +class InstanceSuspendFailure(Invalid): + message = _("Failed to suspend instance") + ": %(reason)s" + + +class InstanceResumeFailure(Invalid): + message = _("Failed to resume server") + ": %(reason)s." + + +class InstanceRebootFailure(Invalid): + message = _("Failed to reboot instance") + ": %(reason)s" + + +class ServiceUnavailable(Invalid): + message = _("Service is unavailable at this time.") + + +class VolumeServiceUnavailable(ServiceUnavailable): + message = _("Volume service is unavailable at this time.") + + +class ComputeServiceUnavailable(ServiceUnavailable): + message = _("Compute service is unavailable at this time.") + + +class UnableToMigrateToSelf(Invalid): + message = _("Unable to migrate instance (%(instance_id)s) " + "to current host (%(host)s).") + + +class SourceHostUnavailable(Invalid): + message = _("Original compute host is unavailable at this time.") + + +class InvalidHypervisorType(Invalid): + message = _("The supplied hypervisor type of is invalid.") + + +class DestinationHypervisorTooOld(Invalid): + message = _("The instance requires a newer hypervisor version than " + "has been provided.") + + +class InvalidDevicePath(Invalid): + message = _("The supplied device path (%(path)s) is invalid.") + + +class InvalidCPUInfo(Invalid): + message = _("Unacceptable CPU info") + ": %(reason)s" + + +class InvalidVLANTag(Invalid): + message = _("VLAN tag is not appropriate for the port group " + "%(bridge)s. 
Expected VLAN tag is %(tag)s, " + "but the one associated with the port group is %(pgroup)s.") + + +class InvalidVLANPortGroup(Invalid): + message = _("vSwitch which contains the port group %(bridge)s is " + "not associated with the desired physical adapter. " + "Expected vSwitch is %(expected)s, but the one associated " + "is %(actual)s.") + + +class InvalidDiskFormat(Invalid): + message = _("Disk format %(disk_format)s is not acceptable") + + +class ImageUnacceptable(Invalid): + message = _("Image %(image_id)s is unacceptable") + ": %(reason)s" + + +class InstanceUnacceptable(Invalid): + message = _("Instance %(instance_id)s is unacceptable") + ": %(reason)s" + + +class InvalidEc2Id(Invalid): + message = _("Ec2 id %(ec2_id)s is unacceptable.") + + +class NotFound(NovaException): + message = _("Resource could not be found.") + + def __init__(self, *args, **kwargs): + super(NotFound, self).__init__(**kwargs) + + +class FlagNotSet(NotFound): + message = _("Required flag %(flag)s not set.") + + +class InstanceNotFound(NotFound): + message = _("Instance %(instance_id)s could not be found.") + + +class VolumeNotFound(NotFound): + message = _("Volume %(volume_id)s could not be found.") + + +class VolumeNotFoundForInstance(VolumeNotFound): + message = _("Volume not found for instance %(instance_id)s.") + + +class ExportDeviceNotFoundForVolume(NotFound): + message = _("No export device found for volume %(volume_id)s.") + + +class ISCSITargetNotFoundForVolume(NotFound): + message = _("No target id found for volume %(volume_id)s.") + + +class DiskNotFound(NotFound): + message = _("No disk at %(location)s") + + +class ImageNotFound(NotFound): + message = _("Image %(image_id)s could not be found.") + + +class KernelNotFoundForImage(ImageNotFound): + message = _("Kernel not found for image %(image_id)s.") + + +class RamdiskNotFoundForImage(ImageNotFound): + message = _("Ramdisk not found for image %(image_id)s.") + + +class UserNotFound(NotFound): + message = _("User 
%(user_id)s could not be found.") + + +class ProjectNotFound(NotFound): + message = _("Project %(project_id)s could not be found.") + + +class ProjectMembershipNotFound(NotFound): + message = _("User %(user_id)s is not a member of project %(project_id)s.") + + +class UserRoleNotFound(NotFound): + message = _("Role %(role_id)s could not be found.") + + +class StorageRepositoryNotFound(NotFound): + message = _("Cannot find SR to read/write VDI.") + + +class NetworkNotFound(NotFound): + message = _("Network %(network_id)s could not be found.") + + +class NetworkNotFoundForBridge(NetworkNotFound): + message = _("Network could not be found for bridge %(bridge)s") + + +class NetworkNotFoundForCidr(NetworkNotFound): + message = _("Network could not be found with cidr %(cidr)s.") + + +class NetworkNotFoundForInstance(NetworkNotFound): + message = _("Network could not be found for instance %(instance_id)s.") + + +class NoNetworksFound(NotFound): + message = _("No networks defined.") + + +class DatastoreNotFound(NotFound): + message = _("Could not find the datastore reference(s) which the VM uses.") + + +class NoFixedIpsFoundForInstance(NotFound): + message = _("Instance %(instance_id)s has zero fixed ips.") + + +class FloatingIpNotFound(NotFound): + message = _("Floating ip not found for fixed address %(fixed_ip)s.") + + +class NoFloatingIpsDefined(NotFound): + message = _("Zero floating ips could be found.") + + +class NoFloatingIpsDefinedForHost(NoFloatingIpsDefined): + message = _("Zero floating ips defined for host %(host)s.") + + +class NoFloatingIpsDefinedForInstance(NoFloatingIpsDefined): + message = _("Zero floating ips defined for instance %(instance_id)s.") + + +class KeypairNotFound(NotFound): + message = _("Keypair %(keypair_name)s not found for user %(user_id)s") + + +class CertificateNotFound(NotFound): + message = _("Certificate %(certificate_id)s not found.") + + +class ServiceNotFound(NotFound): + message = _("Service %(service_id)s could not be found.") + 
+ +class HostNotFound(NotFound): + message = _("Host %(host)s could not be found.") + + +class ComputeHostNotFound(HostNotFound): + message = _("Compute host %(host)s could not be found.") + + +class HostBinaryNotFound(NotFound): + message = _("Could not find binary %(binary)s on host %(host)s.") + + +class AuthTokenNotFound(NotFound): + message = _("Auth token %(token)s could not be found.") + + +class AccessKeyNotFound(NotFound): + message = _("Access Key %(access_key)s could not be found.") + + +class QuotaNotFound(NotFound): + message = _("Quota could not be found") + + +class ProjectQuotaNotFound(QuotaNotFound): + message = _("Quota for project %(project_id)s could not be found.") + + +class SecurityGroupNotFound(NotFound): + message = _("Security group %(security_group_id)s not found.") + + +class SecurityGroupNotFoundForProject(SecurityGroupNotFound): + message = _("Security group %(security_group_id)s not found " + "for project %(project_id)s.") + + +class SecurityGroupNotFoundForRule(SecurityGroupNotFound): + message = _("Security group with rule %(rule_id)s not found.") + + +class MigrationNotFound(NotFound): + message = _("Migration %(migration_id)s could not be found.") + + +class MigrationNotFoundByStatus(MigrationNotFound): + message = _("Migration not found for instance %(instance_id)s " + "with status %(status)s.") + + +class ConsolePoolNotFound(NotFound): + message = _("Console pool %(pool_id)s could not be found.") + + +class ConsolePoolNotFoundForHostType(NotFound): + message = _("Console pool of type %(console_type)s " + "for compute host %(compute_host)s " + "on proxy host %(host)s not found.") + + +class ConsoleNotFound(NotFound): + message = _("Console %(console_id)s could not be found.") + + +class ConsoleNotFoundForInstance(ConsoleNotFound): + message = _("Console for instance %(instance_id)s could not be found.") + + +class ConsoleNotFoundInPoolForInstance(ConsoleNotFound): + message = _("Console for instance %(instance_id)s " + "in pool 
%(pool_id)s could not be found.") + + +class NoInstanceTypesFound(NotFound): + message = _("Zero instance types found.") + + +class InstanceTypeNotFound(NotFound): + message = _("Instance type %(instance_type_id)s could not be found.") + + +class InstanceTypeNotFoundByName(InstanceTypeNotFound): + message = _("Instance type with name %(instance_type_name)s " + "could not be found.") + + +class FlavorNotFound(NotFound): + message = _("Flavor %(flavor_id)s could not be found.") + + +class ZoneNotFound(NotFound): + message = _("Zone %(zone_id)s could not be found.") + + +class SchedulerHostFilterDriverNotFound(NotFound): + message = _("Scheduler Host Filter Driver %(driver_name)s could" + " not be found.") + + +class InstanceMetadataNotFound(NotFound): + message = _("Instance %(instance_id)s has no metadata with " + "key %(metadata_key)s.") + + +class LDAPObjectNotFound(NotFound): + message = _("LDAP object could not be found") + + +class LDAPUserNotFound(LDAPObjectNotFound): + message = _("LDAP user %(user_id)s could not be found.") + + +class LDAPGroupNotFound(LDAPObjectNotFound): + message = _("LDAP group %(group_id)s could not be found.") + + +class LDAPGroupMembershipNotFound(NotFound): + message = _("LDAP user %(user_id)s is not a member of group %(group_id)s.") + + +class FileNotFound(NotFound): + message = _("File %(file_path)s could not be found.") + + +class NoFilesFound(NotFound): + message = _("Zero files could be found.") + + +class SwitchNotFoundForNetworkAdapter(NotFound): + message = _("Virtual switch associated with the " + "network adapter %(adapter)s not found.") + + +class NetworkAdapterNotFound(NotFound): + message = _("Network adapter %(adapter)s could not be found.") + + +class ClassNotFound(NotFound): + message = _("Class %(class_name)s could not be found") + + +class NotAllowed(NovaException): + message = _("Action not allowed.") + + +class GlobalRoleNotAllowed(NotAllowed): + message = _("Unable to use global role %(role_id)s") + + 
+#TODO(bcwaldon): EOL this exception! +class Duplicate(NovaException): + pass + + +class KeyPairExists(Duplicate): + message = _("Key pair %(key_name)s already exists.") + + +class UserExists(Duplicate): + message = _("User %(user)s already exists.") + + +class LDAPUserExists(UserExists): + message = _("LDAP user %(user)s already exists.") + + +class LDAPGroupExists(Duplicate): + message = _("LDAP group %(group)s already exists.") + + +class LDAPMembershipExists(Duplicate): + message = _("User %(uid)s is already a member of " + "the group %(group_dn)s") + + +class ProjectExists(Duplicate): + message = _("Project %(project)s already exists.") + + +class InstanceExists(Duplicate): + message = _("Instance %(name)s already exists.") + + +class MigrationError(NovaException): + message = _("Migration error") + ": %(reason)s" diff --git a/nova/fakememcache.py b/nova/fakememcache.py index 67f46dbdc..e4f238aa9 100644 --- a/nova/fakememcache.py +++ b/nova/fakememcache.py @@ -18,14 +18,14 @@ """Super simple fake memcache client.""" -import utils +from nova import utils class Client(object): """Replicates a tiny subset of memcached client interface.""" def __init__(self, *args, **kwargs): - """Ignores the passed in args""" + """Ignores the passed in args.""" self.cache = {} def get(self, key): diff --git a/nova/flags.py b/nova/flags.py index f011ab383..9eaac5596 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -16,9 +16,13 @@ # License for the specific language governing permissions and limitations # under the License. -""" +"""Command-line flag library. + +Wraps gflags. + Package-level global flags are defined here, the rest are defined where they're used. 
+ """ import getopt @@ -106,7 +110,7 @@ class FlagValues(gflags.FlagValues): return name in self.__dict__['__dirty'] def ClearDirty(self): - self.__dict__['__is_dirty'] = [] + self.__dict__['__dirty'] = [] def WasAlreadyParsed(self): return self.__dict__['__was_already_parsed'] @@ -115,11 +119,12 @@ class FlagValues(gflags.FlagValues): if '__stored_argv' not in self.__dict__: return new_flags = FlagValues(self) - for k in self.__dict__['__dirty']: + for k in self.FlagDict().iterkeys(): new_flags[k] = gflags.FlagValues.__getitem__(self, k) + new_flags.Reset() new_flags(self.__dict__['__stored_argv']) - for k in self.__dict__['__dirty']: + for k in new_flags.FlagDict().iterkeys(): setattr(self, k, getattr(new_flags, k)) self.ClearDirty() @@ -145,10 +150,12 @@ class FlagValues(gflags.FlagValues): class StrWrapper(object): - """Wrapper around FlagValues objects + """Wrapper around FlagValues objects. Wraps FlagValues objects for string.Template so that we're - sure to return strings.""" + sure to return strings. + + """ def __init__(self, context_objs): self.context_objs = context_objs @@ -169,6 +176,7 @@ def _GetCallingModule(): We generally use this function to get the name of the module calling a DEFINE_foo... function. + """ # Walk down the stack to find the first globals dict that's not ours. for depth in range(1, sys.getrecursionlimit()): @@ -192,6 +200,7 @@ def __GetModuleName(globals_dict): Returns: A string (the name of the module) or None (if the module could not be identified. 
+ """ for name, module in sys.modules.iteritems(): if getattr(module, '__dict__', None) is globals_dict: @@ -316,7 +325,7 @@ DEFINE_string('null_kernel', 'nokernel', 'kernel image that indicates not to use a kernel,' ' but to use a raw disk image instead') -DEFINE_string('vpn_image_id', 'ami-cloudpipe', 'AMI for cloudpipe vpn server') +DEFINE_integer('vpn_image_id', 0, 'integer id for cloudpipe vpn server') DEFINE_string('vpn_key_suffix', '-vpn', 'Suffix to add to project name for vpn key and secgroups') @@ -326,7 +335,7 @@ DEFINE_integer('auth_token_ttl', 3600, 'Seconds for auth tokens to linger') DEFINE_string('state_path', os.path.join(os.path.dirname(__file__), '../'), "Top-level directory for maintaining nova's state") DEFINE_string('lock_path', os.path.join(os.path.dirname(__file__), '../'), - "Directory for lock files") + 'Directory for lock files') DEFINE_string('logdir', None, 'output to a per-service log file in named ' 'directory') @@ -361,6 +370,12 @@ DEFINE_string('host', socket.gethostname(), DEFINE_string('node_availability_zone', 'nova', 'availability zone of this node') +DEFINE_string('notification_driver', + 'nova.notifier.no_op_notifier', + 'Default driver for sending notifications') +DEFINE_list('memcached_servers', None, + 'Memcached servers or None for in process cache.') + DEFINE_string('zone_name', 'nova', 'name of this zone') DEFINE_list('zone_capabilities', ['hypervisor=xenserver;kvm', 'os=linux;windows'], diff --git a/nova/image/fake.py b/nova/image/fake.py index 08302d6eb..b400b2adb 100644 --- a/nova/image/fake.py +++ b/nova/image/fake.py @@ -14,6 +14,7 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
+ """Implementation of an fake image service""" import copy @@ -44,11 +45,10 @@ class FakeImageService(service.BaseImageService): 'created_at': timestamp, 'updated_at': timestamp, 'status': 'active', - 'type': 'machine', + 'container_format': 'ami', + 'disk_format': 'raw', 'properties': {'kernel_id': FLAGS.null_kernel, - 'ramdisk_id': FLAGS.null_kernel, - 'disk_format': 'ami'} - } + 'ramdisk_id': FLAGS.null_kernel}} self.create(None, image) super(FakeImageService, self).__init__() @@ -70,14 +70,14 @@ class FakeImageService(service.BaseImageService): image = self.images.get(image_id) if image: return copy.deepcopy(image) - LOG.warn("Unable to find image id %s. Have images: %s", + LOG.warn('Unable to find image id %s. Have images: %s', image_id, self.images) - raise exception.NotFound + raise exception.ImageNotFound(image_id=image_id) def create(self, context, data): """Store the image data and return the new image id. - :raises Duplicate if the image already exist. + :raises: Duplicate if the image already exist. """ image_id = int(data['id']) @@ -89,24 +89,24 @@ class FakeImageService(service.BaseImageService): def update(self, context, image_id, data): """Replace the contents of the given image with the new data. - :raises NotFound if the image does not exist. + :raises: ImageNotFound if the image does not exist. """ image_id = int(image_id) if not self.images.get(image_id): - raise exception.NotFound + raise exception.ImageNotFound(image_id=image_id) self.images[image_id] = copy.deepcopy(data) def delete(self, context, image_id): """Delete the given image. - :raises NotFound if the image does not exist. + :raises: ImageNotFound if the image does not exist. 
""" image_id = int(image_id) removed = self.images.pop(image_id, None) if not removed: - raise exception.NotFound + raise exception.ImageNotFound(image_id=image_id) def delete_all(self): """Clears out all images.""" diff --git a/nova/image/glance.py b/nova/image/glance.py index fdf468594..193e37273 100644 --- a/nova/image/glance.py +++ b/nova/image/glance.py @@ -14,6 +14,7 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. + """Implementation of an image service that uses Glance as the backend""" from __future__ import absolute_import @@ -31,16 +32,18 @@ from nova.image import service LOG = logging.getLogger('nova.image.glance') + FLAGS = flags.FLAGS + GlanceClient = utils.import_class('glance.client.Client') class GlanceImageService(service.BaseImageService): """Provides storage and retrieval of disk image objects within Glance.""" - GLANCE_ONLY_ATTRS = ["size", "location", "disk_format", - "container_format"] + GLANCE_ONLY_ATTRS = ['size', 'location', 'disk_format', + 'container_format'] # NOTE(sirp): Overriding to use _translate_to_service provided by # BaseImageService @@ -56,9 +59,7 @@ class GlanceImageService(service.BaseImageService): self.client = client def index(self, context): - """ - Calls out to Glance for a list of images available - """ + """Calls out to Glance for a list of images available.""" # NOTE(sirp): We need to use `get_images_detailed` and not # `get_images` here because we need `is_public` and `properties` # included so we can filter by user @@ -71,9 +72,7 @@ class GlanceImageService(service.BaseImageService): return filtered def detail(self, context): - """ - Calls out to Glance for a list of detailed image information - """ + """Calls out to Glance for a list of detailed image information.""" filtered = [] image_metas = self.client.get_images_detailed() for image_meta in image_metas: @@ -83,40 +82,34 @@ class 
GlanceImageService(service.BaseImageService): return filtered def show(self, context, image_id): - """ - Returns a dict containing image data for the given opaque image id. - """ + """Returns a dict with image data for the given opaque image id.""" try: image_meta = self.client.get_image_meta(image_id) except glance_exception.NotFound: - raise exception.NotFound + raise exception.ImageNotFound(image_id=image_id) if not self._is_image_available(context, image_meta): - raise exception.NotFound + raise exception.ImageNotFound(image_id=image_id) base_image_meta = self._translate_to_base(image_meta) return base_image_meta def show_by_name(self, context, name): - """ - Returns a dict containing image data for the given name. - """ + """Returns a dict containing image data for the given name.""" # TODO(vish): replace this with more efficient call when glance # supports it. image_metas = self.detail(context) for image_meta in image_metas: if name == image_meta.get('name'): return image_meta - raise exception.NotFound + raise exception.ImageNotFound(image_id=name) def get(self, context, image_id, data): - """ - Calls out to Glance for metadata and data and writes data. - """ + """Calls out to Glance for metadata and data and writes data.""" try: image_meta, image_chunks = self.client.get_image(image_id) except glance_exception.NotFound: - raise exception.NotFound + raise exception.ImageNotFound(image_id=image_id) for chunk in image_chunks: data.write(chunk) @@ -125,16 +118,16 @@ class GlanceImageService(service.BaseImageService): return base_image_meta def create(self, context, image_meta, data=None): - """ - Store the image data and return the new image id. + """Store the image data and return the new image id. + + :raises: AlreadyExists if the image already exist. - :raises AlreadyExists if the image already exist. """ # Translate Base -> Service - LOG.debug(_("Creating image in Glance. Metadata passed in %s"), + LOG.debug(_('Creating image in Glance. 
Metadata passed in %s'), image_meta) sent_service_image_meta = self._translate_to_service(image_meta) - LOG.debug(_("Metadata after formatting for Glance %s"), + LOG.debug(_('Metadata after formatting for Glance %s'), sent_service_image_meta) recv_service_image_meta = self.client.add_image( @@ -142,83 +135,56 @@ class GlanceImageService(service.BaseImageService): # Translate Service -> Base base_image_meta = self._translate_to_base(recv_service_image_meta) - LOG.debug(_("Metadata returned from Glance formatted for Base %s"), + LOG.debug(_('Metadata returned from Glance formatted for Base %s'), base_image_meta) return base_image_meta def update(self, context, image_id, image_meta, data=None): """Replace the contents of the given image with the new data. - :raises NotFound if the image does not exist. + :raises: ImageNotFound if the image does not exist. + """ + # NOTE(vish): show is to check if image is available + self.show(context, image_id) try: image_meta = self.client.update_image(image_id, image_meta, data) except glance_exception.NotFound: - raise exception.NotFound + raise exception.ImageNotFound(image_id=image_id) base_image_meta = self._translate_to_base(image_meta) return base_image_meta def delete(self, context, image_id): - """ - Delete the given image. + """Delete the given image. + + :raises: ImageNotFound if the image does not exist. - :raises NotFound if the image does not exist. 
""" + # NOTE(vish): show is to check if image is available + self.show(context, image_id) try: result = self.client.delete_image(image_id) except glance_exception.NotFound: - raise exception.NotFound + raise exception.ImageNotFound(image_id=image_id) return result def delete_all(self): - """ - Clears out all images - """ + """Clears out all images.""" pass @classmethod def _translate_to_base(cls, image_meta): - """Overriding the base translation to handle conversion to datetime - objects - """ - image_meta = service.BaseImageService._translate_to_base(image_meta) + """Override translation to handle conversion to datetime objects.""" + image_meta = service.BaseImageService._propertify_metadata( + image_meta, cls.SERVICE_IMAGE_ATTRS) image_meta = _convert_timestamps_to_datetimes(image_meta) return image_meta - @staticmethod - def _is_image_available(context, image_meta): - """ - Images are always available if they are public or if the user is an - admin. - - Otherwise, we filter by project_id (if present) and then fall-back to - images owned by user. - """ - # FIXME(sirp): We should be filtering by user_id on the Glance side - # for security; however, we can't do that until we get authn/authz - # sorted out. Until then, filtering in Nova. 
- if image_meta['is_public'] or context.is_admin: - return True - - properties = image_meta['properties'] - - if context.project_id and ('project_id' in properties): - return str(properties['project_id']) == str(project_id) - - try: - user_id = properties['user_id'] - except KeyError: - return False - - return str(user_id) == str(context.user_id) - # utility functions def _convert_timestamps_to_datetimes(image_meta): - """ - Returns image with known timestamp fields converted to datetime objects - """ + """Returns image with timestamp fields converted to datetime objects.""" for attr in ['created_at', 'updated_at', 'deleted_at']: if image_meta.get(attr): image_meta[attr] = _parse_glance_iso8601_timestamp( @@ -227,10 +193,8 @@ def _convert_timestamps_to_datetimes(image_meta): def _parse_glance_iso8601_timestamp(timestamp): - """ - Parse a subset of iso8601 timestamps into datetime objects - """ - iso_formats = ["%Y-%m-%dT%H:%M:%S.%f", "%Y-%m-%dT%H:%M:%S"] + """Parse a subset of iso8601 timestamps into datetime objects.""" + iso_formats = ['%Y-%m-%dT%H:%M:%S.%f', '%Y-%m-%dT%H:%M:%S'] for iso_format in iso_formats: try: @@ -238,5 +202,5 @@ def _parse_glance_iso8601_timestamp(timestamp): except ValueError: pass - raise ValueError(_("%(timestamp)s does not follow any of the " - "signatures: %(ISO_FORMATS)s") % locals()) + raise ValueError(_('%(timestamp)s does not follow any of the ' + 'signatures: %(ISO_FORMATS)s') % locals()) diff --git a/nova/image/local.py b/nova/image/local.py index 1fb6e1f13..918180bae 100644 --- a/nova/image/local.py +++ b/nova/image/local.py @@ -23,14 +23,15 @@ import shutil from nova import exception from nova import flags from nova import log as logging -from nova.image import service from nova import utils +from nova.image import service FLAGS = flags.FLAGS flags.DEFINE_string('images_path', '$state_path/images', 'path to decrypted images') + LOG = logging.getLogger('nova.image.local') @@ -56,9 +57,8 @@ class 
LocalImageService(service.BaseImageService): try: unhexed_image_id = int(image_dir, 16) except ValueError: - LOG.error( - _("%s is not in correct directory naming format"\ - % image_dir)) + LOG.error(_('%s is not in correct directory naming format') + % image_dir) else: images.append(unhexed_image_id) return images @@ -84,9 +84,12 @@ class LocalImageService(service.BaseImageService): def show(self, context, image_id): try: with open(self._path_to(image_id)) as metadata_file: - return json.load(metadata_file) + image_meta = json.load(metadata_file) + if not self._is_image_available(context, image_meta): + raise exception.ImageNotFound(image_id=image_id) + return image_meta except (IOError, ValueError): - raise exception.NotFound + raise exception.ImageNotFound(image_id=image_id) def show_by_name(self, context, name): """Returns a dict containing image data for the given name.""" @@ -98,8 +101,8 @@ class LocalImageService(service.BaseImageService): if name == cantidate.get('name'): image = cantidate break - if image == None: - raise exception.NotFound + if image is None: + raise exception.ImageNotFound(image_id=name) return image def get(self, context, image_id, data): @@ -110,7 +113,7 @@ class LocalImageService(service.BaseImageService): with open(self._path_to(image_id, 'image')) as image_file: shutil.copyfileobj(image_file, data) except (IOError, ValueError): - raise exception.NotFound + raise exception.ImageNotFound(image_id=image_id) return metadata def create(self, context, metadata, data=None): @@ -119,10 +122,15 @@ class LocalImageService(service.BaseImageService): image_path = self._path_to(image_id, None) if not os.path.exists(image_path): os.mkdir(image_path) - return self.update(context, image_id, metadata, data) + return self._store(context, image_id, metadata, data) def update(self, context, image_id, metadata, data=None): """Replace the contents of the given image with the new data.""" + # NOTE(vish): show is to check if image is available + 
self.show(context, image_id) + return self._store(context, image_id, metadata, data) + + def _store(self, context, image_id, metadata, data=None): metadata['id'] = image_id try: if data: @@ -135,18 +143,21 @@ class LocalImageService(service.BaseImageService): with open(self._path_to(image_id), 'w') as metadata_file: json.dump(metadata, metadata_file) except (IOError, ValueError): - raise exception.NotFound + raise exception.ImageNotFound(image_id=image_id) return metadata def delete(self, context, image_id): """Delete the given image. - Raises OSError if the image does not exist. + + :raises: ImageNotFound if the image does not exist. """ + # NOTE(vish): show is to check if image is available + self.show(context, image_id) try: shutil.rmtree(self._path_to(image_id, None)) except (IOError, ValueError): - raise exception.NotFound + raise exception.ImageNotFound(image_id=image_id) def delete_all(self): """Clears out all images in local directory.""" diff --git a/nova/image/s3.py b/nova/image/s3.py index ddec5f3aa..c38c58d95 100644 --- a/nova/image/s3.py +++ b/nova/image/s3.py @@ -16,13 +16,9 @@ # License for the specific language governing permissions and limitations # under the License. -""" -Proxy AMI-related calls from the cloud controller, to the running -objectstore service. 
-""" +"""Proxy AMI-related calls from cloud controller to objectstore service.""" import binascii -import eventlet import os import shutil import tarfile @@ -30,6 +26,7 @@ import tempfile from xml.etree import ElementTree import boto.s3.connection +import eventlet from nova import crypto from nova import exception @@ -46,64 +43,41 @@ flags.DEFINE_string('image_decryption_dir', '/tmp', class S3ImageService(service.BaseImageService): + """Wraps an existing image service to support s3 based register.""" + def __init__(self, service=None, *args, **kwargs): - if service == None: + if service is None: service = utils.import_object(FLAGS.image_service) self.service = service self.service.__init__(*args, **kwargs) def create(self, context, metadata, data=None): - """metadata['properties'] should contain image_location""" + """Create an image. + + metadata['properties'] should contain image_location. + + """ image = self._s3_create(context, metadata) return image def delete(self, context, image_id): - # FIXME(vish): call to show is to check filter - self.show(context, image_id) self.service.delete(context, image_id) def update(self, context, image_id, metadata, data=None): - # FIXME(vish): call to show is to check filter - self.show(context, image_id) image = self.service.update(context, image_id, metadata, data) return image def index(self, context): - images = self.service.index(context) - # FIXME(vish): index doesn't filter so we do it manually - return self._filter(context, images) + return self.service.index(context) def detail(self, context): - images = self.service.detail(context) - # FIXME(vish): detail doesn't filter so we do it manually - return self._filter(context, images) - - @classmethod - def _is_visible(cls, context, image): - return (context.is_admin - or context.project_id == image['properties']['owner_id'] - or image['properties']['is_public'] == 'True') - - @classmethod - def _filter(cls, context, images): - filtered = [] - for image in images: - if not 
cls._is_visible(context, image): - continue - filtered.append(image) - return filtered + return self.service.detail(context) def show(self, context, image_id): - image = self.service.show(context, image_id) - if not self._is_visible(context, image): - raise exception.NotFound - return image + return self.service.show(context, image_id) def show_by_name(self, context, name): - image = self.service.show_by_name(context, name) - if not self._is_visible(context, image): - raise exception.NotFound - return image + return self.service.show_by_name(context, name) @staticmethod def _conn(context): @@ -128,12 +102,12 @@ class S3ImageService(service.BaseImageService): return local_filename def _s3_create(self, context, metadata): - """Gets a manifext from s3 and makes an image""" + """Gets a manifext from s3 and makes an image.""" image_path = tempfile.mkdtemp(dir=FLAGS.image_decryption_dir) image_location = metadata['properties']['image_location'] - bucket_name = image_location.split("/")[0] + bucket_name = image_location.split('/')[0] manifest_path = image_location[len(bucket_name) + 1:] bucket = self._conn(context).get_bucket(bucket_name) key = bucket.get_key(manifest_path) @@ -144,7 +118,7 @@ class S3ImageService(service.BaseImageService): image_type = 'machine' try: - kernel_id = manifest.find("machine_configuration/kernel_id").text + kernel_id = manifest.find('machine_configuration/kernel_id').text if kernel_id == 'true': image_format = 'aki' image_type = 'kernel' @@ -153,7 +127,7 @@ class S3ImageService(service.BaseImageService): kernel_id = None try: - ramdisk_id = manifest.find("machine_configuration/ramdisk_id").text + ramdisk_id = manifest.find('machine_configuration/ramdisk_id').text if ramdisk_id == 'true': image_format = 'ari' image_type = 'ramdisk' @@ -162,12 +136,12 @@ class S3ImageService(service.BaseImageService): ramdisk_id = None try: - arch = manifest.find("machine_configuration/architecture").text + arch = 
manifest.find('machine_configuration/architecture').text except Exception: arch = 'x86_64' properties = metadata['properties'] - properties['owner_id'] = context.project_id + properties['project_id'] = context.project_id properties['architecture'] = arch if kernel_id: @@ -176,8 +150,6 @@ class S3ImageService(service.BaseImageService): if ramdisk_id: properties['ramdisk_id'] = ec2utils.ec2_id_to_id(ramdisk_id) - properties['is_public'] = False - properties['type'] = image_type metadata.update({'disk_format': image_format, 'container_format': image_format, 'status': 'queued', @@ -190,7 +162,7 @@ class S3ImageService(service.BaseImageService): def delayed_create(): """This handles the fetching and decrypting of the part files.""" parts = [] - for fn_element in manifest.find("image").getiterator("filename"): + for fn_element in manifest.find('image').getiterator('filename'): part = self._download_file(bucket, fn_element.text, image_path) parts.append(part) @@ -204,9 +176,9 @@ class S3ImageService(service.BaseImageService): metadata['properties']['image_state'] = 'decrypting' self.service.update(context, image_id, metadata) - hex_key = manifest.find("image/ec2_encrypted_key").text + hex_key = manifest.find('image/ec2_encrypted_key').text encrypted_key = binascii.a2b_hex(hex_key) - hex_iv = manifest.find("image/ec2_encrypted_iv").text + hex_iv = manifest.find('image/ec2_encrypted_iv').text encrypted_iv = binascii.a2b_hex(hex_iv) # FIXME(vish): grab key from common service so this can run on @@ -244,7 +216,7 @@ class S3ImageService(service.BaseImageService): process_input=encrypted_key, check_exit_code=False) if err: - raise exception.Error(_("Failed to decrypt private key: %s") + raise exception.Error(_('Failed to decrypt private key: %s') % err) iv, err = utils.execute('openssl', 'rsautl', @@ -253,8 +225,8 @@ class S3ImageService(service.BaseImageService): process_input=encrypted_iv, check_exit_code=False) if err: - raise exception.Error(_("Failed to decrypt 
initialization " - "vector: %s") % err) + raise exception.Error(_('Failed to decrypt initialization ' + 'vector: %s') % err) _out, err = utils.execute('openssl', 'enc', '-d', '-aes-128-cbc', @@ -264,14 +236,14 @@ class S3ImageService(service.BaseImageService): '-out', '%s' % (decrypted_filename,), check_exit_code=False) if err: - raise exception.Error(_("Failed to decrypt image file " - "%(image_file)s: %(err)s") % + raise exception.Error(_('Failed to decrypt image file ' + '%(image_file)s: %(err)s') % {'image_file': encrypted_filename, 'err': err}) @staticmethod def _untarzip_image(path, filename): - tar_file = tarfile.open(filename, "r|gz") + tar_file = tarfile.open(filename, 'r|gz') tar_file.extractall(path) image_file = tar_file.getnames()[0] tar_file.close() diff --git a/nova/image/service.py b/nova/image/service.py index b9897ecae..ab6749049 100644 --- a/nova/image/service.py +++ b/nova/image/service.py @@ -20,7 +20,7 @@ from nova import utils class BaseImageService(object): - """Base class for providing image search and retrieval services + """Base class for providing image search and retrieval services. ImageService exposes two concepts of metadata: @@ -35,7 +35,9 @@ class BaseImageService(object): This means that ImageServices will return BASE_IMAGE_ATTRS as keys in the metadata dict, all other attributes will be returned as keys in the nested 'properties' dict. + """ + BASE_IMAGE_ATTRS = ['id', 'name', 'created_at', 'updated_at', 'deleted_at', 'deleted', 'status', 'is_public'] @@ -45,23 +47,18 @@ class BaseImageService(object): SERVICE_IMAGE_ATTRS = [] def index(self, context): - """ - Returns a sequence of mappings of id and name information about - images. + """List images. 
- :rtype: array - :retval: a sequence of mappings with the following signature - {'id': opaque id of image, 'name': name of image} + :returns: a sequence of mappings with the following signature + {'id': opaque id of image, 'name': name of image} """ raise NotImplementedError def detail(self, context): - """ - Returns a sequence of mappings of detailed information about images. + """Detailed information about an images. - :rtype: array - :retval: a sequence of mappings with the following signature + :returns: a sequence of mappings with the following signature {'id': opaque id of image, 'name': name of image, 'created_at': creation datetime object, @@ -77,15 +74,14 @@ class BaseImageService(object): NotImplementedError, in which case Nova will emulate this method with repeated calls to show() for each image received from the index() method. + """ raise NotImplementedError def show(self, context, image_id): - """ - Returns a dict containing image metadata for the given opaque image id. - - :retval a mapping with the following signature: + """Detailed information about an image. + :returns: a mapping with the following signature: {'id': opaque id of image, 'name': name of image, 'created_at': creation datetime object, @@ -96,75 +92,107 @@ class BaseImageService(object): 'is_public': boolean indicating if image is public }, ... - :raises NotFound if the image does not exist + :raises: NotFound if the image does not exist + """ raise NotImplementedError def get(self, context, data): - """ - Returns a dict containing image metadata and writes image data to data. + """Get an image. :param data: a file-like object to hold binary image data + :returns: a dict containing image metadata, writes image data to data. + :raises: NotFound if the image does not exist - :raises NotFound if the image does not exist """ raise NotImplementedError def create(self, context, metadata, data=None): - """ - Store the image metadata and data and return the new image metadata. 
+ """Store the image metadata and data. - :raises AlreadyExists if the image already exist. + :returns: the new image metadata. + :raises: AlreadyExists if the image already exist. """ raise NotImplementedError def update(self, context, image_id, metadata, data=None): - """Update the given image metadata and data and return the metadata + """Update the given image metadata and data and return the metadata. - :raises NotFound if the image does not exist. + :raises: NotFound if the image does not exist. """ raise NotImplementedError def delete(self, context, image_id): - """ - Delete the given image. + """Delete the given image. - :raises NotFound if the image does not exist. + :raises: NotFound if the image does not exist. """ raise NotImplementedError + @staticmethod + def _is_image_available(context, image_meta): + """Check image availability. + + Images are always available if they are public or if the user is an + admin. + + Otherwise, we filter by project_id (if present) and then fall-back to + images owned by user. + + """ + # FIXME(sirp): We should be filtering by user_id on the Glance side + # for security; however, we can't do that until we get authn/authz + # sorted out. Until then, filtering in Nova. + if image_meta['is_public'] or context.is_admin: + return True + + properties = image_meta['properties'] + + if context.project_id and ('project_id' in properties): + return str(properties['project_id']) == str(context.project_id) + + try: + user_id = properties['user_id'] + except KeyError: + return False + + return str(user_id) == str(context.user_id) + @classmethod def _translate_to_base(cls, metadata): """Return a metadata dictionary that is BaseImageService compliant. This is used by subclasses to expose only a metadata dictionary that is the same across ImageService implementations. 
+ """ return cls._propertify_metadata(metadata, cls.BASE_IMAGE_ATTRS) @classmethod def _translate_to_service(cls, metadata): - """Return a metadata dictionary that is usable by the ImageService - subclass. + """Return a metadata dict that is usable by the ImageService subclass. As an example, Glance has additional attributes (like 'location'); the BaseImageService considers these properties, but we need to translate these back to first-class attrs for sending to Glance. This method handles this by allowing you to specify the attributes an ImageService considers first-class. + """ if not cls.SERVICE_IMAGE_ATTRS: - raise NotImplementedError(_("Cannot use this without specifying " - "SERVICE_IMAGE_ATTRS for subclass")) + raise NotImplementedError(_('Cannot use this without specifying ' + 'SERVICE_IMAGE_ATTRS for subclass')) return cls._propertify_metadata(metadata, cls.SERVICE_IMAGE_ATTRS) @staticmethod def _propertify_metadata(metadata, keys): - """Return a dict with any unrecognized keys placed in the nested - 'properties' dict. + """Move unknown keys to a nested 'properties' dict. + + :returns: a new dict with the keys moved. + """ flattened = utils.flatten_dict(metadata) attributes, properties = utils.partition_dict(flattened, keys) diff --git a/nova/tests/real_flags.py b/nova/ipv6/__init__.py index 71da04992..da4567cfb 100644 --- a/nova/tests/real_flags.py +++ b/nova/ipv6/__init__.py @@ -1,8 +1,6 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. +# Copyright (c) 2011 Openstack, LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -16,11 +14,4 @@ # License for the specific language governing permissions and limitations # under the License. 
-from nova import flags - -FLAGS = flags.FLAGS - -FLAGS.connection_type = 'libvirt' -FLAGS.fake_rabbit = False -FLAGS.fake_network = False -FLAGS.verbose = False +from nova.ipv6.api import * diff --git a/nova/ipv6/account_identifier.py b/nova/ipv6/account_identifier.py new file mode 100644 index 000000000..258678f0a --- /dev/null +++ b/nova/ipv6/account_identifier.py @@ -0,0 +1,45 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""IPv6 address generation with account identifier embedded""" + +import hashlib +import netaddr + + +def to_global(prefix, mac, project_id): + project_hash = netaddr.IPAddress(int(hashlib.sha1(project_id).\ + hexdigest()[:8], 16) << 32) + static_num = netaddr.IPAddress(0xff << 24) + + try: + mac_suffix = netaddr.EUI(mac).words[3:] + int_addr = int(''.join(['%02x' % i for i in mac_suffix]), 16) + mac_addr = netaddr.IPAddress(int_addr) + maskIP = netaddr.IPNetwork(prefix).ip + return (project_hash ^ static_num ^ mac_addr | maskIP).format() + except TypeError: + raise TypeError(_('Bad mac for to_global_ipv6: %s') % mac) + + +def to_mac(ipv6_address): + address = netaddr.IPAddress(ipv6_address) + mask1 = netaddr.IPAddress('::ff:ffff') + mac = netaddr.EUI(int(address & mask1)).words + return ':'.join(['02', '16', '3e'] + ['%02x' % i for i in mac[3:6]]) diff --git a/nova/ipv6/api.py b/nova/ipv6/api.py new file mode 100644 index 000000000..da003645a --- /dev/null +++ b/nova/ipv6/api.py @@ -0,0 +1,41 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Openstack, LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from nova import flags +from nova import utils + + +FLAGS = flags.FLAGS +flags.DEFINE_string('ipv6_backend', + 'rfc2462', + 'Backend to use for IPv6 generation') + + +def reset_backend(): + global IMPL + IMPL = utils.LazyPluggable(FLAGS['ipv6_backend'], + rfc2462='nova.ipv6.rfc2462', + account_identifier='nova.ipv6.account_identifier') + + +def to_global(prefix, mac, project_id): + return IMPL.to_global(prefix, mac, project_id) + + +def to_mac(ipv6_address): + return IMPL.to_mac(ipv6_address) + +reset_backend() diff --git a/nova/ipv6/rfc2462.py b/nova/ipv6/rfc2462.py new file mode 100644 index 000000000..0074efe98 --- /dev/null +++ b/nova/ipv6/rfc2462.py @@ -0,0 +1,42 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 Justin Santa Barbara +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +"""RFC2462 style IPv6 address generation""" + +import netaddr + + +def to_global(prefix, mac, project_id): + try: + mac64 = netaddr.EUI(mac).eui64().words + int_addr = int(''.join(['%02x' % i for i in mac64]), 16) + mac64_addr = netaddr.IPAddress(int_addr) + maskIP = netaddr.IPNetwork(prefix).ip + return (mac64_addr ^ netaddr.IPAddress('::0200:0:0:0') | maskIP).\ + format() + except TypeError: + raise TypeError(_('Bad mac for to_global_ipv6: %s') % mac) + + +def to_mac(ipv6_address): + address = netaddr.IPAddress(ipv6_address) + mask1 = netaddr.IPAddress('::ffff:ffff:ffff:ffff') + mask2 = netaddr.IPAddress('::0200:0:0:0') + mac64 = netaddr.EUI(int(address & mask1 ^ mask2)).words + return ':'.join(['%02x' % i for i in mac64[0:3] + mac64[5:8]]) diff --git a/nova/log.py b/nova/log.py index d194ab8f0..096279f7c 100644 --- a/nova/log.py +++ b/nova/log.py @@ -16,16 +16,15 @@ # License for the specific language governing permissions and limitations # under the License. -""" -Nova logging handler. +"""Nova logging handler. This module adds to logging functionality by adding the option to specify a context object when calling the various log methods. If the context object is not specified, default formatting is used. It also allows setting of formatting information through flags. 
-""" +""" import cStringIO import inspect @@ -41,34 +40,28 @@ from nova import version FLAGS = flags.FLAGS - flags.DEFINE_string('logging_context_format_string', '%(asctime)s %(levelname)s %(name)s ' '[%(request_id)s %(user)s ' '%(project)s] %(message)s', 'format string to use for log messages with context') - flags.DEFINE_string('logging_default_format_string', '%(asctime)s %(levelname)s %(name)s [-] ' '%(message)s', 'format string to use for log messages without context') - flags.DEFINE_string('logging_debug_format_suffix', 'from (pid=%(process)d) %(funcName)s' ' %(pathname)s:%(lineno)d', 'data to append to log format when level is DEBUG') - flags.DEFINE_string('logging_exception_prefix', '(%(name)s): TRACE: ', 'prefix each line of exception output with this format') - flags.DEFINE_list('default_log_levels', ['amqplib=WARN', 'sqlalchemy=WARN', 'boto=WARN', 'eventlet.wsgi.server=WARN'], 'list of logger=LEVEL pairs') - flags.DEFINE_bool('use_syslog', False, 'output to syslog') flags.DEFINE_string('logfile', None, 'output to named file') @@ -83,6 +76,8 @@ WARN = logging.WARN INFO = logging.INFO DEBUG = logging.DEBUG NOTSET = logging.NOTSET + + # methods getLogger = logging.getLogger debug = logging.debug @@ -93,6 +88,8 @@ error = logging.error exception = logging.exception critical = logging.critical log = logging.log + + # handlers StreamHandler = logging.StreamHandler WatchedFileHandler = logging.handlers.WatchedFileHandler @@ -106,7 +103,7 @@ logging.addLevelName(AUDIT, 'AUDIT') def _dictify_context(context): - if context == None: + if context is None: return None if not isinstance(context, dict) \ and getattr(context, 'to_dict', None): @@ -127,17 +124,18 @@ def _get_log_file_path(binary=None): class NovaLogger(logging.Logger): - """ - NovaLogger manages request context and formatting. + """NovaLogger manages request context and formatting. This becomes the class that is instanciated by logging.getLogger. 
+ """ + def __init__(self, name, level=NOTSET): logging.Logger.__init__(self, name, level) self.setup_from_flags() def setup_from_flags(self): - """Setup logger from flags""" + """Setup logger from flags.""" level = NOTSET for pair in FLAGS.default_log_levels: logger, _sep, level_name = pair.partition('=') @@ -148,7 +146,7 @@ class NovaLogger(logging.Logger): self.setLevel(level) def _log(self, level, msg, args, exc_info=None, extra=None, context=None): - """Extract context from any log call""" + """Extract context from any log call.""" if not extra: extra = {} if context: @@ -157,17 +155,17 @@ class NovaLogger(logging.Logger): return logging.Logger._log(self, level, msg, args, exc_info, extra) def addHandler(self, handler): - """Each handler gets our custom formatter""" + """Each handler gets our custom formatter.""" handler.setFormatter(_formatter) return logging.Logger.addHandler(self, handler) def audit(self, msg, *args, **kwargs): - """Shortcut for our AUDIT level""" + """Shortcut for our AUDIT level.""" if self.isEnabledFor(AUDIT): self._log(AUDIT, msg, args, **kwargs) def exception(self, msg, *args, **kwargs): - """Logging.exception doesn't handle kwargs, so breaks context""" + """Logging.exception doesn't handle kwargs, so breaks context.""" if not kwargs.get('exc_info'): kwargs['exc_info'] = 1 self.error(msg, *args, **kwargs) @@ -181,14 +179,13 @@ class NovaLogger(logging.Logger): for k in env.keys(): if not isinstance(env[k], str): env.pop(k) - message = "Environment: %s" % json.dumps(env) + message = 'Environment: %s' % json.dumps(env) kwargs.pop('exc_info') self.error(message, **kwargs) class NovaFormatter(logging.Formatter): - """ - A nova.context.RequestContext aware formatter configured through flags. + """A nova.context.RequestContext aware formatter configured through flags. The flags used to set format strings are: logging_context_foramt_string and logging_default_format_string. 
You can also specify @@ -197,10 +194,11 @@ class NovaFormatter(logging.Formatter): For information about what variables are available for the formatter see: http://docs.python.org/library/logging.html#formatter + """ def format(self, record): - """Uses contextstring if request_id is set, otherwise default""" + """Uses contextstring if request_id is set, otherwise default.""" if record.__dict__.get('request_id', None): self._fmt = FLAGS.logging_context_format_string else: @@ -214,20 +212,21 @@ class NovaFormatter(logging.Formatter): return logging.Formatter.format(self, record) def formatException(self, exc_info, record=None): - """Format exception output with FLAGS.logging_exception_prefix""" + """Format exception output with FLAGS.logging_exception_prefix.""" if not record: return logging.Formatter.formatException(self, exc_info) stringbuffer = cStringIO.StringIO() traceback.print_exception(exc_info[0], exc_info[1], exc_info[2], None, stringbuffer) - lines = stringbuffer.getvalue().split("\n") + lines = stringbuffer.getvalue().split('\n') stringbuffer.close() formatted_lines = [] for line in lines: pl = FLAGS.logging_exception_prefix % record.__dict__ - fl = "%s%s" % (pl, line) + fl = '%s%s' % (pl, line) formatted_lines.append(fl) - return "\n".join(formatted_lines) + return '\n'.join(formatted_lines) + _formatter = NovaFormatter() @@ -241,7 +240,7 @@ class NovaRootLogger(NovaLogger): NovaLogger.__init__(self, name, level) def setup_from_flags(self): - """Setup logger from flags""" + """Setup logger from flags.""" global _filelog if FLAGS.use_syslog: self.syslog = SysLogHandler(address='/dev/log') diff --git a/nova/manager.py b/nova/manager.py index 804a50479..34338ac04 100644 --- a/nova/manager.py +++ b/nova/manager.py @@ -16,7 +16,8 @@ # License for the specific language governing permissions and limitations # under the License. -""" +"""Base Manager class. + Managers are responsible for a certain aspect of the sytem. 
It is a logical grouping of code relating to a portion of the system. In general other components should be using the manager to make changes to the components that @@ -49,16 +50,19 @@ Managers will often provide methods for initial setup of a host or periodic tasksto a wrapping service. This module provides Manager, a base class for managers. + """ -from nova import utils from nova import flags from nova import log as logging +from nova import utils from nova.db import base from nova.scheduler import api + FLAGS = flags.FLAGS + LOG = logging.getLogger('nova.manager') @@ -70,23 +74,29 @@ class Manager(base.Base): super(Manager, self).__init__(db_driver) def periodic_tasks(self, context=None): - """Tasks to be run at a periodic interval""" + """Tasks to be run at a periodic interval.""" pass def init_host(self): - """Do any initialization that needs to be run if this is a standalone - service. Child classes should override this method.""" + """Handle initialization if this is a standalone service. + + Child classes should override this method. + + """ pass class SchedulerDependentManager(Manager): """Periodically send capability updates to the Scheduler services. - Services that need to update the Scheduler of their capabilities - should derive from this class. Otherwise they can derive from - manager.Manager directly. Updates are only sent after - update_service_capabilities is called with non-None values.""" - def __init__(self, host=None, db_driver=None, service_name="undefined"): + Services that need to update the Scheduler of their capabilities + should derive from this class. Otherwise they can derive from + manager.Manager directly. Updates are only sent after + update_service_capabilities is called with non-None values. 
+ + """ + + def __init__(self, host=None, db_driver=None, service_name='undefined'): self.last_capabilities = None self.service_name = service_name super(SchedulerDependentManager, self).__init__(host, db_driver) @@ -96,9 +106,9 @@ class SchedulerDependentManager(Manager): self.last_capabilities = capabilities def periodic_tasks(self, context=None): - """Pass data back to the scheduler at a periodic interval""" + """Pass data back to the scheduler at a periodic interval.""" if self.last_capabilities: - LOG.debug(_("Notifying Schedulers of capabilities ...")) + LOG.debug(_('Notifying Schedulers of capabilities ...')) api.update_service_capabilities(context, self.service_name, self.host, self.last_capabilities) diff --git a/nova/network/api.py b/nova/network/api.py index c56e3062b..e2eacdf42 100644 --- a/nova/network/api.py +++ b/nova/network/api.py @@ -16,9 +16,7 @@ # License for the specific language governing permissions and limitations # under the License. -""" -Handles all requests relating to instances (guest vms). -""" +"""Handles all requests relating to instances (guest vms).""" from nova import db from nova import exception @@ -28,6 +26,7 @@ from nova import quota from nova import rpc from nova.db import base + FLAGS = flags.FLAGS LOG = logging.getLogger('nova.network') @@ -37,48 +36,54 @@ class API(base.Base): def allocate_floating_ip(self, context): if quota.allowed_floating_ips(context, 1) < 1: - LOG.warn(_("Quota exceeeded for %s, tried to allocate " - "address"), - context.project_id) - raise quota.QuotaError(_("Address quota exceeded. You cannot " - "allocate any more addresses")) + LOG.warn(_('Quota exceeeded for %s, tried to allocate ' + 'address'), + context.project_id) + raise quota.QuotaError(_('Address quota exceeded. You cannot ' + 'allocate any more addresses')) # NOTE(vish): We don't know which network host should get the ip # when we allocate, so just send it to any one. 
This # will probably need to move into a network supervisor # at some point. return rpc.call(context, FLAGS.network_topic, - {"method": "allocate_floating_ip", - "args": {"project_id": context.project_id}}) + {'method': 'allocate_floating_ip', + 'args': {'project_id': context.project_id}}) - def release_floating_ip(self, context, address): + def release_floating_ip(self, context, address, + affect_auto_assigned=False): floating_ip = self.db.floating_ip_get_by_address(context, address) + if not affect_auto_assigned and floating_ip.get('auto_assigned'): + return # NOTE(vish): We don't know which network host should get the ip # when we deallocate, so just send it to any one. This # will probably need to move into a network supervisor # at some point. rpc.cast(context, FLAGS.network_topic, - {"method": "deallocate_floating_ip", - "args": {"floating_address": floating_ip['address']}}) + {'method': 'deallocate_floating_ip', + 'args': {'floating_address': floating_ip['address']}}) - def associate_floating_ip(self, context, floating_ip, fixed_ip): + def associate_floating_ip(self, context, floating_ip, fixed_ip, + affect_auto_assigned=False): if isinstance(fixed_ip, str) or isinstance(fixed_ip, unicode): fixed_ip = self.db.fixed_ip_get_by_address(context, fixed_ip) floating_ip = self.db.floating_ip_get_by_address(context, floating_ip) + if not affect_auto_assigned and floating_ip.get('auto_assigned'): + return # Check if the floating ip address is allocated if floating_ip['project_id'] is None: - raise exception.ApiError(_("Address (%s) is not allocated") % + raise exception.ApiError(_('Address (%s) is not allocated') % floating_ip['address']) # Check if the floating ip address is allocated to the same project if floating_ip['project_id'] != context.project_id: - LOG.warn(_("Address (%(address)s) is not allocated to your " - "project (%(project)s)"), + LOG.warn(_('Address (%(address)s) is not allocated to your ' + 'project (%(project)s)'), {'address': 
floating_ip['address'], 'project': context.project_id}) - raise exception.ApiError(_("Address (%(address)s) is not " - "allocated to your project" - "(%(project)s)") % + raise exception.ApiError(_('Address (%(address)s) is not ' + 'allocated to your project' + '(%(project)s)') % {'address': floating_ip['address'], 'project': context.project_id}) # NOTE(vish): Perhaps we should just pass this on to compute and @@ -86,12 +91,15 @@ class API(base.Base): host = fixed_ip['network']['host'] rpc.cast(context, self.db.queue_get_for(context, FLAGS.network_topic, host), - {"method": "associate_floating_ip", - "args": {"floating_address": floating_ip['address'], - "fixed_address": fixed_ip['address']}}) + {'method': 'associate_floating_ip', + 'args': {'floating_address': floating_ip['address'], + 'fixed_address': fixed_ip['address']}}) - def disassociate_floating_ip(self, context, address): + def disassociate_floating_ip(self, context, address, + affect_auto_assigned=False): floating_ip = self.db.floating_ip_get_by_address(context, address) + if not affect_auto_assigned and floating_ip.get('auto_assigned'): + return if not floating_ip.get('fixed_ip'): raise exception.ApiError('Address is not associated.') # NOTE(vish): Get the topic from the host name of the network of @@ -99,5 +107,5 @@ class API(base.Base): host = floating_ip['fixed_ip']['network']['host'] rpc.cast(context, self.db.queue_get_for(context, FLAGS.network_topic, host), - {"method": "disassociate_floating_ip", - "args": {"floating_address": floating_ip['address']}}) + {'method': 'disassociate_floating_ip', + 'args': {'floating_address': floating_ip['address']}}) diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 322524a41..b9d9838f4 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -15,26 +15,27 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
-""" -Implements vlans, bridges, and iptables rules using linux utilities. -""" +"""Implements vlans, bridges, and iptables rules using linux utilities.""" + +import calendar import inspect import os -import calendar from nova import db from nova import exception from nova import flags from nova import log as logging from nova import utils +from IPy import IP + LOG = logging.getLogger("nova.linux_net") def _bin_file(script): - """Return the absolute path to scipt in the bin directory""" - return os.path.abspath(os.path.join(__file__, "../../../bin", script)) + """Return the absolute path to scipt in the bin directory.""" + return os.path.abspath(os.path.join(__file__, '../../../bin', script)) FLAGS = flags.FLAGS @@ -56,22 +57,23 @@ flags.DEFINE_string('input_chain', 'INPUT', 'chain to add nova_input to') flags.DEFINE_integer('dhcp_lease_time', 120, 'Lifetime of a DHCP lease') - flags.DEFINE_string('dns_server', None, 'if set, uses specific dns server for dnsmasq') flags.DEFINE_string('dmz_cidr', '10.128.0.0/24', 'dmz range that should be accepted') - - +flags.DEFINE_string('dnsmasq_config_file', "", + 'Override the default dnsmasq settings with this file') binary_name = os.path.basename(inspect.stack()[-1][1]) class IptablesRule(object): - """An iptables rule + """An iptables rule. You shouldn't need to use this class directly, it's only used by - IptablesManager + IptablesManager. + """ + def __init__(self, chain, rule, wrap=True, top=False): self.chain = chain self.rule = rule @@ -96,7 +98,7 @@ class IptablesRule(object): class IptablesTable(object): - """An iptables table""" + """An iptables table.""" def __init__(self): self.rules = [] @@ -104,15 +106,16 @@ class IptablesTable(object): self.unwrapped_chains = set() def add_chain(self, name, wrap=True): - """Adds a named chain to the table + """Adds a named chain to the table. 
The chain name is wrapped to be unique for the component creating it, so different components of Nova can safely create identically named chains without interfering with one another. At the moment, its wrapped name is <binary name>-<chain name>, - so if nova-compute creates a chain named "OUTPUT", it'll actually - end up named "nova-compute-OUTPUT". + so if nova-compute creates a chain named 'OUTPUT', it'll actually + end up named 'nova-compute-OUTPUT'. + """ if wrap: self.chains.add(name) @@ -120,12 +123,13 @@ class IptablesTable(object): self.unwrapped_chains.add(name) def remove_chain(self, name, wrap=True): - """Remove named chain + """Remove named chain. This removal "cascades". All rule in the chain are removed, as are all rules in other chains that jump to it. If the chain is not found, this is merely logged. + """ if wrap: chain_set = self.chains @@ -133,7 +137,7 @@ class IptablesTable(object): chain_set = self.unwrapped_chains if name not in chain_set: - LOG.debug(_("Attempted to remove chain %s which doesn't exist"), + LOG.debug(_('Attempted to remove chain %s which does not exist'), name) return @@ -148,17 +152,18 @@ class IptablesTable(object): self.rules = filter(lambda r: jump_snippet not in r.rule, self.rules) def add_rule(self, chain, rule, wrap=True, top=False): - """Add a rule to the table + """Add a rule to the table. This is just like what you'd feed to iptables, just without - the "-A <chain name>" bit at the start. + the '-A <chain name>' bit at the start. However, if you need to jump to one of your wrapped chains, prepend its name with a '$' which will ensure the wrapping is applied correctly. 
+ """ if wrap and chain not in self.chains: - raise ValueError(_("Unknown chain: %r") % chain) + raise ValueError(_('Unknown chain: %r') % chain) if '$' in rule: rule = ' '.join(map(self._wrap_target_chain, rule.split(' '))) @@ -171,17 +176,18 @@ class IptablesTable(object): return s def remove_rule(self, chain, rule, wrap=True, top=False): - """Remove a rule from a chain + """Remove a rule from a chain. Note: The rule must be exactly identical to the one that was added. You cannot switch arguments around like you can with the iptables CLI tool. + """ try: self.rules.remove(IptablesRule(chain, rule, wrap, top)) except ValueError: - LOG.debug(_("Tried to remove rule that wasn't there:" - " %(chain)r %(rule)r %(wrap)r %(top)r"), + LOG.debug(_('Tried to remove rule that was not there:' + ' %(chain)r %(rule)r %(wrap)r %(top)r'), {'chain': chain, 'rule': rule, 'top': top, 'wrap': wrap}) @@ -194,7 +200,7 @@ class IptablesTable(object): class IptablesManager(object): - """Wrapper for iptables + """Wrapper for iptables. See IptablesTable for some usage docs @@ -213,7 +219,9 @@ class IptablesManager(object): For ipv4, the builtin PREROUTING, OUTPUT, and POSTROUTING nat chains are wrapped in the same was as the builtin filter chains. Additionally, there's a snat chain that is applied after the POSTROUTING chain. + """ + def __init__(self, execute=None): if not execute: self.execute = _execute @@ -275,11 +283,12 @@ class IptablesManager(object): @utils.synchronized('iptables', external=True) def apply(self): - """Apply the current in-memory set of iptables rules + """Apply the current in-memory set of iptables rules. This will blow away any rules left over from previous runs of the same component of Nova, and replace them with our current set of rules. This happens atomically, thanks to iptables-restore. 
+ """ s = [('iptables', self.ipv4)] if FLAGS.use_ipv6: @@ -356,55 +365,65 @@ class IptablesManager(object): def metadata_forward(): - """Create forwarding rule for metadata""" - iptables_manager.ipv4['nat'].add_rule("PREROUTING", - "-s 0.0.0.0/0 -d 169.254.169.254/32 " - "-p tcp -m tcp --dport 80 -j DNAT " - "--to-destination %s:%s" % \ + """Create forwarding rule for metadata.""" + iptables_manager.ipv4['nat'].add_rule('PREROUTING', + '-s 0.0.0.0/0 -d 169.254.169.254/32 ' + '-p tcp -m tcp --dport 80 -j DNAT ' + '--to-destination %s:%s' % \ (FLAGS.ec2_dmz_host, FLAGS.ec2_port)) iptables_manager.apply() def init_host(): - """Basic networking setup goes here""" + """Basic networking setup goes here.""" # NOTE(devcamcar): Cloud public SNAT entries and the default # SNAT rule for outbound traffic. - iptables_manager.ipv4['nat'].add_rule("snat", - "-s %s -j SNAT --to-source %s" % \ + iptables_manager.ipv4['nat'].add_rule('snat', + '-s %s -j SNAT --to-source %s' % \ (FLAGS.fixed_range, FLAGS.routing_source_ip)) - iptables_manager.ipv4['nat'].add_rule("POSTROUTING", - "-s %s -d %s -j ACCEPT" % \ + iptables_manager.ipv4['nat'].add_rule('POSTROUTING', + '-s %s -d %s -j ACCEPT' % \ (FLAGS.fixed_range, FLAGS.dmz_cidr)) - iptables_manager.ipv4['nat'].add_rule("POSTROUTING", - "-s %(range)s -d %(range)s " - "-j ACCEPT" % \ + iptables_manager.ipv4['nat'].add_rule('POSTROUTING', + '-s %(range)s -d %(range)s ' + '-j ACCEPT' % \ {'range': FLAGS.fixed_range}) iptables_manager.apply() def bind_floating_ip(floating_ip, check_exit_code=True): - """Bind ip to public interface""" + """Bind ip to public interface.""" _execute('sudo', 'ip', 'addr', 'add', floating_ip, 'dev', FLAGS.public_interface, check_exit_code=check_exit_code) def unbind_floating_ip(floating_ip): - """Unbind a public ip from public interface""" + """Unbind a public ip from public interface.""" _execute('sudo', 'ip', 'addr', 'del', floating_ip, 'dev', FLAGS.public_interface) +def ensure_metadata_ip(): + """Sets up local 
metadata ip.""" + _execute('sudo', 'ip', 'addr', 'add', '169.254.169.254/32', + 'scope', 'link', 'dev', 'lo', check_exit_code=False) + + def ensure_vlan_forward(public_ip, port, private_ip): - """Sets up forwarding rules for vlan""" - iptables_manager.ipv4['filter'].add_rule("FORWARD", - "-d %s -p udp " - "--dport 1194 " - "-j ACCEPT" % private_ip) - iptables_manager.ipv4['nat'].add_rule("PREROUTING", + """Sets up forwarding rules for vlan.""" + iptables_manager.ipv4['filter'].add_rule('FORWARD', + '-d %s -p udp ' + '--dport 1194 ' + '-j ACCEPT' % private_ip) + iptables_manager.ipv4['nat'].add_rule('PREROUTING', + '-d %s -p udp ' + '--dport %s -j DNAT --to %s:1194' % + (public_ip, port, private_ip)) + iptables_manager.ipv4['nat'].add_rule("OUTPUT", "-d %s -p udp " "--dport %s -j DNAT --to %s:1194" % (public_ip, port, private_ip)) @@ -412,43 +431,45 @@ def ensure_vlan_forward(public_ip, port, private_ip): def ensure_floating_forward(floating_ip, fixed_ip): - """Ensure floating ip forwarding rule""" + """Ensure floating ip forwarding rule.""" for chain, rule in floating_forward_rules(floating_ip, fixed_ip): iptables_manager.ipv4['nat'].add_rule(chain, rule) iptables_manager.apply() def remove_floating_forward(floating_ip, fixed_ip): - """Remove forwarding for floating ip""" + """Remove forwarding for floating ip.""" for chain, rule in floating_forward_rules(floating_ip, fixed_ip): iptables_manager.ipv4['nat'].remove_rule(chain, rule) iptables_manager.apply() def floating_forward_rules(floating_ip, fixed_ip): - return [("PREROUTING", "-d %s -j DNAT --to %s" % (floating_ip, fixed_ip)), - ("OUTPUT", "-d %s -j DNAT --to %s" % (floating_ip, fixed_ip)), - ("floating-snat", - "-s %s -j SNAT --to %s" % (fixed_ip, floating_ip))] + return [('PREROUTING', '-d %s -j DNAT --to %s' % (floating_ip, fixed_ip)), + ('OUTPUT', '-d %s -j DNAT --to %s' % (floating_ip, fixed_ip)), + ('floating-snat', + '-s %s -j SNAT --to %s' % (fixed_ip, floating_ip))] def ensure_vlan_bridge(vlan_num, 
bridge, net_attrs=None): - """Create a vlan and bridge unless they already exist""" + """Create a vlan and bridge unless they already exist.""" interface = ensure_vlan(vlan_num) ensure_bridge(bridge, interface, net_attrs) +@utils.synchronized('ensure_vlan', external=True) def ensure_vlan(vlan_num): - """Create a vlan unless it already exists""" - interface = "vlan%s" % vlan_num + """Create a vlan unless it already exists.""" + interface = 'vlan%s' % vlan_num if not _device_exists(interface): - LOG.debug(_("Starting VLAN inteface %s"), interface) + LOG.debug(_('Starting VLAN inteface %s'), interface) _execute('sudo', 'vconfig', 'set_name_type', 'VLAN_PLUS_VID_NO_PAD') _execute('sudo', 'vconfig', 'add', FLAGS.vlan_interface, vlan_num) _execute('sudo', 'ip', 'link', 'set', interface, 'up') return interface +@utils.synchronized('ensure_bridge', external=True) def ensure_bridge(bridge, interface, net_attrs=None): """Create a bridge unless it already exists. @@ -461,12 +482,13 @@ def ensure_bridge(bridge, interface, net_attrs=None): The code will attempt to move any ips that already exist on the interface onto the bridge and reset the default gateway if necessary. 
+ """ if not _device_exists(bridge): - LOG.debug(_("Starting Bridge interface for %s"), interface) + LOG.debug(_('Starting Bridge interface for %s'), interface) _execute('sudo', 'brctl', 'addbr', bridge) _execute('sudo', 'brctl', 'setfd', bridge, 0) - # _execute("sudo brctl setageing %s 10" % bridge) + # _execute('sudo brctl setageing %s 10' % bridge) _execute('sudo', 'brctl', 'stp', bridge, 'off') _execute('sudo', 'ip', 'link', 'set', bridge, 'up') if net_attrs: @@ -474,15 +496,15 @@ def ensure_bridge(bridge, interface, net_attrs=None): # bridge for it to respond to reqests properly suffix = net_attrs['cidr'].rpartition('/')[2] out, err = _execute('sudo', 'ip', 'addr', 'add', - "%s/%s" % + '%s/%s' % (net_attrs['gateway'], suffix), 'brd', net_attrs['broadcast'], 'dev', bridge, check_exit_code=False) - if err and err != "RTNETLINK answers: File exists\n": - raise exception.Error("Failed to add ip: %s" % err) + if err and err != 'RTNETLINK answers: File exists\n': + raise exception.Error('Failed to add ip: %s' % err) if(FLAGS.use_ipv6): _execute('sudo', 'ip', '-f', 'inet6', 'addr', 'change', net_attrs['cidr_v6'], @@ -498,37 +520,39 @@ def ensure_bridge(bridge, interface, net_attrs=None): # interface, so we move any ips to the bridge gateway = None out, err = _execute('sudo', 'route', '-n') - for line in out.split("\n"): + for line in out.split('\n'): fields = line.split() - if fields and fields[0] == "0.0.0.0" and fields[-1] == interface: + if fields and fields[0] == '0.0.0.0' and fields[-1] == interface: gateway = fields[1] + _execute('sudo', 'route', 'del', 'default', 'gw', gateway, + 'dev', interface, check_exit_code=False) out, err = _execute('sudo', 'ip', 'addr', 'show', 'dev', interface, 'scope', 'global') - for line in out.split("\n"): + for line in out.split('\n'): fields = line.split() - if fields and fields[0] == "inet": + if fields and fields[0] == 'inet': params = fields[1:-1] _execute(*_ip_bridge_cmd('del', params, fields[-1])) 
_execute(*_ip_bridge_cmd('add', params, bridge)) if gateway: - _execute('sudo', 'route', 'add', '0.0.0.0', 'gw', gateway) + _execute('sudo', 'route', 'add', 'default', 'gw', gateway) out, err = _execute('sudo', 'brctl', 'addif', bridge, interface, check_exit_code=False) if (err and err != "device %s is already a member of a bridge; can't " "enslave it to bridge %s.\n" % (interface, bridge)): - raise exception.Error("Failed to add interface: %s" % err) + raise exception.Error('Failed to add interface: %s' % err) - iptables_manager.ipv4['filter'].add_rule("FORWARD", - "--in-interface %s -j ACCEPT" % \ + iptables_manager.ipv4['filter'].add_rule('FORWARD', + '--in-interface %s -j ACCEPT' % \ bridge) - iptables_manager.ipv4['filter'].add_rule("FORWARD", - "--out-interface %s -j ACCEPT" % \ + iptables_manager.ipv4['filter'].add_rule('FORWARD', + '--out-interface %s -j ACCEPT' % \ bridge) def get_dhcp_leases(context, network_id): - """Return a network's hosts config in dnsmasq leasefile format""" + """Return a network's hosts config in dnsmasq leasefile format.""" hosts = [] for fixed_ip_ref in db.network_get_associated_fixed_ips(context, network_id): @@ -537,7 +561,7 @@ def get_dhcp_leases(context, network_id): def get_dhcp_hosts(context, network_id): - """Get a string containing a network's hosts config in dhcp-host format""" + """Get network's hosts config in dhcp-host format.""" hosts = [] for fixed_ip_ref in db.network_get_associated_fixed_ips(context, network_id): @@ -550,10 +574,11 @@ def get_dhcp_hosts(context, network_id): # aren't reloaded. @utils.synchronized('dnsmasq_start') def update_dhcp(context, network_id): - """(Re)starts a dnsmasq server for a given network + """(Re)starts a dnsmasq server for a given network. + + If a dnsmasq instance is already running then send a HUP + signal causing it to reload, otherwise spawn a new instance. 
- if a dnsmasq instance is already running then send a HUP - signal causing it to reload, otherwise spawn a new instance """ network_ref = db.network_get(context, network_id) @@ -568,16 +593,16 @@ def update_dhcp(context, network_id): # if dnsmasq is already running, then tell it to reload if pid: - out, _err = _execute('cat', "/proc/%d/cmdline" % pid, + out, _err = _execute('cat', '/proc/%d/cmdline' % pid, check_exit_code=False) if conffile in out: try: _execute('sudo', 'kill', '-HUP', pid) return except Exception as exc: # pylint: disable=W0703 - LOG.debug(_("Hupping dnsmasq threw %s"), exc) + LOG.debug(_('Hupping dnsmasq threw %s'), exc) else: - LOG.debug(_("Pid %d is stale, relaunching dnsmasq"), pid) + LOG.debug(_('Pid %d is stale, relaunching dnsmasq'), pid) # FLAGFILE and DNSMASQ_INTERFACE in env env = {'FLAGFILE': FLAGS.dhcpbridge_flagfile, @@ -620,18 +645,18 @@ interface %s try: _execute('sudo', 'kill', pid) except Exception as exc: # pylint: disable=W0703 - LOG.debug(_("killing radvd threw %s"), exc) + LOG.debug(_('killing radvd threw %s'), exc) else: - LOG.debug(_("Pid %d is stale, relaunching radvd"), pid) + LOG.debug(_('Pid %d is stale, relaunching radvd'), pid) command = _ra_cmd(network_ref) _execute(*command) db.network_update(context, network_id, - {"gateway_v6": + {'gateway_v6': utils.get_my_linklocal(network_ref['bridge'])}) def _host_lease(fixed_ip_ref): - """Return a host string for an address in leasefile format""" + """Return a host string for an address in leasefile format.""" instance_ref = fixed_ip_ref['instance'] if instance_ref['updated_at']: timestamp = instance_ref['updated_at'] @@ -640,48 +665,49 @@ def _host_lease(fixed_ip_ref): seconds_since_epoch = calendar.timegm(timestamp.utctimetuple()) - return "%d %s %s %s *" % (seconds_since_epoch + FLAGS.dhcp_lease_time, + return '%d %s %s %s *' % (seconds_since_epoch + FLAGS.dhcp_lease_time, instance_ref['mac_address'], fixed_ip_ref['address'], instance_ref['hostname'] or '*') def 
_host_dhcp(fixed_ip_ref): - """Return a host string for an address in dhcp-host format""" + """Return a host string for an address in dhcp-host format.""" instance_ref = fixed_ip_ref['instance'] - return "%s,%s.%s,%s" % (instance_ref['mac_address'], + return '%s,%s.%s,%s' % (instance_ref['mac_address'], instance_ref['hostname'], FLAGS.dhcp_domain, fixed_ip_ref['address']) def _execute(*cmd, **kwargs): - """Wrapper around utils._execute for fake_network""" + """Wrapper around utils._execute for fake_network.""" if FLAGS.fake_network: - LOG.debug("FAKE NET: %s", " ".join(map(str, cmd))) - return "fake", 0 + LOG.debug('FAKE NET: %s', ' '.join(map(str, cmd))) + return 'fake', 0 else: return utils.execute(*cmd, **kwargs) def _device_exists(device): - """Check if ethernet device exists""" + """Check if ethernet device exists.""" (_out, err) = _execute('ip', 'link', 'show', 'dev', device, check_exit_code=False) return not err def _dnsmasq_cmd(net): - """Builds dnsmasq command""" + """Builds dnsmasq command.""" cmd = ['sudo', '-E', 'dnsmasq', '--strict-order', '--bind-interfaces', - '--conf-file=', + '--conf-file=%s' % FLAGS.dnsmasq_config_file, '--domain=%s' % FLAGS.dhcp_domain, '--pid-file=%s' % _dhcp_file(net['bridge'], 'pid'), '--listen-address=%s' % net['gateway'], '--except-interface=lo', '--dhcp-range=%s,static,120s' % net['dhcp_start'], + '--dhcp-lease-max=%s' % IP(net['cidr']).len(), '--dhcp-hostsfile=%s' % _dhcp_file(net['bridge'], 'conf'), '--dhcp-script=%s' % FLAGS.dhcpbridge, '--leasefile-ro'] @@ -691,7 +717,7 @@ def _dnsmasq_cmd(net): def _ra_cmd(net): - """Builds radvd command""" + """Builds radvd command.""" cmd = ['sudo', '-E', 'radvd', # '-u', 'nobody', '-C', '%s' % _ra_file(net['bridge'], 'conf'), @@ -700,44 +726,43 @@ def _ra_cmd(net): def _stop_dnsmasq(network): - """Stops the dnsmasq instance for a given network""" + """Stops the dnsmasq instance for a given network.""" pid = _dnsmasq_pid_for(network) if pid: try: _execute('sudo', 'kill', '-TERM', 
pid) except Exception as exc: # pylint: disable=W0703 - LOG.debug(_("Killing dnsmasq threw %s"), exc) + LOG.debug(_('Killing dnsmasq threw %s'), exc) def _dhcp_file(bridge, kind): - """Return path to a pid, leases or conf file for a bridge""" - + """Return path to a pid, leases or conf file for a bridge.""" if not os.path.exists(FLAGS.networks_path): os.makedirs(FLAGS.networks_path) - return os.path.abspath("%s/nova-%s.%s" % (FLAGS.networks_path, + return os.path.abspath('%s/nova-%s.%s' % (FLAGS.networks_path, bridge, kind)) def _ra_file(bridge, kind): - """Return path to a pid or conf file for a bridge""" + """Return path to a pid or conf file for a bridge.""" if not os.path.exists(FLAGS.networks_path): os.makedirs(FLAGS.networks_path) - return os.path.abspath("%s/nova-ra-%s.%s" % (FLAGS.networks_path, + return os.path.abspath('%s/nova-ra-%s.%s' % (FLAGS.networks_path, bridge, kind)) def _dnsmasq_pid_for(bridge): - """Returns the pid for prior dnsmasq instance for a bridge + """Returns the pid for prior dnsmasq instance for a bridge. - Returns None if no pid file exists + Returns None if no pid file exists. - If machine has rebooted pid might be incorrect (caller should check) - """ + If machine has rebooted pid might be incorrect (caller should check). + """ pid_file = _dhcp_file(bridge, 'pid') if os.path.exists(pid_file): @@ -746,13 +771,13 @@ def _dnsmasq_pid_for(bridge): def _ra_pid_for(bridge): - """Returns the pid for prior radvd instance for a bridge + """Returns the pid for prior radvd instance for a bridge. - Returns None if no pid file exists + Returns None if no pid file exists. - If machine has rebooted pid might be incorrect (caller should check) - """ + If machine has rebooted pid might be incorrect (caller should check). 
+ """ pid_file = _ra_file(bridge, 'pid') if os.path.exists(pid_file): @@ -761,8 +786,7 @@ def _ra_pid_for(bridge): def _ip_bridge_cmd(action, params, device): - """Build commands to add/del ips to bridges/devices""" - + """Build commands to add/del ips to bridges/devices.""" cmd = ['sudo', 'ip', 'addr', action] cmd.extend(params) cmd.extend(['dev', device]) diff --git a/nova/network/manager.py b/nova/network/manager.py index 86ee4fc00..5a6fdde5a 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -16,8 +16,7 @@ # License for the specific language governing permissions and limitations # under the License. -""" -Network Hosts are responsible for allocating ips and setting up network. +"""Network Hosts are responsible for allocating ips and setting up network. There are multiple backend drivers that handle specific types of networking topologies. All of the network commands are issued to a subclass of @@ -61,6 +60,8 @@ from nova import rpc LOG = logging.getLogger("nova.network.manager") + + FLAGS = flags.FLAGS flags.DEFINE_string('flat_network_bridge', 'br100', 'Bridge for simple network instances') @@ -111,7 +112,9 @@ class NetworkManager(manager.SchedulerDependentManager): """Implements common network manager functionality. This class must be subclassed to support specific topologies. + """ + timeout_fixed_ips = True def __init__(self, network_driver=None, *args, **kwargs): @@ -122,10 +125,9 @@ class NetworkManager(manager.SchedulerDependentManager): *args, **kwargs) def init_host(self): - """Do any initialization that needs to be run if this is a - standalone service. - """ + """Do any initialization for a standalone service.""" self.driver.init_host() + self.driver.ensure_metadata_ip() # Set up networking for the projects for which we're already # the designated network host. 
ctxt = context.get_admin_context() @@ -153,11 +155,11 @@ class NetworkManager(manager.SchedulerDependentManager): self.host, time) if num: - LOG.debug(_("Dissassociated %s stale fixed ip(s)"), num) + LOG.debug(_('Dissassociated %s stale fixed ip(s)'), num) def set_network_host(self, context, network_id): """Safely sets the host of the network.""" - LOG.debug(_("setting network host"), context=context) + LOG.debug(_('setting network host'), context=context) host = self.db.network_set_host(context, network_id, self.host) @@ -223,39 +225,39 @@ class NetworkManager(manager.SchedulerDependentManager): def lease_fixed_ip(self, context, mac, address): """Called by dhcp-bridge when ip is leased.""" - LOG.debug(_("Leasing IP %s"), address, context=context) + LOG.debug(_('Leasing IP %s'), address, context=context) fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address) instance_ref = fixed_ip_ref['instance'] if not instance_ref: - raise exception.Error(_("IP %s leased that isn't associated") % + raise exception.Error(_('IP %s leased that is not associated') % address) if instance_ref['mac_address'] != mac: inst_addr = instance_ref['mac_address'] - raise exception.Error(_("IP %(address)s leased to bad" - " mac %(inst_addr)s vs %(mac)s") % locals()) + raise exception.Error(_('IP %(address)s leased to bad mac' + ' %(inst_addr)s vs %(mac)s') % locals()) now = datetime.datetime.utcnow() self.db.fixed_ip_update(context, fixed_ip_ref['address'], {'leased': True, 'updated_at': now}) if not fixed_ip_ref['allocated']: - LOG.warn(_("IP %s leased that was already deallocated"), address, + LOG.warn(_('IP %s leased that was already deallocated'), address, context=context) def release_fixed_ip(self, context, mac, address): """Called by dhcp-bridge when ip is released.""" - LOG.debug(_("Releasing IP %s"), address, context=context) + LOG.debug(_('Releasing IP %s'), address, context=context) fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address) instance_ref = 
fixed_ip_ref['instance'] if not instance_ref: - raise exception.Error(_("IP %s released that isn't associated") % + raise exception.Error(_('IP %s released that is not associated') % address) if instance_ref['mac_address'] != mac: inst_addr = instance_ref['mac_address'] - raise exception.Error(_("IP %(address)s released from" - " bad mac %(inst_addr)s vs %(mac)s") % locals()) + raise exception.Error(_('IP %(address)s released from bad mac' + ' %(inst_addr)s vs %(mac)s') % locals()) if not fixed_ip_ref['leased']: - LOG.warn(_("IP %s released that was not leased"), address, + LOG.warn(_('IP %s released that was not leased'), address, context=context) self.db.fixed_ip_update(context, fixed_ip_ref['address'], @@ -285,8 +287,8 @@ class NetworkManager(manager.SchedulerDependentManager): return self.set_network_host(context, network_ref['id']) host = rpc.call(context, FLAGS.network_topic, - {"method": "set_network_host", - "args": {"network_id": network_ref['id']}}) + {'method': 'set_network_host', + 'args': {'network_id': network_ref['id']}}) return host def create_networks(self, context, cidr, num_networks, network_size, @@ -301,7 +303,7 @@ class NetworkManager(manager.SchedulerDependentManager): start = index * network_size start_v6 = index * network_size_v6 significant_bits = 32 - int(math.log(network_size, 2)) - cidr = "%s/%s" % (fixed_net[start], significant_bits) + cidr = '%s/%s' % (fixed_net[start], significant_bits) project_net = IPy.IP(cidr) net = {} net['bridge'] = FLAGS.flat_network_bridge @@ -312,13 +314,13 @@ class NetworkManager(manager.SchedulerDependentManager): net['broadcast'] = str(project_net.broadcast()) net['dhcp_start'] = str(project_net[2]) if num_networks > 1: - net['label'] = "%s_%d" % (label, count) + net['label'] = '%s_%d' % (label, count) else: net['label'] = label count += 1 if(FLAGS.use_ipv6): - cidr_v6 = "%s/%s" % (fixed_net_v6[start_v6], + cidr_v6 = '%s/%s' % (fixed_net_v6[start_v6], significant_bits_v6) net['cidr_v6'] = cidr_v6 
project_net_v6 = IPy.IP(cidr_v6) @@ -385,13 +387,13 @@ class FlatManager(NetworkManager): Metadata forwarding must be handled by the gateway, and since nova does not do any setup in this mode, it must be done manually. Requests to 169.254.169.254 port 80 will need to be forwarded to the api server. + """ + timeout_fixed_ips = False def init_host(self): - """Do any initialization that needs to be run if this is a - standalone service. - """ + """Do any initialization for a standalone service.""" #Fix for bug 723298 - do not call init_host on superclass #Following code has been copied for NetworkManager.init_host ctxt = context.get_admin_context() @@ -432,12 +434,11 @@ class FlatDHCPManager(NetworkManager): FlatDHCPManager will start up one dhcp server to give out addresses. It never injects network settings into the guest. Otherwise it behaves like FlatDHCPManager. + """ def init_host(self): - """Do any initialization that needs to be run if this is a - standalone service. - """ + """Do any initialization for a standalone service.""" super(FlatDHCPManager, self).init_host() self.driver.metadata_forward() @@ -489,12 +490,11 @@ class VlanManager(NetworkManager): A dhcp server is run for each subnet, so each project will have its own. For this mode to be useful, each project will need a vpn to access the instances in its subnet. + """ def init_host(self): - """Do any initialization that needs to be run if this is a - standalone service. 
- """ + """Do any initialization for a standalone service.""" super(VlanManager, self).init_host() self.driver.metadata_forward() @@ -565,7 +565,7 @@ class VlanManager(NetworkManager): net['vlan'] = vlan net['bridge'] = 'br%s' % vlan if(FLAGS.use_ipv6): - cidr_v6 = "%s/%s" % (fixed_net_v6[start_v6], + cidr_v6 = '%s/%s' % (fixed_net_v6[start_v6], significant_bits_v6) net['cidr_v6'] = cidr_v6 @@ -599,8 +599,8 @@ class VlanManager(NetworkManager): return self.set_network_host(context, network_ref['id']) host = rpc.call(context, FLAGS.network_topic, - {"method": "set_network_host", - "args": {"network_id": network_ref['id']}}) + {'method': 'set_network_host', + 'args': {'network_id': network_ref['id']}}) return host diff --git a/nova/network/vmwareapi_net.py b/nova/network/vmwareapi_net.py index 93e6584f0..373060add 100644 --- a/nova/network/vmwareapi_net.py +++ b/nova/network/vmwareapi_net.py @@ -15,9 +15,7 @@ # License for the specific language governing permissions and limitations # under the License. -""" -Implements vlans for vmwareapi. 
-""" +"""Implements vlans for vmwareapi.""" from nova import db from nova import exception @@ -27,8 +25,10 @@ from nova import utils from nova.virt.vmwareapi_conn import VMWareAPISession from nova.virt.vmwareapi import network_utils + LOG = logging.getLogger("nova.network.vmwareapi_net") + FLAGS = flags.FLAGS flags.DEFINE_string('vlan_interface', 'vmnic0', 'Physical network adapter name in VMware ESX host for ' @@ -42,26 +42,23 @@ def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None): host_username = FLAGS.vmwareapi_host_username host_password = FLAGS.vmwareapi_host_password if not host_ip or host_username is None or host_password is None: - raise Exception(_("Must specify vmwareapi_host_ip," - "vmwareapi_host_username " - "and vmwareapi_host_password to use" - "connection_type=vmwareapi")) + raise Exception(_('Must specify vmwareapi_host_ip, ' + 'vmwareapi_host_username ' + 'and vmwareapi_host_password to use ' + 'connection_type=vmwareapi')) session = VMWareAPISession(host_ip, host_username, host_password, FLAGS.vmwareapi_api_retry_count) vlan_interface = FLAGS.vlan_interface # Check if the vlan_interface physical network adapter exists on the host if not network_utils.check_if_vlan_interface_exists(session, vlan_interface): - raise exception.NotFound(_("There is no physical network adapter with " - "the name %s on the ESX host") % vlan_interface) + raise exception.NetworkAdapterNotFound(adapter=vlan_interface) # Get the vSwitch associated with the Physical Adapter vswitch_associated = network_utils.get_vswitch_for_vlan_interface( session, vlan_interface) if vswitch_associated is None: - raise exception.NotFound(_("There is no virtual switch associated " - "with the physical network adapter with name %s") % - vlan_interface) + raise exception.SwicthNotFoundForNetworkAdapter(adapter=vlan_interface) # Check whether bridge already exists and retrieve the the ref of the # network whose name_label is "bridge" network_ref = 
network_utils.get_network_with_the_name(session, bridge) @@ -75,17 +72,13 @@ def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None): pg_vlanid, pg_vswitch = \ network_utils.get_vlanid_and_vswitch_for_portgroup(session, bridge) - # Check if the vsiwtch associated is proper + # Check if the vswitch associated is proper if pg_vswitch != vswitch_associated: - raise exception.Invalid(_("vSwitch which contains the port group " - "%(bridge)s is not associated with the desired " - "physical adapter. Expected vSwitch is " - "%(vswitch_associated)s, but the one associated" - " is %(pg_vswitch)s") % locals()) + raise exception.InvalidVLANPortGroup(bridge=bridge, + expected=vswitch_associated, + actual=pg_vswitch) # Check if the vlan id is proper for the port group if pg_vlanid != vlan_num: - raise exception.Invalid(_("VLAN tag is not appropriate for the " - "port group %(bridge)s. Expected VLAN tag is " - "%(vlan_num)s, but the one associated with the " - "port group is %(pg_vlanid)s") % locals()) + raise exception.InvalidVLANTag(bridge=bridge, tag=vlan_num, + pgroup=pg_vlanid) diff --git a/nova/network/xenapi_net.py b/nova/network/xenapi_net.py index 9a99602d9..709ef7f34 100644 --- a/nova/network/xenapi_net.py +++ b/nova/network/xenapi_net.py @@ -15,9 +15,7 @@ # License for the specific language governing permissions and limitations # under the License. -""" -Implements vlans, bridges, and iptables rules using linux utilities. 
-""" +"""Implements vlans, bridges, and iptables rules using linux utilities.""" import os @@ -26,38 +24,40 @@ from nova import exception from nova import flags from nova import log as logging from nova import utils -from nova.virt.xenapi_conn import XenAPISession +from nova.virt import xenapi_conn from nova.virt.xenapi import network_utils + LOG = logging.getLogger("nova.xenapi_net") + FLAGS = flags.FLAGS def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None): """Create a vlan and bridge unless they already exist.""" # Open xenapi session - LOG.debug("ENTERING ensure_vlan_bridge in xenapi net") + LOG.debug('ENTERING ensure_vlan_bridge in xenapi net') url = FLAGS.xenapi_connection_url username = FLAGS.xenapi_connection_username password = FLAGS.xenapi_connection_password - session = XenAPISession(url, username, password) + session = xenapi_conn.XenAPISession(url, username, password) # Check whether bridge already exists # Retrieve network whose name_label is "bridge" network_ref = network_utils.NetworkHelper.find_network_with_name_label( session, bridge) - if network_ref == None: + if network_ref is None: # If bridge does not exists # 1 - create network - description = "network for nova bridge %s" % bridge + description = 'network for nova bridge %s' % bridge network_rec = {'name_label': bridge, 'name_description': description, 'other_config': {}} network_ref = session.call_xenapi('network.create', network_rec) # 2 - find PIF for VLAN - expr = 'field "device" = "%s" and \ - field "VLAN" = "-1"' % FLAGS.vlan_interface + expr = "field 'device' = '%s' and \ + field 'VLAN' = '-1'" % FLAGS.vlan_interface pifs = session.call_xenapi('PIF.get_all_records_where', expr) pif_ref = None # Multiple PIF are ok: we are dealing with a pool diff --git a/nova/notifier/__init__.py b/nova/notifier/__init__.py new file mode 100644 index 000000000..482d54e4f --- /dev/null +++ b/nova/notifier/__init__.py @@ -0,0 +1,14 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/nova/notifier/api.py b/nova/notifier/api.py new file mode 100644 index 000000000..a3e7a039e --- /dev/null +++ b/nova/notifier/api.py @@ -0,0 +1,83 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License.import datetime + +import datetime +import uuid + +from nova import flags +from nova import utils + + +FLAGS = flags.FLAGS + +flags.DEFINE_string('default_notification_level', 'INFO', + 'Default notification level for outgoing notifications') + +WARN = 'WARN' +INFO = 'INFO' +ERROR = 'ERROR' +CRITICAL = 'CRITICAL' +DEBUG = 'DEBUG' + +log_levels = (DEBUG, WARN, INFO, ERROR, CRITICAL) + + +class BadPriorityException(Exception): + pass + + +def notify(publisher_id, event_type, priority, payload): + """ + Sends a notification using the specified driver + + Notify parameters: + + publisher_id - the source worker_type.host of the message + event_type - the literal type of event (ex. Instance Creation) + priority - patterned after the enumeration of Python logging levels in + the set (DEBUG, WARN, INFO, ERROR, CRITICAL) + payload - A python dictionary of attributes + + Outgoing message format includes the above parameters, and appends the + following: + + message_id - a UUID representing the id for this notification + timestamp - the GMT timestamp the notification was sent at + + The composite message will be constructed as a dictionary of the above + attributes, which will then be sent via the transport mechanism defined + by the driver. + + Message example: + + {'message_id': str(uuid.uuid4()), + 'publisher_id': 'compute.host1', + 'timestamp': datetime.datetime.utcnow(), + 'priority': 'WARN', + 'event_type': 'compute.create_instance', + 'payload': {'instance_id': 12, ... 
}} + + """ + if priority not in log_levels: + raise BadPriorityException( + _('%s not in valid priorities' % priority)) + driver = utils.import_object(FLAGS.notification_driver) + msg = dict(message_id=str(uuid.uuid4()), + publisher_id=publisher_id, + event_type=event_type, + priority=priority, + payload=payload, + timestamp=str(datetime.datetime.utcnow())) + driver.notify(msg) diff --git a/nova/notifier/log_notifier.py b/nova/notifier/log_notifier.py new file mode 100644 index 000000000..25dfc693b --- /dev/null +++ b/nova/notifier/log_notifier.py @@ -0,0 +1,34 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import json + +from nova import flags +from nova import log as logging + + +FLAGS = flags.FLAGS + + +def notify(message): + """Notifies the recipient of the desired event given the model. + Log notifications using nova's default logging system""" + + priority = message.get('priority', + FLAGS.default_notification_level) + priority = priority.lower() + logger = logging.getLogger( + 'nova.notification.%s' % message['event_type']) + getattr(logger, priority)(json.dumps(message)) diff --git a/nova/notifier/no_op_notifier.py b/nova/notifier/no_op_notifier.py new file mode 100644 index 000000000..029710505 --- /dev/null +++ b/nova/notifier/no_op_notifier.py @@ -0,0 +1,19 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +def notify(message): + """Notifies the recipient of the desired event given the model""" + pass diff --git a/nova/notifier/rabbit_notifier.py b/nova/notifier/rabbit_notifier.py new file mode 100644 index 000000000..d46670b58 --- /dev/null +++ b/nova/notifier/rabbit_notifier.py @@ -0,0 +1,36 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +import nova.context + +from nova import flags +from nova import rpc + + +FLAGS = flags.FLAGS + +flags.DEFINE_string('notification_topic', 'notifications', + 'RabbitMQ topic used for Nova notifications') + + +def notify(message): + """Sends a notification to the RabbitMQ""" + context = nova.context.get_admin_context() + priority = message.get('priority', + FLAGS.default_notification_level) + priority = priority.lower() + topic = '%s.%s' % (FLAGS.notification_topic, priority) + rpc.cast(context, topic, message) diff --git a/nova/quota.py b/nova/quota.py index 2b24c0b5b..58766e846 100644 --- a/nova/quota.py +++ b/nova/quota.py @@ -15,20 +15,21 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -""" -Quotas for instances, volumes, and floating ips -""" + +"""Quotas for instances, volumes, and floating ips.""" from nova import db from nova import exception from nova import flags -FLAGS = flags.FLAGS +FLAGS = flags.FLAGS flags.DEFINE_integer('quota_instances', 10, 'number of instances allowed per project') flags.DEFINE_integer('quota_cores', 20, 'number of instance cores allowed per project') +flags.DEFINE_integer('quota_ram', 50 * 1024, + 'megabytes of instance ram allowed per project') flags.DEFINE_integer('quota_volumes', 10, 'number of volumes allowed per project') flags.DEFINE_integer('quota_gigabytes', 1000, @@ -45,89 +46,123 @@ flags.DEFINE_integer('quota_max_injected_file_path_bytes', 255, 'number of bytes allowed per injected file path') -def get_quota(context, project_id): - rval = {'instances': FLAGS.quota_instances, - 'cores': FLAGS.quota_cores, - 'volumes': FLAGS.quota_volumes, - 'gigabytes': FLAGS.quota_gigabytes, - 'floating_ips': FLAGS.quota_floating_ips, - 'metadata_items': FLAGS.quota_metadata_items} - - try: - quota = db.quota_get(context, project_id) - for key in rval.keys(): - if quota[key] is not None: - rval[key] = 
quota[key] - except exception.NotFound: - pass +def _get_default_quotas(): + defaults = { + 'instances': FLAGS.quota_instances, + 'cores': FLAGS.quota_cores, + 'ram': FLAGS.quota_ram, + 'volumes': FLAGS.quota_volumes, + 'gigabytes': FLAGS.quota_gigabytes, + 'floating_ips': FLAGS.quota_floating_ips, + 'metadata_items': FLAGS.quota_metadata_items, + 'injected_files': FLAGS.quota_max_injected_files, + 'injected_file_content_bytes': + FLAGS.quota_max_injected_file_content_bytes, + } + # -1 in the quota flags means unlimited + for key in defaults.keys(): + if defaults[key] == -1: + defaults[key] = None + return defaults + + +def get_project_quotas(context, project_id): + rval = _get_default_quotas() + quota = db.quota_get_all_by_project(context, project_id) + for key in rval.keys(): + if key in quota: + rval[key] = quota[key] return rval -def allowed_instances(context, num_instances, instance_type): - """Check quota and return min(num_instances, allowed_instances)""" +def _get_request_allotment(requested, used, quota): + if quota is None: + return requested + return quota - used + + +def allowed_instances(context, requested_instances, instance_type): + """Check quota and return min(requested_instances, allowed_instances).""" project_id = context.project_id context = context.elevated() - used_instances, used_cores = db.instance_data_get_for_project(context, - project_id) - quota = get_quota(context, project_id) - allowed_instances = quota['instances'] - used_instances - allowed_cores = quota['cores'] - used_cores - num_cores = num_instances * instance_type['vcpus'] + requested_cores = requested_instances * instance_type['vcpus'] + requested_ram = requested_instances * instance_type['memory_mb'] + usage = db.instance_data_get_for_project(context, project_id) + used_instances, used_cores, used_ram = usage + quota = get_project_quotas(context, project_id) + allowed_instances = _get_request_allotment(requested_instances, + used_instances, + quota['instances']) + 
allowed_cores = _get_request_allotment(requested_cores, used_cores, + quota['cores']) + allowed_ram = _get_request_allotment(requested_ram, used_ram, quota['ram']) allowed_instances = min(allowed_instances, - int(allowed_cores // instance_type['vcpus'])) - return min(num_instances, allowed_instances) + allowed_cores // instance_type['vcpus'], + allowed_ram // instance_type['memory_mb']) + return min(requested_instances, allowed_instances) -def allowed_volumes(context, num_volumes, size): - """Check quota and return min(num_volumes, allowed_volumes)""" +def allowed_volumes(context, requested_volumes, size): + """Check quota and return min(requested_volumes, allowed_volumes).""" project_id = context.project_id context = context.elevated() + size = int(size) + requested_gigabytes = requested_volumes * size used_volumes, used_gigabytes = db.volume_data_get_for_project(context, project_id) - quota = get_quota(context, project_id) - allowed_volumes = quota['volumes'] - used_volumes - allowed_gigabytes = quota['gigabytes'] - used_gigabytes - size = int(size) - num_gigabytes = num_volumes * size + quota = get_project_quotas(context, project_id) + allowed_volumes = _get_request_allotment(requested_volumes, used_volumes, + quota['volumes']) + allowed_gigabytes = _get_request_allotment(requested_gigabytes, + used_gigabytes, + quota['gigabytes']) allowed_volumes = min(allowed_volumes, int(allowed_gigabytes // size)) - return min(num_volumes, allowed_volumes) + return min(requested_volumes, allowed_volumes) -def allowed_floating_ips(context, num_floating_ips): - """Check quota and return min(num_floating_ips, allowed_floating_ips)""" +def allowed_floating_ips(context, requested_floating_ips): + """Check quota and return min(requested, allowed) floating ips.""" project_id = context.project_id context = context.elevated() used_floating_ips = db.floating_ip_count_by_project(context, project_id) - quota = get_quota(context, project_id) - allowed_floating_ips = quota['floating_ips'] 
- used_floating_ips - return min(num_floating_ips, allowed_floating_ips) + quota = get_project_quotas(context, project_id) + allowed_floating_ips = _get_request_allotment(requested_floating_ips, + used_floating_ips, + quota['floating_ips']) + return min(requested_floating_ips, allowed_floating_ips) -def allowed_metadata_items(context, num_metadata_items): - """Check quota; return min(num_metadata_items,allowed_metadata_items)""" - project_id = context.project_id - context = context.elevated() - quota = get_quota(context, project_id) - num_allowed_metadata_items = quota['metadata_items'] - return min(num_metadata_items, num_allowed_metadata_items) +def _calculate_simple_quota(context, resource, requested): + """Check quota for resource; return min(requested, allowed).""" + quota = get_project_quotas(context, context.project_id) + allowed = _get_request_allotment(requested, 0, quota[resource]) + return min(requested, allowed) + + +def allowed_metadata_items(context, requested_metadata_items): + """Return the number of metadata items allowed.""" + return _calculate_simple_quota(context, 'metadata_items', + requested_metadata_items) -def allowed_injected_files(context): - """Return the number of injected files allowed""" - return FLAGS.quota_max_injected_files +def allowed_injected_files(context, requested_injected_files): + """Return the number of injected files allowed.""" + return _calculate_simple_quota(context, 'injected_files', + requested_injected_files) -def allowed_injected_file_content_bytes(context): - """Return the number of bytes allowed per injected file content""" - return FLAGS.quota_max_injected_file_content_bytes +def allowed_injected_file_content_bytes(context, requested_bytes): + """Return the number of bytes allowed per injected file content.""" + resource = 'injected_file_content_bytes' + return _calculate_simple_quota(context, resource, requested_bytes) def allowed_injected_file_path_bytes(context): - """Return the number of bytes allowed in an 
def allowed_injected_file_path_bytes(context):
    """Return the number of bytes allowed in an injected file path."""
    return FLAGS.quota_max_injected_file_path_bytes


class QuotaError(exception.ApiError):
    """Quota Exceeded."""
    # Fixed docstring typo ("Exceeeded"); behavior is unchanged -- this is
    # just a marker subclass raised when a project exceeds its quota.
    pass
- Contains methods for connecting the fetch method to async loops """ + def __init__(self, *args, **kwargs): for i in xrange(FLAGS.rabbit_max_retries): if i > 0: @@ -100,19 +109,18 @@ class Consumer(messaging.Consumer): fl_host = FLAGS.rabbit_host fl_port = FLAGS.rabbit_port fl_intv = FLAGS.rabbit_retry_interval - LOG.error(_("AMQP server on %(fl_host)s:%(fl_port)d is" - " unreachable: %(e)s. Trying again in %(fl_intv)d" - " seconds.") - % locals()) + LOG.error(_('AMQP server on %(fl_host)s:%(fl_port)d is' + ' unreachable: %(e)s. Trying again in %(fl_intv)d' + ' seconds.') % locals()) self.failed_connection = True if self.failed_connection: - LOG.error(_("Unable to connect to AMQP server " - "after %d tries. Shutting down."), + LOG.error(_('Unable to connect to AMQP server ' + 'after %d tries. Shutting down.'), FLAGS.rabbit_max_retries) sys.exit(1) def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False): - """Wraps the parent fetch with some logic for failed connections""" + """Wraps the parent fetch with some logic for failed connection.""" # TODO(vish): the logic for failed connections and logging should be # refactored into some sort of connection manager object try: @@ -125,14 +133,14 @@ class Consumer(messaging.Consumer): self.declare() super(Consumer, self).fetch(no_ack, auto_ack, enable_callbacks) if self.failed_connection: - LOG.error(_("Reconnected to queue")) + LOG.error(_('Reconnected to queue')) self.failed_connection = False # NOTE(vish): This is catching all errors because we really don't # want exceptions to be logged 10 times a second if some # persistent failure occurs. 
except Exception, e: # pylint: disable=W0703 if not self.failed_connection: - LOG.exception(_("Failed to fetch message from queue: %s" % e)) + LOG.exception(_('Failed to fetch message from queue: %s' % e)) self.failed_connection = True def attach_to_eventlet(self): @@ -143,8 +151,9 @@ class Consumer(messaging.Consumer): class AdapterConsumer(Consumer): - """Calls methods on a proxy object based on method and args""" - def __init__(self, connection=None, topic="broadcast", proxy=None): + """Calls methods on a proxy object based on method and args.""" + + def __init__(self, connection=None, topic='broadcast', proxy=None): LOG.debug(_('Initing the Adapter Consumer for %s') % topic) self.proxy = proxy self.pool = greenpool.GreenPool(FLAGS.rpc_thread_pool_size) @@ -156,13 +165,14 @@ class AdapterConsumer(Consumer): @exception.wrap_exception def _receive(self, message_data, message): - """Magically looks for a method on the proxy object and calls it + """Magically looks for a method on the proxy object and calls it. 
Message data should be a dictionary with two keys: method: string representing the method to call args: dictionary of arg: value Example: {'method': 'echo', 'args': {'value': 42}} + """ LOG.debug(_('received %s') % message_data) msg_id = message_data.pop('_msg_id', None) @@ -189,22 +199,23 @@ class AdapterConsumer(Consumer): if msg_id: msg_reply(msg_id, rval, None) except Exception as e: - logging.exception("Exception during message handling") + logging.exception('Exception during message handling') if msg_id: msg_reply(msg_id, None, sys.exc_info()) return class Publisher(messaging.Publisher): - """Publisher base class""" + """Publisher base class.""" pass class TopicAdapterConsumer(AdapterConsumer): - """Consumes messages on a specific topic""" - exchange_type = "topic" + """Consumes messages on a specific topic.""" + + exchange_type = 'topic' - def __init__(self, connection=None, topic="broadcast", proxy=None): + def __init__(self, connection=None, topic='broadcast', proxy=None): self.queue = topic self.routing_key = topic self.exchange = FLAGS.control_exchange @@ -214,27 +225,29 @@ class TopicAdapterConsumer(AdapterConsumer): class FanoutAdapterConsumer(AdapterConsumer): - """Consumes messages from a fanout exchange""" - exchange_type = "fanout" + """Consumes messages from a fanout exchange.""" - def __init__(self, connection=None, topic="broadcast", proxy=None): - self.exchange = "%s_fanout" % topic + exchange_type = 'fanout' + + def __init__(self, connection=None, topic='broadcast', proxy=None): + self.exchange = '%s_fanout' % topic self.routing_key = topic unique = uuid.uuid4().hex - self.queue = "%s_fanout_%s" % (topic, unique) + self.queue = '%s_fanout_%s' % (topic, unique) self.durable = False - LOG.info(_("Created '%(exchange)s' fanout exchange " - "with '%(key)s' routing key"), - dict(exchange=self.exchange, key=self.routing_key)) + LOG.info(_('Created "%(exchange)s" fanout exchange ' + 'with "%(key)s" routing key'), + dict(exchange=self.exchange, 
key=self.routing_key)) super(FanoutAdapterConsumer, self).__init__(connection=connection, topic=topic, proxy=proxy) class TopicPublisher(Publisher): - """Publishes messages on a specific topic""" - exchange_type = "topic" + """Publishes messages on a specific topic.""" + + exchange_type = 'topic' - def __init__(self, connection=None, topic="broadcast"): + def __init__(self, connection=None, topic='broadcast'): self.routing_key = topic self.exchange = FLAGS.control_exchange self.durable = False @@ -243,20 +256,22 @@ class TopicPublisher(Publisher): class FanoutPublisher(Publisher): """Publishes messages to a fanout exchange.""" - exchange_type = "fanout" + + exchange_type = 'fanout' def __init__(self, topic, connection=None): - self.exchange = "%s_fanout" % topic - self.queue = "%s_fanout" % topic + self.exchange = '%s_fanout' % topic + self.queue = '%s_fanout' % topic self.durable = False - LOG.info(_("Creating '%(exchange)s' fanout exchange"), - dict(exchange=self.exchange)) + LOG.info(_('Creating "%(exchange)s" fanout exchange'), + dict(exchange=self.exchange)) super(FanoutPublisher, self).__init__(connection=connection) class DirectConsumer(Consumer): - """Consumes messages directly on a channel specified by msg_id""" - exchange_type = "direct" + """Consumes messages directly on a channel specified by msg_id.""" + + exchange_type = 'direct' def __init__(self, connection=None, msg_id=None): self.queue = msg_id @@ -268,8 +283,9 @@ class DirectConsumer(Consumer): class DirectPublisher(Publisher): - """Publishes messages directly on a channel specified by msg_id""" - exchange_type = "direct" + """Publishes messages directly on a channel specified by msg_id.""" + + exchange_type = 'direct' def __init__(self, connection=None, msg_id=None): self.routing_key = msg_id @@ -279,9 +295,9 @@ class DirectPublisher(Publisher): def msg_reply(msg_id, reply=None, failure=None): - """Sends a reply or an error on the channel signified by msg_id + """Sends a reply or an error on the 
class RemoteError(exception.Error):
    """Signifies that a remote class has raised an exception.

    Contains a string representation of the type of the original exception,
    the value of the original exception, and the traceback.  These are sent
    to the parent as a joined string so printing the exception contains all
    of the relevant info.

    """

    def __init__(self, exc_type, value, traceback):
        # Keep the individual pieces around for callers that want to
        # inspect them separately from the joined message.
        self.exc_type = exc_type
        self.value = value
        self.traceback = traceback
        # Fixed docstring typos only ("Containes" -> "Contains",
        # "relevent" -> "relevant"); runtime behavior is unchanged.
        super(RemoteError, self).__init__('%s %s\n%s' % (exc_type,
                                                         value,
                                                         traceback))
def get_zone_capabilities(context):
    """Returns a dict of key, value capabilities for this zone."""
    return _call_scheduler('get_zone_capabilities', context=context)


def select(context, specs=None):
    """Returns a list of hosts."""
    return _call_scheduler('select', context=context,
                           params={"specs": specs})


def _make_error_trap(zone_method, errors_to_ignore):
    """Bind *zone_method* for one zone so the spawned greenthread cannot
    pick up a later iteration's method (late-binding closure fix)."""

    def _error_trap(*args, **kwargs):
        try:
            return zone_method(*args, **kwargs)
        except Exception as e:
            # NOTE(review): exact-type match means subclasses of the
            # ignored errors are NOT ignored -- confirm that is intended.
            if type(e) in errors_to_ignore:
                return None
            # TODO(dabo): want to be able to re-raise here.
            # Returning a string now; raising was causing issues.
            return "ERROR", "%s" % e

    return _error_trap


def call_zone_method(context, method, errors_to_ignore=None, *args, **kwargs):
    """Returns a list of (zone, call_result) objects."""
    if not isinstance(errors_to_ignore, (list, tuple)):
        # This will also handle the default None.
        errors_to_ignore = [errors_to_ignore]

    pool = greenpool.GreenPool()
    results = []
    for zone in db.zone_get_all(context):
        try:
            nova = novaclient.OpenStack(zone.username, zone.password,
                                        zone.api_url)
            nova.authenticate()
        except novaclient.exceptions.BadRequest as e:
            url = zone.api_url
            LOG.warn(_("Failed request to zone; URL=%(url)s: %(e)s")
                     % locals())
            # TODO(dabo): add logic for failure counts per zone, with
            # escalation after a given number of failures.
            continue
        zone_method = getattr(nova.zones, method)
        # BUG FIX: the original built the trap closure inline, closing over
        # the loop variable 'zone_method'; greenthreads that ran after the
        # loop advanced would call the wrong zone's method.  Binding it via
        # a factory freezes the correct method per zone.
        res = pool.spawn(_make_error_trap(zone_method, errors_to_ignore),
                         *args, **kwargs)
        results.append((zone, res))
    pool.waitall()
    return [(zone.id, res.wait()) for zone, res in results]
if len(instance_ref['volumes']) != 0: services = db.service_get_all_by_topic(context, 'volume') if len(services) < 1 or not self.service_is_up(services[0]): - raise exception.Invalid(_("volume node is not alive" - "(time synchronize problem?)")) + raise exception.VolumeServiceUnavailable() # Checking src host exists and compute node src = instance_ref['host'] @@ -145,8 +144,7 @@ class Scheduler(object): # Checking src host is alive. if not self.service_is_up(services[0]): - raise exception.Invalid(_("%s is not alive(time " - "synchronize problem?)") % src) + raise exception.ComputeServiceUnavailable(host=src) def _live_migration_dest_check(self, context, instance_ref, dest): """Live migration check routine (for destination host). @@ -163,17 +161,15 @@ class Scheduler(object): # Checking dest host is alive. if not self.service_is_up(dservice_ref): - raise exception.Invalid(_("%s is not alive(time " - "synchronize problem?)") % dest) + raise exception.ComputeServiceUnavailable(host=dest) # Checking whether The host where instance is running # and dest is not same. src = instance_ref['host'] if dest == src: ec2_id = instance_ref['hostname'] - raise exception.Invalid(_("%(dest)s is where %(ec2_id)s is " - "running now. choose other host.") - % locals()) + raise exception.UnableToMigrateToSelf(instance_id=ec2_id, + host=dest) # Checking dst host still has enough capacities. self.assert_compute_node_has_enough_resources(context, @@ -204,26 +200,20 @@ class Scheduler(object): oservice_refs = db.service_get_all_compute_by_host(context, instance_ref['launched_on']) except exception.NotFound: - raise exception.Invalid(_("host %s where instance was launched " - "does not exist.") - % instance_ref['launched_on']) + raise exception.SourceHostUnavailable() oservice_ref = oservice_refs[0]['compute_node'][0] # Checking hypervisor is same. 
orig_hypervisor = oservice_ref['hypervisor_type'] dest_hypervisor = dservice_ref['hypervisor_type'] if orig_hypervisor != dest_hypervisor: - raise exception.Invalid(_("Different hypervisor type" - "(%(orig_hypervisor)s->" - "%(dest_hypervisor)s)')" % locals())) + raise exception.InvalidHypervisorType() # Checkng hypervisor version. orig_hypervisor = oservice_ref['hypervisor_version'] dest_hypervisor = dservice_ref['hypervisor_version'] if orig_hypervisor > dest_hypervisor: - raise exception.Invalid(_("Older hypervisor version" - "(%(orig_hypervisor)s->" - "%(dest_hypervisor)s)") % locals()) + raise exception.DestinationHypervisorTooOld() # Checking cpuinfo. try: @@ -265,11 +255,9 @@ class Scheduler(object): mem_avail = mem_total - mem_used mem_inst = instance_ref['memory_mb'] if mem_avail <= mem_inst: - raise exception.NotEmpty(_("Unable to migrate %(ec2_id)s " - "to destination: %(dest)s " - "(host:%(mem_avail)s " - "<= instance:%(mem_inst)s)") - % locals()) + reason = _("Unable to migrate %(ec2_id)s to destination: %(dest)s " + "(host:%(mem_avail)s <= instance:%(mem_inst)s)") + raise exception.MigrationError(reason=reason % locals()) def mounted_on_same_shared_storage(self, context, instance_ref, dest): """Check if the src and dest host mount same shared storage. diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py new file mode 100644 index 000000000..483f3225c --- /dev/null +++ b/nova/scheduler/host_filter.py @@ -0,0 +1,288 @@ +# Copyright (c) 2011 Openstack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Host Filter is a driver mechanism for requesting instance resources. +Three drivers are included: AllHosts, Flavor & JSON. AllHosts just +returns the full, unfiltered list of hosts. Flavor is a hard coded +matching mechanism based on flavor criteria and JSON is an ad-hoc +filter grammar. + +Why JSON? The requests for instances may come in through the +REST interface from a user or a parent Zone. +Currently Flavors and/or InstanceTypes are used for +specifing the type of instance desired. Specific Nova users have +noted a need for a more expressive way of specifying instances. +Since we don't want to get into building full DSL this is a simple +form as an example of how this could be done. In reality, most +consumers will use the more rigid filters such as FlavorFilter. + +Note: These are "required" capability filters. These capabilities +used must be present or the host will be excluded. The hosts +returned are then weighed by the Weighted Scheduler. Weights +can take the more esoteric factors into consideration (such as +server affinity and customer separation). 
+""" + +import json + +from nova import exception +from nova import flags +from nova import log as logging +from nova import utils + +LOG = logging.getLogger('nova.scheduler.host_filter') + +FLAGS = flags.FLAGS +flags.DEFINE_string('default_host_filter_driver', + 'nova.scheduler.host_filter.AllHostsFilter', + 'Which driver to use for filtering hosts.') + + +class HostFilter(object): + """Base class for host filter drivers.""" + + def instance_type_to_filter(self, instance_type): + """Convert instance_type into a filter for most common use-case.""" + raise NotImplementedError() + + def filter_hosts(self, zone_manager, query): + """Return a list of hosts that fulfill the filter.""" + raise NotImplementedError() + + def _full_name(self): + """module.classname of the filter driver""" + return "%s.%s" % (self.__module__, self.__class__.__name__) + + +class AllHostsFilter(HostFilter): + """NOP host filter driver. Returns all hosts in ZoneManager. + This essentially does what the old Scheduler+Chance used + to give us.""" + + def instance_type_to_filter(self, instance_type): + """Return anything to prevent base-class from raising + exception.""" + return (self._full_name(), instance_type) + + def filter_hosts(self, zone_manager, query): + """Return a list of hosts from ZoneManager list.""" + return [(host, services) + for host, services in zone_manager.service_states.iteritems()] + + +class FlavorFilter(HostFilter): + """HostFilter driver hard-coded to work with flavors.""" + + def instance_type_to_filter(self, instance_type): + """Use instance_type to filter hosts.""" + return (self._full_name(), instance_type) + + def filter_hosts(self, zone_manager, query): + """Return a list of hosts that can create instance_type.""" + instance_type = query + selected_hosts = [] + for host, services in zone_manager.service_states.iteritems(): + capabilities = services.get('compute', {}) + host_ram_mb = capabilities['host_memory_free'] + disk_bytes = capabilities['disk_available'] + if 
host_ram_mb >= instance_type['memory_mb'] and \ + disk_bytes >= instance_type['local_gb']: + selected_hosts.append((host, capabilities)) + return selected_hosts + +#host entries (currently) are like: +# {'host_name-description': 'Default install of XenServer', +# 'host_hostname': 'xs-mini', +# 'host_memory_total': 8244539392, +# 'host_memory_overhead': 184225792, +# 'host_memory_free': 3868327936, +# 'host_memory_free_computed': 3840843776}, +# 'host_other-config': {}, +# 'host_ip_address': '192.168.1.109', +# 'host_cpu_info': {}, +# 'disk_available': 32954957824, +# 'disk_total': 50394562560, +# 'disk_used': 17439604736}, +# 'host_uuid': 'cedb9b39-9388-41df-8891-c5c9a0c0fe5f', +# 'host_name-label': 'xs-mini'} + +# instance_type table has: +#name = Column(String(255), unique=True) +#memory_mb = Column(Integer) +#vcpus = Column(Integer) +#local_gb = Column(Integer) +#flavorid = Column(Integer, unique=True) +#swap = Column(Integer, nullable=False, default=0) +#rxtx_quota = Column(Integer, nullable=False, default=0) +#rxtx_cap = Column(Integer, nullable=False, default=0) + + +class JsonFilter(HostFilter): + """Host Filter driver to allow simple JSON-based grammar for + selecting hosts.""" + + def _equals(self, args): + """First term is == all the other terms.""" + if len(args) < 2: + return False + lhs = args[0] + for rhs in args[1:]: + if lhs != rhs: + return False + return True + + def _less_than(self, args): + """First term is < all the other terms.""" + if len(args) < 2: + return False + lhs = args[0] + for rhs in args[1:]: + if lhs >= rhs: + return False + return True + + def _greater_than(self, args): + """First term is > all the other terms.""" + if len(args) < 2: + return False + lhs = args[0] + for rhs in args[1:]: + if lhs <= rhs: + return False + return True + + def _in(self, args): + """First term is in set of remaining terms""" + if len(args) < 2: + return False + return args[0] in args[1:] + + def _less_than_equal(self, args): + """First term is <= all 
the other terms.""" + if len(args) < 2: + return False + lhs = args[0] + for rhs in args[1:]: + if lhs > rhs: + return False + return True + + def _greater_than_equal(self, args): + """First term is >= all the other terms.""" + if len(args) < 2: + return False + lhs = args[0] + for rhs in args[1:]: + if lhs < rhs: + return False + return True + + def _not(self, args): + """Flip each of the arguments.""" + if len(args) == 0: + return False + return [not arg for arg in args] + + def _or(self, args): + """True if any arg is True.""" + return True in args + + def _and(self, args): + """True if all args are True.""" + return False not in args + + commands = { + '=': _equals, + '<': _less_than, + '>': _greater_than, + 'in': _in, + '<=': _less_than_equal, + '>=': _greater_than_equal, + 'not': _not, + 'or': _or, + 'and': _and, + } + + def instance_type_to_filter(self, instance_type): + """Convert instance_type into JSON filter object.""" + required_ram = instance_type['memory_mb'] + required_disk = instance_type['local_gb'] + query = ['and', + ['>=', '$compute.host_memory_free', required_ram], + ['>=', '$compute.disk_available', required_disk] + ] + return (self._full_name(), json.dumps(query)) + + def _parse_string(self, string, host, services): + """Strings prefixed with $ are capability lookups in the + form '$service.capability[.subcap*]'""" + if not string: + return None + if string[0] != '$': + return string + + path = string[1:].split('.') + for item in path: + services = services.get(item, None) + if not services: + return None + return services + + def _process_filter(self, zone_manager, query, host, services): + """Recursively parse the query structure.""" + if len(query) == 0: + return True + cmd = query[0] + method = self.commands[cmd] # Let exception fly. 
+ cooked_args = [] + for arg in query[1:]: + if isinstance(arg, list): + arg = self._process_filter(zone_manager, arg, host, services) + elif isinstance(arg, basestring): + arg = self._parse_string(arg, host, services) + if arg != None: + cooked_args.append(arg) + result = method(self, cooked_args) + return result + + def filter_hosts(self, zone_manager, query): + """Return a list of hosts that can fulfill filter.""" + expanded = json.loads(query) + hosts = [] + for host, services in zone_manager.service_states.iteritems(): + r = self._process_filter(zone_manager, expanded, host, services) + if isinstance(r, list): + r = True in r + if r: + hosts.append((host, services)) + return hosts + + +DRIVERS = [AllHostsFilter, FlavorFilter, JsonFilter] + + +def choose_driver(driver_name=None): + """Since the caller may specify which driver to use we need + to have an authoritative list of what is permissible. This + function checks the driver name against a predefined set + of acceptable drivers.""" + + if not driver_name: + driver_name = FLAGS.default_host_filter_driver + for driver in DRIVERS: + if "%s.%s" % (driver.__module__, driver.__name__) == driver_name: + return driver() + raise exception.SchedulerHostFilterDriverNotFound(driver_name=driver_name) diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py index 7d62cfc4e..55cd7208b 100644 --- a/nova/scheduler/manager.py +++ b/nova/scheduler/manager.py @@ -60,10 +60,9 @@ class SchedulerManager(manager.Manager): """Get a list of zones from the ZoneManager.""" return self.zone_manager.get_zone_list() - def get_zone_capabilities(self, context=None, service=None): - """Get the normalized set of capabilites for this zone, - or for a particular service.""" - return self.zone_manager.get_zone_capabilities(context, service) + def get_zone_capabilities(self, context=None): + """Get the normalized set of capabilites for this zone.""" + return self.zone_manager.get_zone_capabilities(context) def 
update_service_capabilities(self, context=None, service_name=None, host=None, capabilities={}): diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/zone_aware_scheduler.py new file mode 100644 index 000000000..b3d230bd2 --- /dev/null +++ b/nova/scheduler/zone_aware_scheduler.py @@ -0,0 +1,119 @@ +# Copyright (c) 2011 Openstack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +The Zone Aware Scheduler is a base class Scheduler for creating instances +across zones. There are two expansion points to this class for: +1. Assigning Weights to hosts for requested instances +2. Filtering Hosts based on required instance capabilities +""" + +import operator + +from nova import log as logging +from nova.scheduler import api +from nova.scheduler import driver + +LOG = logging.getLogger('nova.scheduler.zone_aware_scheduler') + + +class ZoneAwareScheduler(driver.Scheduler): + """Base class for creating Zone Aware Schedulers.""" + + def _call_zone_method(self, context, method, specs): + """Call novaclient zone method. Broken out for testing.""" + return api.call_zone_method(context, method, specs=specs) + + def schedule_run_instance(self, context, topic='compute', specs={}, + *args, **kwargs): + """This method is called from nova.compute.api to provision + an instance. However we need to look at the parameters being + passed in to see if this is a request to: + 1. Create a Build Plan and then provision, or + 2. 
Use the Build Plan information in the request parameters + to simply create the instance (either in this zone or + a child zone).""" + + if 'blob' in specs: + return self.provision_instance(context, topic, specs) + + # Create build plan and provision ... + build_plan = self.select(context, specs) + for item in build_plan: + self.provision_instance(context, topic, item) + + def provision_instance(context, topic, item): + """Create the requested instance in this Zone or a child zone.""" + pass + + def select(self, context, *args, **kwargs): + """Select returns a list of weights and zone/host information + corresponding to the best hosts to service the request. Any + child zone information has been encrypted so as not to reveal + anything about the children.""" + return self._schedule(context, "compute", *args, **kwargs) + + def schedule(self, context, topic, *args, **kwargs): + """The schedule() contract requires we return the one + best-suited host for this request. + """ + res = self._schedule(context, topic, *args, **kwargs) + # TODO(sirp): should this be a host object rather than a weight-dict? + if not res: + raise driver.NoValidHost(_('No hosts were available')) + return res[0] + + def _schedule(self, context, topic, *args, **kwargs): + """Returns a list of hosts that meet the required specs, + ordered by their fitness. + """ + + #TODO(sandy): extract these from args. + num_instances = 1 + specs = {} + + # Filter local hosts based on requirements ... + host_list = self.filter_hosts(num_instances, specs) + + # then weigh the selected hosts. + # weighted = [{weight=weight, name=hostname}, ...] + weighted = self.weigh_hosts(num_instances, specs, host_list) + + # Next, tack on the best weights from the child zones ... + child_results = self._call_zone_method(context, "select", + specs=specs) + for child_zone, result in child_results: + for weighting in result: + # Remember the child_zone so we can get back to + # it later if needed. 
This implicitly builds a zone + # path structure. + host_dict = { + "weight": weighting["weight"], + "child_zone": child_zone, + "child_blob": weighting["blob"]} + weighted.append(host_dict) + + weighted.sort(key=operator.itemgetter('weight')) + return weighted + + def filter_hosts(self, num, specs): + """Derived classes must override this method and return + a list of hosts in [(hostname, capability_dict)] format.""" + raise NotImplemented() + + def weigh_hosts(self, num, specs, hosts): + """Derived classes must override this method and return + a lists of hosts in [{weight, hostname}] format.""" + raise NotImplemented() diff --git a/nova/scheduler/zone_manager.py b/nova/scheduler/zone_manager.py index 198f9d4cc..3ddf6f3c3 100644 --- a/nova/scheduler/zone_manager.py +++ b/nova/scheduler/zone_manager.py @@ -106,28 +106,26 @@ class ZoneManager(object): def __init__(self): self.last_zone_db_check = datetime.min self.zone_states = {} # { <zone_id> : ZoneState } - self.service_states = {} # { <service> : { <host> : { cap k : v }}} + self.service_states = {} # { <host> : { <service> : { cap k : v }}} self.green_pool = greenpool.GreenPool() def get_zone_list(self): """Return the list of zones we know about.""" return [zone.to_dict() for zone in self.zone_states.values()] - def get_zone_capabilities(self, context, service=None): + def get_zone_capabilities(self, context): """Roll up all the individual host info to generic 'service' capabilities. Each capability is aggregated into <cap>_min and <cap>_max values.""" - service_dict = self.service_states - if service: - service_dict = {service: self.service_states.get(service, {})} + hosts_dict = self.service_states # TODO(sandy) - be smarter about fabricating this structure. # But it's likely to change once we understand what the Best-Match # code will need better. combined = {} # { <service>_<cap> : (min, max), ... 
} - for service_name, host_dict in service_dict.iteritems(): - for host, caps_dict in host_dict.iteritems(): - for cap, value in caps_dict.iteritems(): + for host, host_dict in hosts_dict.iteritems(): + for service_name, service_dict in host_dict.iteritems(): + for cap, value in service_dict.iteritems(): key = "%s_%s" % (service_name, cap) min_value, max_value = combined.get(key, (value, value)) min_value = min(min_value, value) @@ -171,6 +169,6 @@ class ZoneManager(object): """Update the per-service capabilities based on this notification.""" logging.debug(_("Received %(service_name)s service update from " "%(host)s: %(capabilities)s") % locals()) - service_caps = self.service_states.get(service_name, {}) - service_caps[host] = capabilities - self.service_states[service_name] = service_caps + service_caps = self.service_states.get(host, {}) + service_caps[service_name] = capabilities + self.service_states[host] = service_caps diff --git a/nova/service.py b/nova/service.py index 47c0b96c0..ab1238c3b 100644 --- a/nova/service.py +++ b/nova/service.py @@ -17,9 +17,7 @@ # License for the specific language governing permissions and limitations # under the License. 
-""" -Generic Node baseclass for all workers that run on hosts -""" +"""Generic Node baseclass for all workers that run on hosts.""" import inspect import os @@ -30,13 +28,11 @@ from eventlet import event from eventlet import greenthread from eventlet import greenpool -from sqlalchemy.exc import OperationalError - from nova import context from nova import db from nova import exception -from nova import log as logging from nova import flags +from nova import log as logging from nova import rpc from nova import utils from nova import version @@ -79,7 +75,7 @@ class Service(object): def start(self): vcs_string = version.version_string_with_vcs() - logging.audit(_("Starting %(topic)s node (version %(vcs_string)s)"), + logging.audit(_('Starting %(topic)s node (version %(vcs_string)s)'), {'topic': self.topic, 'vcs_string': vcs_string}) self.manager.init_host() self.model_disconnected = False @@ -140,29 +136,24 @@ class Service(object): return getattr(manager, key) @classmethod - def create(cls, - host=None, - binary=None, - topic=None, - manager=None, - report_interval=None, - periodic_interval=None): + def create(cls, host=None, binary=None, topic=None, manager=None, + report_interval=None, periodic_interval=None): """Instantiates class and passes back application object. 
- Args: - host, defaults to FLAGS.host - binary, defaults to basename of executable - topic, defaults to bin_name - "nova-" part - manager, defaults to FLAGS.<topic>_manager - report_interval, defaults to FLAGS.report_interval - periodic_interval, defaults to FLAGS.periodic_interval + :param host: defaults to FLAGS.host + :param binary: defaults to basename of executable + :param topic: defaults to bin_name - 'nova-' part + :param manager: defaults to FLAGS.<topic>_manager + :param report_interval: defaults to FLAGS.report_interval + :param periodic_interval: defaults to FLAGS.periodic_interval + """ if not host: host = FLAGS.host if not binary: binary = os.path.basename(inspect.stack()[-1][1]) if not topic: - topic = binary.rpartition("nova-")[2] + topic = binary.rpartition('nova-')[2] if not manager: manager = FLAGS.get('%s_manager' % topic, None) if not report_interval: @@ -175,12 +166,12 @@ class Service(object): return service_obj def kill(self): - """Destroy the service object in the datastore""" + """Destroy the service object in the datastore.""" self.stop() try: db.service_destroy(context.get_admin_context(), self.service_id) except exception.NotFound: - logging.warn(_("Service killed that has no database entry")) + logging.warn(_('Service killed that has no database entry')) def stop(self): for x in self.timers: @@ -198,7 +189,7 @@ class Service(object): pass def periodic_tasks(self): - """Tasks to be run at a periodic interval""" + """Tasks to be run at a periodic interval.""" self.manager.periodic_tasks(context.get_admin_context()) def report_state(self): @@ -208,8 +199,8 @@ class Service(object): try: service_ref = db.service_get(ctxt, self.service_id) except exception.NotFound: - logging.debug(_("The service database object disappeared, " - "Recreating it.")) + logging.debug(_('The service database object disappeared, ' + 'Recreating it.')) self._create_service_ref(ctxt) service_ref = db.service_get(ctxt, self.service_id) @@ -218,23 +209,24 @@ class 
Service(object): {'report_count': service_ref['report_count'] + 1}) # TODO(termie): make this pattern be more elegant. - if getattr(self, "model_disconnected", False): + if getattr(self, 'model_disconnected', False): self.model_disconnected = False - logging.error(_("Recovered model server connection!")) + logging.error(_('Recovered model server connection!')) # TODO(vish): this should probably only catch connection errors except Exception: # pylint: disable=W0702 - if not getattr(self, "model_disconnected", False): + if not getattr(self, 'model_disconnected', False): self.model_disconnected = True - logging.exception(_("model server went away")) + logging.exception(_('model server went away')) class WsgiService(object): """Base class for WSGI based services. For each api you define, you must also define these flags: - :<api>_listen: The address on which to listen - :<api>_listen_port: The port on which to listen + :<api>_listen: The address on which to listen + :<api>_listen_port: The port on which to listen + """ def __init__(self, conf, apis): @@ -248,15 +240,20 @@ class WsgiService(object): def wait(self): self.wsgi_app.wait() + def get_socket_info(self, api_name): + """Returns the (host, port) that an API was started on.""" + return self.wsgi_app.socket_info[api_name] + class ApiService(WsgiService): - """Class for our nova-api service""" + """Class for our nova-api service.""" + @classmethod def create(cls, conf=None): if not conf: conf = wsgi.paste_config_file(FLAGS.api_paste_config) if not conf: - message = (_("No paste configuration found for: %s"), + message = (_('No paste configuration found for: %s'), FLAGS.api_paste_config) raise exception.Error(message) api_endpoints = ['ec2', 'osapi'] @@ -280,11 +277,11 @@ def serve(*services): FLAGS.ParseNewFlags() name = '_'.join(x.binary for x in services) - logging.debug(_("Serving %s"), name) - logging.debug(_("Full set of FLAGS:")) + logging.debug(_('Serving %s'), name) + logging.debug(_('Full set of FLAGS:')) 
for flag in FLAGS: flag_get = FLAGS.get(flag, None) - logging.debug("%(flag)s : %(flag_get)s" % locals()) + logging.debug('%(flag)s : %(flag_get)s' % locals()) for x in services: x.start() @@ -315,20 +312,22 @@ def serve_wsgi(cls, conf=None): def _run_wsgi(paste_config_file, apis): - logging.debug(_("Using paste.deploy config at: %s"), paste_config_file) + logging.debug(_('Using paste.deploy config at: %s'), paste_config_file) apps = [] for api in apis: config = wsgi.load_paste_configuration(paste_config_file, api) if config is None: - logging.debug(_("No paste configuration for app: %s"), api) + logging.debug(_('No paste configuration for app: %s'), api) continue - logging.debug(_("App Config: %(api)s\n%(config)r") % locals()) - logging.info(_("Running %s API"), api) + logging.debug(_('App Config: %(api)s\n%(config)r') % locals()) + logging.info(_('Running %s API'), api) app = wsgi.load_paste_app(paste_config_file, api) - apps.append((app, getattr(FLAGS, "%s_listen_port" % api), - getattr(FLAGS, "%s_listen" % api))) + apps.append((app, + getattr(FLAGS, '%s_listen_port' % api), + getattr(FLAGS, '%s_listen' % api), + api)) if len(apps) == 0: - logging.error(_("No known API applications configured in %s."), + logging.error(_('No known API applications configured in %s.'), paste_config_file) return diff --git a/nova/test.py b/nova/test.py index 3b608520a..4deb2a175 100644 --- a/nova/test.py +++ b/nova/test.py @@ -16,12 +16,12 @@ # License for the specific language governing permissions and limitations # under the License. -""" -Base classes for our unit tests. -Allows overriding of flags for use of fakes, -and some black magic for inline callbacks. -""" +"""Base classes for our unit tests. +Allows overriding of flags for use of fakes, and some black magic for +inline callbacks. 
+ +""" import datetime import functools @@ -52,9 +52,9 @@ flags.DEFINE_bool('fake_tests', True, def skip_if_fake(func): - """Decorator that skips a test if running in fake mode""" + """Decorator that skips a test if running in fake mode.""" def _skipper(*args, **kw): - """Wrapped skipper function""" + """Wrapped skipper function.""" if FLAGS.fake_tests: raise unittest.SkipTest('Test cannot be run in fake mode') else: @@ -63,9 +63,10 @@ def skip_if_fake(func): class TestCase(unittest.TestCase): - """Test case base class for all unit tests""" + """Test case base class for all unit tests.""" + def setUp(self): - """Run before each test method to initialize test environment""" + """Run before each test method to initialize test environment.""" super(TestCase, self).setUp() # NOTE(vish): We need a better method for creating fixtures for tests # now that we have some required db setup for the system @@ -86,8 +87,7 @@ class TestCase(unittest.TestCase): self._original_flags = FLAGS.FlagValuesDict() def tearDown(self): - """Runs after each test method to finalize/tear down test - environment.""" + """Runs after each test method to tear down test environment.""" try: self.mox.UnsetStubs() self.stubs.UnsetAll() @@ -121,7 +121,7 @@ class TestCase(unittest.TestCase): pass def flags(self, **kw): - """Override flag variables for a test""" + """Override flag variables for a test.""" for k, v in kw.iteritems(): if k in self.flag_overrides: self.reset_flags() @@ -131,7 +131,11 @@ class TestCase(unittest.TestCase): setattr(FLAGS, k, v) def reset_flags(self): - """Resets all flag variables for the test. Runs after each test""" + """Resets all flag variables for the test. + + Runs after each test. 
+ + """ FLAGS.Reset() for k, v in self._original_flags.iteritems(): setattr(FLAGS, k, v) @@ -158,7 +162,6 @@ class TestCase(unittest.TestCase): def _monkey_patch_wsgi(self): """Allow us to kill servers spawned by wsgi.Server.""" - # TODO(termie): change these patterns to use functools self.original_start = wsgi.Server.start @functools.wraps(self.original_start) @@ -189,12 +192,13 @@ class TestCase(unittest.TestCase): If you don't care (or don't know) a given value, you can specify the string DONTCARE as the value. This will cause that dict-item to be skipped. + """ def raise_assertion(msg): d1str = str(d1) d2str = str(d2) - base_msg = ("Dictionaries do not match. %(msg)s d1: %(d1str)s " - "d2: %(d2str)s" % locals()) + base_msg = ('Dictionaries do not match. %(msg)s d1: %(d1str)s ' + 'd2: %(d2str)s' % locals()) raise AssertionError(base_msg) d1keys = set(d1.keys()) @@ -202,8 +206,8 @@ class TestCase(unittest.TestCase): if d1keys != d2keys: d1only = d1keys - d2keys d2only = d2keys - d1keys - raise_assertion("Keys in d1 and not d2: %(d1only)s. " - "Keys in d2 and not d1: %(d2only)s" % locals()) + raise_assertion('Keys in d1 and not d2: %(d1only)s. 
' + 'Keys in d2 and not d1: %(d2only)s' % locals()) for key in d1keys: d1value = d1[key] @@ -217,19 +221,19 @@ class TestCase(unittest.TestCase): "d2['%(key)s']=%(d2value)s" % locals()) def assertDictListMatch(self, L1, L2): - """Assert a list of dicts are equivalent""" + """Assert a list of dicts are equivalent.""" def raise_assertion(msg): L1str = str(L1) L2str = str(L2) - base_msg = ("List of dictionaries do not match: %(msg)s " - "L1: %(L1str)s L2: %(L2str)s" % locals()) + base_msg = ('List of dictionaries do not match: %(msg)s ' + 'L1: %(L1str)s L2: %(L2str)s' % locals()) raise AssertionError(base_msg) L1count = len(L1) L2count = len(L2) if L1count != L2count: - raise_assertion("Length mismatch: len(L1)=%(L1count)d != " - "len(L2)=%(L2count)d" % locals()) + raise_assertion('Length mismatch: len(L1)=%(L1count)d != ' + 'len(L2)=%(L2count)d' % locals()) for d1, d2 in zip(L1, L2): self.assertDictMatch(d1, d2) diff --git a/nova/tests/api/openstack/extensions/foxinsocks.py b/nova/tests/api/openstack/extensions/foxinsocks.py index 0860b51ac..dbdd0928a 100644 --- a/nova/tests/api/openstack/extensions/foxinsocks.py +++ b/nova/tests/api/openstack/extensions/foxinsocks.py @@ -63,31 +63,33 @@ class Foxinsocks(object): self._delete_tweedle)) return actions - def get_response_extensions(self): - response_exts = [] + def get_request_extensions(self): + request_exts = [] - def _goose_handler(res): + def _goose_handler(req, res): #NOTE: This only handles JSON responses. # You can use content type header to test for XML. data = json.loads(res.body) - data['flavor']['googoose'] = "Gooey goo for chewy chewing!" 
- return data + data['flavor']['googoose'] = req.GET.get('chewing') + res.body = json.dumps(data) + return res - resp_ext = extensions.ResponseExtension('GET', '/v1.1/flavors/:(id)', + req_ext1 = extensions.RequestExtension('GET', '/v1.1/flavors/:(id)', _goose_handler) - response_exts.append(resp_ext) + request_exts.append(req_ext1) - def _bands_handler(res): + def _bands_handler(req, res): #NOTE: This only handles JSON responses. # You can use content type header to test for XML. data = json.loads(res.body) data['big_bands'] = 'Pig Bands!' - return data + res.body = json.dumps(data) + return res - resp_ext2 = extensions.ResponseExtension('GET', '/v1.1/flavors/:(id)', + req_ext2 = extensions.RequestExtension('GET', '/v1.1/flavors/:(id)', _bands_handler) - response_exts.append(resp_ext2) - return response_exts + request_exts.append(req_ext2) + return request_exts def _add_tweedle(self, input_dict, req, id): diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py index 8b0729c35..bf51239e6 100644 --- a/nova/tests/api/openstack/fakes.py +++ b/nova/tests/api/openstack/fakes.py @@ -228,6 +228,9 @@ class FakeToken(object): # FIXME(sirp): let's not use id here id = 0 + def __getitem__(self, key): + return getattr(self, key) + def __init__(self, **kwargs): FakeToken.id += 1 self.id = FakeToken.id diff --git a/nova/tests/api/openstack/test_api.py b/nova/tests/api/openstack/test_api.py index 5112c486f..c63431a45 100644 --- a/nova/tests/api/openstack/test_api.py +++ b/nova/tests/api/openstack/test_api.py @@ -53,13 +53,13 @@ class APITest(test.TestCase): #api.application = succeed api = self._wsgi_app(succeed) resp = Request.blank('/').get_response(api) - self.assertFalse('computeFault' in resp.body, resp.body) + self.assertFalse('cloudServersFault' in resp.body, resp.body) self.assertEqual(resp.status_int, 200, resp.body) #api.application = raise_webob_exc api = self._wsgi_app(raise_webob_exc) resp = Request.blank('/').get_response(api) - 
self.assertFalse('computeFault' in resp.body, resp.body) + self.assertFalse('cloudServersFault' in resp.body, resp.body) self.assertEqual(resp.status_int, 404, resp.body) #api.application = raise_api_fault @@ -71,11 +71,11 @@ class APITest(test.TestCase): #api.application = fail api = self._wsgi_app(fail) resp = Request.blank('/').get_response(api) - self.assertTrue('{"computeFault' in resp.body, resp.body) + self.assertTrue('{"cloudServersFault' in resp.body, resp.body) self.assertEqual(resp.status_int, 500, resp.body) #api.application = fail api = self._wsgi_app(fail) resp = Request.blank('/.xml').get_response(api) - self.assertTrue('<computeFault' in resp.body, resp.body) + self.assertTrue('<cloudServersFault' in resp.body, resp.body) self.assertEqual(resp.status_int, 500, resp.body) diff --git a/nova/tests/api/openstack/test_extensions.py b/nova/tests/api/openstack/test_extensions.py index 481d34ed1..544298602 100644 --- a/nova/tests/api/openstack/test_extensions.py +++ b/nova/tests/api/openstack/test_extensions.py @@ -45,10 +45,10 @@ class StubController(nova.wsgi.Controller): class StubExtensionManager(object): - def __init__(self, resource_ext=None, action_ext=None, response_ext=None): + def __init__(self, resource_ext=None, action_ext=None, request_ext=None): self.resource_ext = resource_ext self.action_ext = action_ext - self.response_ext = response_ext + self.request_ext = request_ext def get_name(self): return "Tweedle Beetle Extension" @@ -71,11 +71,11 @@ class StubExtensionManager(object): action_exts.append(self.action_ext) return action_exts - def get_response_extensions(self): - response_exts = [] - if self.response_ext: - response_exts.append(self.response_ext) - return response_exts + def get_request_extensions(self): + request_extensions = [] + if self.request_ext: + request_extensions.append(self.request_ext) + return request_extensions class ExtensionControllerTest(unittest.TestCase): @@ -183,10 +183,10 @@ class 
ActionExtensionTest(unittest.TestCase): self.assertEqual(404, response.status_int) -class ResponseExtensionTest(unittest.TestCase): +class RequestExtensionTest(unittest.TestCase): def setUp(self): - super(ResponseExtensionTest, self).setUp() + super(RequestExtensionTest, self).setUp() self.stubs = stubout.StubOutForTesting() fakes.FakeAuthManager.reset_fake_data() fakes.FakeAuthDatabase.data = {} @@ -195,42 +195,39 @@ class ResponseExtensionTest(unittest.TestCase): def tearDown(self): self.stubs.UnsetAll() - super(ResponseExtensionTest, self).tearDown() + super(RequestExtensionTest, self).tearDown() def test_get_resources_with_stub_mgr(self): - test_resp = "Gooey goo for chewy chewing!" - - def _resp_handler(res): + def _req_handler(req, res): # only handle JSON responses data = json.loads(res.body) - data['flavor']['googoose'] = test_resp - return data + data['flavor']['googoose'] = req.GET.get('chewing') + res.body = json.dumps(data) + return res - resp_ext = extensions.ResponseExtension('GET', + req_ext = extensions.RequestExtension('GET', '/v1.1/flavors/:(id)', - _resp_handler) + _req_handler) - manager = StubExtensionManager(None, None, resp_ext) + manager = StubExtensionManager(None, None, req_ext) app = fakes.wsgi_app() ext_midware = extensions.ExtensionMiddleware(app, manager) - request = webob.Request.blank("/v1.1/flavors/1") + request = webob.Request.blank("/v1.1/flavors/1?chewing=bluegoo") request.environ['api.version'] = '1.1' response = request.get_response(ext_midware) self.assertEqual(200, response.status_int) response_data = json.loads(response.body) - self.assertEqual(test_resp, response_data['flavor']['googoose']) + self.assertEqual('bluegoo', response_data['flavor']['googoose']) def test_get_resources_with_mgr(self): - test_resp = "Gooey goo for chewy chewing!" 
- app = fakes.wsgi_app() ext_midware = extensions.ExtensionMiddleware(app) - request = webob.Request.blank("/v1.1/flavors/1") + request = webob.Request.blank("/v1.1/flavors/1?chewing=newblue") request.environ['api.version'] = '1.1' response = request.get_response(ext_midware) self.assertEqual(200, response.status_int) response_data = json.loads(response.body) - self.assertEqual(test_resp, response_data['flavor']['googoose']) + self.assertEqual('newblue', response_data['flavor']['googoose']) self.assertEqual("Pig Bands!", response_data['big_bands']) diff --git a/nova/tests/api/openstack/test_flavors.py b/nova/tests/api/openstack/test_flavors.py index 954d72adf..d1c62e454 100644 --- a/nova/tests/api/openstack/test_flavors.py +++ b/nova/tests/api/openstack/test_flavors.py @@ -47,8 +47,8 @@ def return_instance_types(context, num=2): return instance_types -def return_instance_type_not_found(context, flavorid): - raise exception.NotFound() +def return_instance_type_not_found(context, flavor_id): + raise exception.InstanceTypeNotFound(flavor_id=flavor_id) class FlavorsTest(test.TestCase): diff --git a/nova/tests/api/openstack/test_image_metadata.py b/nova/tests/api/openstack/test_image_metadata.py index 9be753f84..56be0f1cc 100644 --- a/nova/tests/api/openstack/test_image_metadata.py +++ b/nova/tests/api/openstack/test_image_metadata.py @@ -45,10 +45,8 @@ class ImageMetaDataTest(unittest.TestCase): 'is_public': True, 'deleted_at': None, 'properties': { - 'type': 'ramdisk', 'key1': 'value1', - 'key2': 'value2' - }, + 'key2': 'value2'}, 'size': 5882349}, {'status': 'active', 'name': 'image2', @@ -62,10 +60,21 @@ class ImageMetaDataTest(unittest.TestCase): 'is_public': True, 'deleted_at': None, 'properties': { - 'type': 'ramdisk', 'key1': 'value1', - 'key2': 'value2' - }, + 'key2': 'value2'}, + 'size': 5882349}, + {'status': 'active', + 'name': 'image3', + 'deleted': False, + 'container_format': None, + 'created_at': '2011-03-22T17:40:15', + 'disk_format': None, + 
'updated_at': '2011-03-22T17:40:15', + 'id': '3', + 'location': 'file:///var/lib/glance/images/2', + 'is_public': True, + 'deleted_at': None, + 'properties': {}, 'size': 5882349}, ] @@ -77,6 +86,10 @@ class ImageMetaDataTest(unittest.TestCase): fakes.FakeAuthManager.auth_data = {} fakes.FakeAuthDatabase.data = {} fakes.stub_out_auth(self.stubs) + # NOTE(dprince) max out properties/metadata in image 3 for testing + img3 = self.IMAGE_FIXTURES[2] + for num in range(FLAGS.quota_metadata_items): + img3['properties']['key%i' % num] = "blah" fakes.stub_out_glance(self.stubs, self.IMAGE_FIXTURES) def tearDown(self): @@ -164,3 +177,25 @@ class ImageMetaDataTest(unittest.TestCase): req.method = 'DELETE' res = req.get_response(fakes.wsgi_app()) self.assertEqual(404, res.status_int) + + def test_too_many_metadata_items_on_create(self): + data = {"metadata": {}} + for num in range(FLAGS.quota_metadata_items + 1): + data['metadata']['key%i' % num] = "blah" + json_string = str(data).replace("\'", "\"") + req = webob.Request.blank('/v1.1/images/2/meta') + req.environ['api.version'] = '1.1' + req.method = 'POST' + req.body = json_string + req.headers["content-type"] = "application/json" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(400, res.status_int) + + def test_too_many_metadata_items_on_put(self): + req = webob.Request.blank('/v1.1/images/3/meta/blah') + req.environ['api.version'] = '1.1' + req.method = 'PUT' + req.body = '{"blah": "blah"}' + req.headers["content-type"] = "application/json" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(400, res.status_int) diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py index ae86d0686..2c329f920 100644 --- a/nova/tests/api/openstack/test_images.py +++ b/nova/tests/api/openstack/test_images.py @@ -75,6 +75,18 @@ class _BaseImageServiceTests(test.TestCase): self.context, 'bad image id') + def test_create_and_show_non_existing_image_by_name(self): + fixture = 
self._make_fixture('test image') + num_images = len(self.service.index(self.context)) + + image_id = self.service.create(self.context, fixture)['id'] + + self.assertNotEquals(None, image_id) + self.assertRaises(exception.ImageNotFound, + self.service.show_by_name, + self.context, + 'bad image id') + def test_update(self): fixture = self._make_fixture('test image') image_id = self.service.create(self.context, fixture)['id'] @@ -538,7 +550,8 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): }, { 'id': 127, - 'name': 'killed backup', 'serverId': 42, + 'name': 'killed backup', + 'serverId': 42, 'updated': self.NOW_API_FORMAT, 'created': self.NOW_API_FORMAT, 'status': 'FAILED', @@ -584,7 +597,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): { 'id': 124, 'name': 'queued backup', - 'serverId': 42, + 'serverRef': "http://localhost/v1.1/servers/42", 'updated': self.NOW_API_FORMAT, 'created': self.NOW_API_FORMAT, 'status': 'QUEUED', @@ -606,7 +619,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): { 'id': 125, 'name': 'saving backup', - 'serverId': 42, + 'serverRef': "http://localhost/v1.1/servers/42", 'updated': self.NOW_API_FORMAT, 'created': self.NOW_API_FORMAT, 'status': 'SAVING', @@ -629,7 +642,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): { 'id': 126, 'name': 'active backup', - 'serverId': 42, + 'serverRef': "http://localhost/v1.1/servers/42", 'updated': self.NOW_API_FORMAT, 'created': self.NOW_API_FORMAT, 'status': 'ACTIVE', @@ -650,7 +663,8 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): }, { 'id': 127, - 'name': 'killed backup', 'serverId': 42, + 'name': 'killed backup', + 'serverRef': "http://localhost/v1.1/servers/42", 'updated': self.NOW_API_FORMAT, 'created': self.NOW_API_FORMAT, 'status': 'FAILED', diff --git a/nova/tests/api/openstack/test_limits.py b/nova/tests/api/openstack/test_limits.py index df367005d..70f59eda6 100644 --- a/nova/tests/api/openstack/test_limits.py +++ 
b/nova/tests/api/openstack/test_limits.py @@ -27,16 +27,16 @@ import webob from xml.dom.minidom import parseString +import nova.context from nova.api.openstack import limits -from nova.api.openstack.limits import Limit TEST_LIMITS = [ - Limit("GET", "/delayed", "^/delayed", 1, limits.PER_MINUTE), - Limit("POST", "*", ".*", 7, limits.PER_MINUTE), - Limit("POST", "/servers", "^/servers", 3, limits.PER_MINUTE), - Limit("PUT", "*", "", 10, limits.PER_MINUTE), - Limit("PUT", "/servers", "^/servers", 5, limits.PER_MINUTE), + limits.Limit("GET", "/delayed", "^/delayed", 1, limits.PER_MINUTE), + limits.Limit("POST", "*", ".*", 7, limits.PER_MINUTE), + limits.Limit("POST", "/servers", "^/servers", 3, limits.PER_MINUTE), + limits.Limit("PUT", "*", "", 10, limits.PER_MINUTE), + limits.Limit("PUT", "/servers", "^/servers", 5, limits.PER_MINUTE), ] @@ -48,6 +48,13 @@ class BaseLimitTestSuite(unittest.TestCase): self.time = 0.0 self.stubs = stubout.StubOutForTesting() self.stubs.Set(limits.Limit, "_get_time", self._get_time) + self.absolute_limits = {} + + def stub_get_project_quotas(context, project_id): + return self.absolute_limits + + self.stubs.Set(nova.quota, "get_project_quotas", + stub_get_project_quotas) def tearDown(self): """Run after each test.""" @@ -58,15 +65,15 @@ class BaseLimitTestSuite(unittest.TestCase): return self.time -class LimitsControllerTest(BaseLimitTestSuite): +class LimitsControllerV10Test(BaseLimitTestSuite): """ - Tests for `limits.LimitsController` class. + Tests for `limits.LimitsControllerV10` class. 
""" def setUp(self): """Run before each test.""" BaseLimitTestSuite.setUp(self) - self.controller = limits.LimitsController() + self.controller = limits.LimitsControllerV10() def _get_index_request(self, accept_header="application/json"): """Helper to set routing arguments.""" @@ -76,17 +83,31 @@ class LimitsControllerTest(BaseLimitTestSuite): "action": "index", "controller": "", }) + context = nova.context.RequestContext('testuser', 'testproject') + request.environ["nova.context"] = context return request def _populate_limits(self, request): """Put limit info into a request.""" _limits = [ - Limit("GET", "*", ".*", 10, 60).display(), - Limit("POST", "*", ".*", 5, 60 * 60).display(), + limits.Limit("GET", "*", ".*", 10, 60).display(), + limits.Limit("POST", "*", ".*", 5, 60 * 60).display(), ] request.environ["nova.limits"] = _limits return request + def _setup_absolute_limits(self): + self.absolute_limits = { + 'instances': 5, + 'cores': 8, + 'ram': 2 ** 13, + 'volumes': 21, + 'gigabytes': 34, + 'metadata_items': 55, + 'injected_files': 89, + 'injected_file_content_bytes': 144, + } + def test_empty_index_json(self): """Test getting empty limit details in JSON.""" request = self._get_index_request() @@ -104,6 +125,7 @@ class LimitsControllerTest(BaseLimitTestSuite): """Test getting limit details in JSON.""" request = self._get_index_request() request = self._populate_limits(request) + self._setup_absolute_limits() response = request.get_response(self.controller) expected = { "limits": { @@ -125,7 +147,15 @@ class LimitsControllerTest(BaseLimitTestSuite): "remaining": 5, "unit": "HOUR", }], - "absolute": {}, + "absolute": { + "maxTotalInstances": 5, + "maxTotalCores": 8, + "maxTotalRAMSize": 2 ** 13, + "maxServerMeta": 55, + "maxImageMeta": 55, + "maxPersonality": 89, + "maxPersonalitySize": 144, + }, }, } body = json.loads(response.body) @@ -171,6 +201,205 @@ class LimitsControllerTest(BaseLimitTestSuite): self.assertEqual(expected.toxml(), body.toxml()) +class 
LimitsControllerV11Test(BaseLimitTestSuite): + """ + Tests for `limits.LimitsControllerV11` class. + """ + + def setUp(self): + """Run before each test.""" + BaseLimitTestSuite.setUp(self) + self.controller = limits.LimitsControllerV11() + + def _get_index_request(self, accept_header="application/json"): + """Helper to set routing arguments.""" + request = webob.Request.blank("/") + request.accept = accept_header + request.environ["wsgiorg.routing_args"] = (None, { + "action": "index", + "controller": "", + }) + context = nova.context.RequestContext('testuser', 'testproject') + request.environ["nova.context"] = context + return request + + def _populate_limits(self, request): + """Put limit info into a request.""" + _limits = [ + limits.Limit("GET", "*", ".*", 10, 60).display(), + limits.Limit("POST", "*", ".*", 5, 60 * 60).display(), + limits.Limit("GET", "changes-since*", "changes-since", + 5, 60).display(), + ] + request.environ["nova.limits"] = _limits + return request + + def test_empty_index_json(self): + """Test getting empty limit details in JSON.""" + request = self._get_index_request() + response = request.get_response(self.controller) + expected = { + "limits": { + "rate": [], + "absolute": {}, + }, + } + body = json.loads(response.body) + self.assertEqual(expected, body) + + def test_index_json(self): + """Test getting limit details in JSON.""" + request = self._get_index_request() + request = self._populate_limits(request) + self.absolute_limits = { + 'ram': 512, + 'instances': 5, + 'cores': 21, + } + response = request.get_response(self.controller) + expected = { + "limits": { + "rate": [ + { + "regex": ".*", + "uri": "*", + "limit": [ + { + "verb": "GET", + "next-available": 0, + "unit": "MINUTE", + "value": 10, + "remaining": 10, + }, + { + "verb": "POST", + "next-available": 0, + "unit": "HOUR", + "value": 5, + "remaining": 5, + }, + ], + }, + { + "regex": "changes-since", + "uri": "changes-since*", + "limit": [ + { + "verb": "GET", + 
"next-available": 0, + "unit": "MINUTE", + "value": 5, + "remaining": 5, + }, + ], + }, + + ], + "absolute": { + "maxTotalRAMSize": 512, + "maxTotalInstances": 5, + "maxTotalCores": 21, + }, + }, + } + body = json.loads(response.body) + self.assertEqual(expected, body) + + def _populate_limits_diff_regex(self, request): + """Put limit info into a request.""" + _limits = [ + limits.Limit("GET", "*", ".*", 10, 60).display(), + limits.Limit("GET", "*", "*.*", 10, 60).display(), + ] + request.environ["nova.limits"] = _limits + return request + + def test_index_diff_regex(self): + """Test getting limit details in JSON.""" + request = self._get_index_request() + request = self._populate_limits_diff_regex(request) + response = request.get_response(self.controller) + expected = { + "limits": { + "rate": [ + { + "regex": ".*", + "uri": "*", + "limit": [ + { + "verb": "GET", + "next-available": 0, + "unit": "MINUTE", + "value": 10, + "remaining": 10, + }, + ], + }, + { + "regex": "*.*", + "uri": "*", + "limit": [ + { + "verb": "GET", + "next-available": 0, + "unit": "MINUTE", + "value": 10, + "remaining": 10, + }, + ], + }, + + ], + "absolute": {}, + }, + } + body = json.loads(response.body) + self.assertEqual(expected, body) + + def _test_index_absolute_limits_json(self, expected): + request = self._get_index_request() + response = request.get_response(self.controller) + body = json.loads(response.body) + self.assertEqual(expected, body['limits']['absolute']) + + def test_index_ignores_extra_absolute_limits_json(self): + self.absolute_limits = {'unknown_limit': 9001} + self._test_index_absolute_limits_json({}) + + def test_index_absolute_ram_json(self): + self.absolute_limits = {'ram': 1024} + self._test_index_absolute_limits_json({'maxTotalRAMSize': 1024}) + + def test_index_absolute_cores_json(self): + self.absolute_limits = {'cores': 17} + self._test_index_absolute_limits_json({'maxTotalCores': 17}) + + def test_index_absolute_instances_json(self): + self.absolute_limits 
= {'instances': 19} + self._test_index_absolute_limits_json({'maxTotalInstances': 19}) + + def test_index_absolute_metadata_json(self): + # NOTE: both server metadata and image metadata are overloaded + # into metadata_items + self.absolute_limits = {'metadata_items': 23} + expected = { + 'maxServerMeta': 23, + 'maxImageMeta': 23, + } + self._test_index_absolute_limits_json(expected) + + def test_index_absolute_injected_files(self): + self.absolute_limits = { + 'injected_files': 17, + 'injected_file_content_bytes': 86753, + } + expected = { + 'maxPersonality': 17, + 'maxPersonalitySize': 86753, + } + self._test_index_absolute_limits_json(expected) + + class LimitMiddlewareTest(BaseLimitTestSuite): """ Tests for the `limits.RateLimitingMiddleware` class. @@ -185,7 +414,7 @@ class LimitMiddlewareTest(BaseLimitTestSuite): """Prepare middleware for use through fake WSGI app.""" BaseLimitTestSuite.setUp(self) _limits = [ - Limit("GET", "*", ".*", 1, 60), + limits.Limit("GET", "*", ".*", 1, 60), ] self.app = limits.RateLimitingMiddleware(self._empty_app, _limits) @@ -238,7 +467,7 @@ class LimitTest(BaseLimitTestSuite): def test_GET_no_delay(self): """Test a limit handles 1 GET per second.""" - limit = Limit("GET", "*", ".*", 1, 1) + limit = limits.Limit("GET", "*", ".*", 1, 1) delay = limit("GET", "/anything") self.assertEqual(None, delay) self.assertEqual(0, limit.next_request) @@ -246,7 +475,7 @@ class LimitTest(BaseLimitTestSuite): def test_GET_delay(self): """Test two calls to 1 GET per second limit.""" - limit = Limit("GET", "*", ".*", 1, 1) + limit = limits.Limit("GET", "*", ".*", 1, 1) delay = limit("GET", "/anything") self.assertEqual(None, delay) diff --git a/nova/tests/api/openstack/test_server_metadata.py b/nova/tests/api/openstack/test_server_metadata.py index c8d456472..c4d1d4fd8 100644 --- a/nova/tests/api/openstack/test_server_metadata.py +++ b/nova/tests/api/openstack/test_server_metadata.py @@ -21,11 +21,19 @@ import unittest import webob +from nova 
import flags from nova.api import openstack from nova.tests.api.openstack import fakes import nova.wsgi +FLAGS = flags.FLAGS + + +def return_create_instance_metadata_max(context, server_id, metadata): + return stub_max_server_metadata() + + def return_create_instance_metadata(context, server_id, metadata): return stub_server_metadata() @@ -48,8 +56,14 @@ def stub_server_metadata(): "key2": "value2", "key3": "value3", "key4": "value4", - "key5": "value5" - } + "key5": "value5"} + return metadata + + +def stub_max_server_metadata(): + metadata = {"metadata": {}} + for num in range(FLAGS.quota_metadata_items): + metadata['metadata']['key%i' % num] = "blah" return metadata @@ -69,7 +83,7 @@ class ServerMetaDataTest(unittest.TestCase): def test_index(self): self.stubs.Set(nova.db.api, 'instance_metadata_get', - return_server_metadata) + return_server_metadata) req = webob.Request.blank('/v1.1/servers/1/meta') req.environ['api.version'] = '1.1' res = req.get_response(fakes.wsgi_app()) @@ -79,7 +93,7 @@ class ServerMetaDataTest(unittest.TestCase): def test_index_no_data(self): self.stubs.Set(nova.db.api, 'instance_metadata_get', - return_empty_server_metadata) + return_empty_server_metadata) req = webob.Request.blank('/v1.1/servers/1/meta') req.environ['api.version'] = '1.1' res = req.get_response(fakes.wsgi_app()) @@ -89,7 +103,7 @@ class ServerMetaDataTest(unittest.TestCase): def test_show(self): self.stubs.Set(nova.db.api, 'instance_metadata_get', - return_server_metadata) + return_server_metadata) req = webob.Request.blank('/v1.1/servers/1/meta/key5') req.environ['api.version'] = '1.1' res = req.get_response(fakes.wsgi_app()) @@ -99,7 +113,7 @@ class ServerMetaDataTest(unittest.TestCase): def test_show_meta_not_found(self): self.stubs.Set(nova.db.api, 'instance_metadata_get', - return_empty_server_metadata) + return_empty_server_metadata) req = webob.Request.blank('/v1.1/servers/1/meta/key6') req.environ['api.version'] = '1.1' res = req.get_response(fakes.wsgi_app()) 
@@ -108,7 +122,7 @@ class ServerMetaDataTest(unittest.TestCase): def test_delete(self): self.stubs.Set(nova.db.api, 'instance_metadata_delete', - delete_server_metadata) + delete_server_metadata) req = webob.Request.blank('/v1.1/servers/1/meta/key5') req.environ['api.version'] = '1.1' req.method = 'DELETE' @@ -117,7 +131,7 @@ class ServerMetaDataTest(unittest.TestCase): def test_create(self): self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create', - return_create_instance_metadata) + return_create_instance_metadata) req = webob.Request.blank('/v1.1/servers/1/meta') req.environ['api.version'] = '1.1' req.method = 'POST' @@ -130,7 +144,7 @@ class ServerMetaDataTest(unittest.TestCase): def test_update_item(self): self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create', - return_create_instance_metadata) + return_create_instance_metadata) req = webob.Request.blank('/v1.1/servers/1/meta/key1') req.environ['api.version'] = '1.1' req.method = 'PUT' @@ -143,7 +157,7 @@ class ServerMetaDataTest(unittest.TestCase): def test_update_item_too_many_keys(self): self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create', - return_create_instance_metadata) + return_create_instance_metadata) req = webob.Request.blank('/v1.1/servers/1/meta/key1') req.environ['api.version'] = '1.1' req.method = 'PUT' @@ -154,7 +168,7 @@ class ServerMetaDataTest(unittest.TestCase): def test_update_item_body_uri_mismatch(self): self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create', - return_create_instance_metadata) + return_create_instance_metadata) req = webob.Request.blank('/v1.1/servers/1/meta/bad') req.environ['api.version'] = '1.1' req.method = 'PUT' @@ -162,3 +176,29 @@ class ServerMetaDataTest(unittest.TestCase): req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app()) self.assertEqual(400, res.status_int) + + def test_too_many_metadata_items_on_create(self): + self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create', + 
return_create_instance_metadata) + data = {"metadata": {}} + for num in range(FLAGS.quota_metadata_items + 1): + data['metadata']['key%i' % num] = "blah" + json_string = str(data).replace("\'", "\"") + req = webob.Request.blank('/v1.1/servers/1/meta') + req.environ['api.version'] = '1.1' + req.method = 'POST' + req.body = json_string + req.headers["content-type"] = "application/json" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(400, res.status_int) + + def test_to_many_metadata_items_on_update_item(self): + self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create', + return_create_instance_metadata_max) + req = webob.Request.blank('/v1.1/servers/1/meta/key1') + req.environ['api.version'] = '1.1' + req.method = 'PUT' + req.body = '{"a new key": "a new value"}' + req.headers["content-type"] = "application/json" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(400, res.status_int) diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 313676e72..fbde5c9ce 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -32,6 +32,8 @@ from nova import test import nova.api.openstack from nova.api.openstack import servers import nova.compute.api +from nova.compute import instance_types +from nova.compute import power_state import nova.db.api from nova.db.sqlalchemy.models import Instance from nova.db.sqlalchemy.models import InstanceMetadata @@ -55,6 +57,12 @@ def return_server_with_addresses(private, public): return _return_server +def return_server_with_power_state(power_state): + def _return_server(context, id): + return stub_instance(id, power_state=power_state) + return _return_server + + def return_servers(context, user_id=1): return [stub_instance(i, user_id) for i in xrange(5)] @@ -71,13 +79,19 @@ def instance_address(context, instance_id): return None -def stub_instance(id, user_id=1, private_address=None, public_addresses=None): +def 
stub_instance(id, user_id=1, private_address=None, public_addresses=None, + host=None, power_state=0): metadata = [] metadata.append(InstanceMetadata(key='seq', value=id)) - if public_addresses == None: + inst_type = instance_types.get_instance_type_by_flavor_id(1) + + if public_addresses is None: public_addresses = list() + if host is not None: + host = str(host) + instance = { "id": id, "admin_pass": "", @@ -89,14 +103,14 @@ def stub_instance(id, user_id=1, private_address=None, public_addresses=None): "launch_index": 0, "key_name": "", "key_data": "", - "state": 0, + "state": power_state, "state_description": "", "memory_mb": 0, "vcpus": 0, "local_gb": 0, "hostname": "", - "host": None, - "instance_type": "1", + "host": host, + "instance_type": dict(inst_type), "user_data": "", "reservation_id": "", "mac_address": "", @@ -120,6 +134,20 @@ def fake_compute_api(cls, req, id): return True +def find_host(self, context, instance_id): + return "nova" + + +class MockSetAdminPassword(object): + def __init__(self): + self.instance_id = None + self.password = None + + def __call__(self, context, instance_id, password): + self.instance_id = instance_id + self.password = password + + class ServersTest(test.TestCase): def setUp(self): @@ -165,7 +193,7 @@ class ServersTest(test.TestCase): self.assertEqual(res_dict['server']['id'], 1) self.assertEqual(res_dict['server']['name'], 'server1') - def test_get_server_by_id_v11(self): + def test_get_server_by_id_v1_1(self): req = webob.Request.blank('/v1.1/servers/1') res = req.get_response(fakes.wsgi_app()) res_dict = json.loads(res.body) @@ -192,6 +220,26 @@ class ServersTest(test.TestCase): print res_dict['server'] self.assertEqual(res_dict['server']['links'], expected_links) + def test_get_server_by_id_with_addresses_xml(self): + private = "192.168.0.3" + public = ["1.2.3.4"] + new_return_server = return_server_with_addresses(private, public) + self.stubs.Set(nova.db.api, 'instance_get', new_return_server) + req = 
webob.Request.blank('/v1.0/servers/1') + req.headers['Accept'] = 'application/xml' + res = req.get_response(fakes.wsgi_app()) + dom = minidom.parseString(res.body) + server = dom.childNodes[0] + self.assertEquals(server.nodeName, 'server') + self.assertEquals(server.getAttribute('id'), '1') + self.assertEquals(server.getAttribute('name'), 'server1') + (public,) = server.getElementsByTagName('public') + (ip,) = public.getElementsByTagName('ip') + self.assertEquals(ip.getAttribute('addr'), '1.2.3.4') + (private,) = server.getElementsByTagName('private') + (ip,) = private.getElementsByTagName('ip') + self.assertEquals(ip.getAttribute('addr'), '192.168.0.3') + def test_get_server_by_id_with_addresses(self): private = "192.168.0.3" public = ["1.2.3.4"] @@ -208,7 +256,85 @@ class ServersTest(test.TestCase): self.assertEqual(len(addresses["private"]), 1) self.assertEqual(addresses["private"][0], private) - def test_get_server_by_id_with_addresses_v11(self): + def test_get_server_addresses_v1_0(self): + private = '192.168.0.3' + public = ['1.2.3.4'] + new_return_server = return_server_with_addresses(private, public) + self.stubs.Set(nova.db.api, 'instance_get', new_return_server) + req = webob.Request.blank('/v1.0/servers/1/ips') + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + self.assertEqual(res_dict, { + 'addresses': {'public': public, 'private': [private]}}) + + def test_get_server_addresses_xml_v1_0(self): + private_expected = "192.168.0.3" + public_expected = ["1.2.3.4"] + new_return_server = return_server_with_addresses(private_expected, + public_expected) + self.stubs.Set(nova.db.api, 'instance_get', new_return_server) + req = webob.Request.blank('/v1.0/servers/1/ips') + req.headers['Accept'] = 'application/xml' + res = req.get_response(fakes.wsgi_app()) + dom = minidom.parseString(res.body) + (addresses,) = dom.childNodes + self.assertEquals(addresses.nodeName, 'addresses') + (public,) = addresses.getElementsByTagName('public') + 
(ip,) = public.getElementsByTagName('ip') + self.assertEquals(ip.getAttribute('addr'), public_expected[0]) + (private,) = addresses.getElementsByTagName('private') + (ip,) = private.getElementsByTagName('ip') + self.assertEquals(ip.getAttribute('addr'), private_expected) + + def test_get_server_addresses_public_v1_0(self): + private = "192.168.0.3" + public = ["1.2.3.4"] + new_return_server = return_server_with_addresses(private, public) + self.stubs.Set(nova.db.api, 'instance_get', new_return_server) + req = webob.Request.blank('/v1.0/servers/1/ips/public') + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + self.assertEqual(res_dict, {'public': public}) + + def test_get_server_addresses_private_v1_0(self): + private = "192.168.0.3" + public = ["1.2.3.4"] + new_return_server = return_server_with_addresses(private, public) + self.stubs.Set(nova.db.api, 'instance_get', new_return_server) + req = webob.Request.blank('/v1.0/servers/1/ips/private') + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + self.assertEqual(res_dict, {'private': [private]}) + + def test_get_server_addresses_public_xml_v1_0(self): + private = "192.168.0.3" + public = ["1.2.3.4"] + new_return_server = return_server_with_addresses(private, public) + self.stubs.Set(nova.db.api, 'instance_get', new_return_server) + req = webob.Request.blank('/v1.0/servers/1/ips/public') + req.headers['Accept'] = 'application/xml' + res = req.get_response(fakes.wsgi_app()) + dom = minidom.parseString(res.body) + (public_node,) = dom.childNodes + self.assertEquals(public_node.nodeName, 'public') + (ip,) = public_node.getElementsByTagName('ip') + self.assertEquals(ip.getAttribute('addr'), public[0]) + + def test_get_server_addresses_private_xml_v1_0(self): + private = "192.168.0.3" + public = ["1.2.3.4"] + new_return_server = return_server_with_addresses(private, public) + self.stubs.Set(nova.db.api, 'instance_get', new_return_server) + req = 
webob.Request.blank('/v1.0/servers/1/ips/private') + req.headers['Accept'] = 'application/xml' + res = req.get_response(fakes.wsgi_app()) + dom = minidom.parseString(res.body) + (private_node,) = dom.childNodes + self.assertEquals(private_node.nodeName, 'private') + (ip,) = private_node.getElementsByTagName('ip') + self.assertEquals(ip.getAttribute('addr'), private) + + def test_get_server_by_id_with_addresses_v1_1(self): private = "192.168.0.3" public = ["1.2.3.4"] new_return_server = return_server_with_addresses(private, public) @@ -238,7 +364,7 @@ class ServersTest(test.TestCase): self.assertEqual(s.get('imageId', None), None) i += 1 - def test_get_server_list_v11(self): + def test_get_server_list_v1_1(self): req = webob.Request.blank('/v1.1/servers') res = req.get_response(fakes.wsgi_app()) res_dict = json.loads(res.body) @@ -361,6 +487,7 @@ class ServersTest(test.TestCase): "_get_kernel_ramdisk_from_image", kernel_ramdisk_mapping) self.stubs.Set(nova.api.openstack.common, "get_image_id_from_image_hash", image_id_from_hash) + self.stubs.Set(nova.compute.api.API, "_find_host", find_host) def _test_create_instance_helper(self): self._setup_for_create_instance() @@ -459,16 +586,16 @@ class ServersTest(test.TestCase): res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 400) - def test_create_instance_v11(self): + def test_create_instance_v1_1(self): self._setup_for_create_instance() - imageRef = 'http://localhost/v1.1/images/2' - flavorRef = 'http://localhost/v1.1/flavors/3' + image_ref = 'http://localhost/v1.1/images/2' + flavor_ref = 'http://localhost/v1.1/flavors/3' body = { 'server': { 'name': 'server_test', - 'imageRef': imageRef, - 'flavorRef': flavorRef, + 'imageRef': image_ref, + 'flavorRef': flavor_ref, 'metadata': { 'hello': 'world', 'open': 'stack', @@ -488,17 +615,17 @@ class ServersTest(test.TestCase): self.assertEqual(16, len(server['adminPass'])) self.assertEqual('server_test', server['name']) self.assertEqual(1, server['id']) - 
self.assertEqual(flavorRef, server['flavorRef']) - self.assertEqual(imageRef, server['imageRef']) + self.assertEqual(flavor_ref, server['flavorRef']) + self.assertEqual(image_ref, server['imageRef']) self.assertEqual(res.status_int, 200) - def test_create_instance_v11_bad_href(self): + def test_create_instance_v1_1_bad_href(self): self._setup_for_create_instance() - imageRef = 'http://localhost/v1.1/images/asdf' - flavorRef = 'http://localhost/v1.1/flavors/3' + image_ref = 'http://localhost/v1.1/images/asdf' + flavor_ref = 'http://localhost/v1.1/flavors/3' body = dict(server=dict( - name='server_test', imageRef=imageRef, flavorRef=flavorRef, + name='server_test', imageRef=image_ref, flavorRef=flavor_ref, metadata={'hello': 'world', 'open': 'stack'}, personality={})) req = webob.Request.blank('/v1.1/servers') @@ -508,6 +635,97 @@ class ServersTest(test.TestCase): res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 400) + def test_create_instance_v1_1_local_href(self): + self._setup_for_create_instance() + + image_ref = 'http://localhost/v1.1/images/2' + image_ref_local = '2' + flavor_ref = 'http://localhost/v1.1/flavors/3' + body = { + 'server': { + 'name': 'server_test', + 'imageRef': image_ref_local, + 'flavorRef': flavor_ref, + }, + } + + req = webob.Request.blank('/v1.1/servers') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + + res = req.get_response(fakes.wsgi_app()) + + server = json.loads(res.body)['server'] + self.assertEqual(1, server['id']) + self.assertEqual(flavor_ref, server['flavorRef']) + self.assertEqual(image_ref, server['imageRef']) + self.assertEqual(res.status_int, 200) + + def test_create_instance_with_admin_pass_v1_0(self): + self._setup_for_create_instance() + + body = { + 'server': { + 'name': 'test-server-create', + 'imageId': 3, + 'flavorId': 1, + 'adminPass': 'testpass', + }, + } + + req = webob.Request.blank('/v1.0/servers') + req.method = 'POST' + req.body = 
json.dumps(body) + req.headers['content-type'] = "application/json" + res = req.get_response(fakes.wsgi_app()) + res = json.loads(res.body) + self.assertNotEqual(res['server']['adminPass'], + body['server']['adminPass']) + + def test_create_instance_with_admin_pass_v1_1(self): + self._setup_for_create_instance() + + image_ref = 'http://localhost/v1.1/images/2' + flavor_ref = 'http://localhost/v1.1/flavors/3' + body = { + 'server': { + 'name': 'server_test', + 'imageRef': image_ref, + 'flavorRef': flavor_ref, + 'adminPass': 'testpass', + }, + } + + req = webob.Request.blank('/v1.1/servers') + req.method = 'POST' + req.body = json.dumps(body) + req.headers['content-type'] = "application/json" + res = req.get_response(fakes.wsgi_app()) + server = json.loads(res.body)['server'] + self.assertEqual(server['adminPass'], body['server']['adminPass']) + + def test_create_instance_with_empty_admin_pass_v1_1(self): + self._setup_for_create_instance() + + image_ref = 'http://localhost/v1.1/images/2' + flavor_ref = 'http://localhost/v1.1/flavors/3' + body = { + 'server': { + 'name': 'server_test', + 'imageRef': image_ref, + 'flavorRef': flavor_ref, + 'adminPass': '', + }, + } + + req = webob.Request.blank('/v1.1/servers') + req.method = 'POST' + req.body = json.dumps(body) + req.headers['content-type'] = "application/json" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + def test_update_no_body(self): req = webob.Request.blank('/v1.0/servers/1') req.method = 'PUT' @@ -550,20 +768,22 @@ class ServersTest(test.TestCase): res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 400) - def test_update_server_v10(self): + def test_update_server_v1_0(self): inst_dict = dict(name='server_test', adminPass='bacon') self.body = json.dumps(dict(server=inst_dict)) def server_update(context, id, params): filtered_dict = dict( - display_name='server_test', - admin_pass='bacon', + display_name='server_test' ) self.assertEqual(params, 
filtered_dict) return filtered_dict self.stubs.Set(nova.db.api, 'instance_update', server_update) + self.stubs.Set(nova.compute.api.API, "_find_host", find_host) + mock_method = MockSetAdminPassword() + self.stubs.Set(nova.compute.api.API, 'set_admin_password', mock_method) req = webob.Request.blank('/v1.0/servers/1') req.method = 'PUT' @@ -571,8 +791,10 @@ class ServersTest(test.TestCase): req.body = self.body res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 204) + self.assertEqual(mock_method.instance_id, '1') + self.assertEqual(mock_method.password, 'bacon') - def test_update_server_adminPass_ignored_v11(self): + def test_update_server_adminPass_ignored_v1_1(self): inst_dict = dict(name='server_test', adminPass='bacon') self.body = json.dumps(dict(server=inst_dict)) @@ -613,11 +835,27 @@ class ServersTest(test.TestCase): res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 501) - def test_server_backup_schedule_deprecated_v11(self): + def test_server_backup_schedule_deprecated_v1_1(self): req = webob.Request.blank('/v1.1/servers/1/backup_schedule') res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 404) + def test_get_all_server_details_xml_v1_0(self): + req = webob.Request.blank('/v1.0/servers/detail') + req.headers['Accept'] = 'application/xml' + res = req.get_response(fakes.wsgi_app()) + print res.body + dom = minidom.parseString(res.body) + for i, server in enumerate(dom.getElementsByTagName('server')): + self.assertEqual(server.getAttribute('id'), str(i)) + self.assertEqual(server.getAttribute('hostId'), '') + self.assertEqual(server.getAttribute('name'), 'server%d' % i) + self.assertEqual(server.getAttribute('imageId'), '10') + self.assertEqual(server.getAttribute('status'), 'BUILD') + (meta,) = server.getElementsByTagName('meta') + self.assertEqual(meta.getAttribute('key'), 'seq') + self.assertEqual(meta.firstChild.data.strip(), str(i)) + def test_get_all_server_details_v1_0(self): req = 
webob.Request.blank('/v1.0/servers/detail') res = req.get_response(fakes.wsgi_app()) @@ -628,9 +866,9 @@ class ServersTest(test.TestCase): self.assertEqual(s['hostId'], '') self.assertEqual(s['name'], 'server%d' % i) self.assertEqual(s['imageId'], '10') - self.assertEqual(s['flavorId'], '1') + self.assertEqual(s['flavorId'], 1) self.assertEqual(s['status'], 'BUILD') - self.assertEqual(s['metadata']['seq'], i) + self.assertEqual(s['metadata']['seq'], str(i)) def test_get_all_server_details_v1_1(self): req = webob.Request.blank('/v1.1/servers/detail') @@ -644,7 +882,7 @@ class ServersTest(test.TestCase): self.assertEqual(s['imageRef'], 'http://localhost/v1.1/images/10') self.assertEqual(s['flavorRef'], 'http://localhost/v1.1/flavors/1') self.assertEqual(s['status'], 'BUILD') - self.assertEqual(s['metadata']['seq'], i) + self.assertEqual(s['metadata']['seq'], str(i)) def test_get_all_server_details_with_host(self): ''' @@ -654,12 +892,8 @@ class ServersTest(test.TestCase): instances - 2 on one host and 3 on another. 
''' - def stub_instance(id, user_id=1): - return Instance(id=id, state=0, image_id=10, user_id=user_id, - display_name='server%s' % id, host='host%s' % (id % 2)) - def return_servers_with_host(context, user_id=1): - return [stub_instance(i) for i in xrange(5)] + return [stub_instance(i, 1, None, None, i % 2) for i in xrange(5)] self.stubs.Set(nova.db.api, 'instance_get_all_by_user', return_servers_with_host) @@ -677,7 +911,8 @@ class ServersTest(test.TestCase): self.assertEqual(s['id'], i) self.assertEqual(s['hostId'], host_ids[i % 2]) self.assertEqual(s['name'], 'server%d' % i) - self.assertEqual(s['imageId'], 10) + self.assertEqual(s['imageId'], '10') + self.assertEqual(s['flavorId'], 1) def test_server_pause(self): FLAGS.allow_admin_api = True @@ -774,16 +1009,6 @@ class ServersTest(test.TestCase): self.assertEqual(res.status_int, 501) def test_server_change_password_v1_1(self): - - class MockSetAdminPassword(object): - def __init__(self): - self.instance_id = None - self.password = None - - def __call__(self, context, instance_id, password): - self.instance_id = instance_id - self.password = password - mock_method = MockSetAdminPassword() self.stubs.Set(nova.compute.api.API, 'set_admin_password', mock_method) body = {'changePassword': {'adminPass': '1234pass'}} @@ -842,15 +1067,175 @@ class ServersTest(test.TestCase): req.body = json.dumps(body) res = req.get_response(fakes.wsgi_app()) - def test_server_rebuild(self): - body = dict(server=dict( - name='server_test', imageId=2, flavorId=2, metadata={}, - personality={})) + def test_server_rebuild_accepted(self): + body = { + "rebuild": { + "imageId": 2, + }, + } + req = webob.Request.blank('/v1.0/servers/1/action') req.method = 'POST' req.content_type = 'application/json' req.body = json.dumps(body) + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 202) + self.assertEqual(res.body, "") + + def test_server_rebuild_rejected_when_building(self): + body = { + "rebuild": { + "imageId": 2, 
+ }, + } + + state = power_state.BUILDING + new_return_server = return_server_with_power_state(state) + self.stubs.Set(nova.db.api, 'instance_get', new_return_server) + + req = webob.Request.blank('/v1.0/servers/1/action') + req.method = 'POST' + req.content_type = 'application/json' + req.body = json.dumps(body) + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 409) + + def test_server_rebuild_bad_entity(self): + body = { + "rebuild": { + }, + } + + req = webob.Request.blank('/v1.0/servers/1/action') + req.method = 'POST' + req.content_type = 'application/json' + req.body = json.dumps(body) + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + + def test_server_rebuild_accepted_minimum_v1_1(self): + body = { + "rebuild": { + "imageRef": "http://localhost/images/2", + }, + } + + req = webob.Request.blank('/v1.1/servers/1/action') + req.method = 'POST' + req.content_type = 'application/json' + req.body = json.dumps(body) + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 202) + + def test_server_rebuild_rejected_when_building_v1_1(self): + body = { + "rebuild": { + "imageRef": "http://localhost/images/2", + }, + } + + state = power_state.BUILDING + new_return_server = return_server_with_power_state(state) + self.stubs.Set(nova.db.api, 'instance_get', new_return_server) + + req = webob.Request.blank('/v1.1/servers/1/action') + req.method = 'POST' + req.content_type = 'application/json' + req.body = json.dumps(body) + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 409) + + def test_server_rebuild_accepted_with_metadata_v1_1(self): + body = { + "rebuild": { + "imageRef": "http://localhost/images/2", + "metadata": { + "new": "metadata", + }, + }, + } + + req = webob.Request.blank('/v1.1/servers/1/action') + req.method = 'POST' + req.content_type = 'application/json' + req.body = json.dumps(body) + + res = req.get_response(fakes.wsgi_app()) + 
self.assertEqual(res.status_int, 202) + + def test_server_rebuild_accepted_with_bad_metadata_v1_1(self): + body = { + "rebuild": { + "imageRef": "http://localhost/images/2", + "metadata": "stack", + }, + } + + req = webob.Request.blank('/v1.1/servers/1/action') + req.method = 'POST' + req.content_type = 'application/json' + req.body = json.dumps(body) + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + + def test_server_rebuild_bad_entity_v1_1(self): + body = { + "rebuild": { + "imageId": 2, + }, + } + + req = webob.Request.blank('/v1.1/servers/1/action') + req.method = 'POST' + req.content_type = 'application/json' + req.body = json.dumps(body) + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + + def test_server_rebuild_bad_personality_v1_1(self): + body = { + "rebuild": { + "imageRef": "http://localhost/images/2", + "personality": [{ + "path": "/path/to/file", + "contents": "INVALID b64", + }] + }, + } + + req = webob.Request.blank('/v1.1/servers/1/action') + req.method = 'POST' + req.content_type = 'application/json' + req.body = json.dumps(body) + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + + def test_server_rebuild_personality_v1_1(self): + body = { + "rebuild": { + "imageRef": "http://localhost/images/2", + "personality": [{ + "path": "/path/to/file", + "contents": base64.b64encode("Test String"), + }] + }, + } + + req = webob.Request.blank('/v1.1/servers/1/action') + req.method = 'POST' + req.content_type = 'application/json' + req.body = json.dumps(body) + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 202) def test_delete_server_instance(self): req = webob.Request.blank('/v1.0/servers/1') @@ -973,6 +1358,24 @@ class ServersTest(test.TestCase): res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 400) + def test_shutdown_status(self): + new_server = 
return_server_with_power_state(power_state.SHUTDOWN) + self.stubs.Set(nova.db.api, 'instance_get', new_server) + req = webob.Request.blank('/v1.0/servers/1') + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + res_dict = json.loads(res.body) + self.assertEqual(res_dict['server']['status'], 'SHUTDOWN') + + def test_shutoff_status(self): + new_server = return_server_with_power_state(power_state.SHUTOFF) + self.stubs.Set(nova.db.api, 'instance_get', new_server) + req = webob.Request.blank('/v1.0/servers/1') + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + res_dict = json.loads(res.body) + self.assertEqual(res_dict['server']['status'], 'SHUTOFF') + class TestServerCreateRequestXMLDeserializer(unittest.TestCase): @@ -1254,6 +1657,19 @@ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==""", request = self.deserializer.deserialize(serial_request) self.assertEqual(request, expected) + def test_request_xmlser_with_flavor_image_ref(self): + serial_request = """ + <server xmlns="http://docs.openstack.org/compute/api/v1.1" + name="new-server-test" + imageRef="http://localhost:8774/v1.1/images/1" + flavorRef="http://localhost:8774/v1.1/flavors/1"> + </server>""" + request = self.deserializer.deserialize(serial_request) + self.assertEquals(request["server"]["flavorRef"], + "http://localhost:8774/v1.1/flavors/1") + self.assertEquals(request["server"]["imageRef"], + "http://localhost:8774/v1.1/images/1") + class TestServerInstanceCreation(test.TestCase): @@ -1525,29 +1941,27 @@ class TestGetKernelRamdiskFromImage(test.TestCase): def test_not_ami(self): """Anything other than ami should return no kernel and no ramdisk""" - image_meta = {'id': 1, 'status': 'active', - 'properties': {'disk_format': 'vhd'}} + image_meta = {'id': 1, 'status': 'active', 'container_format': 'vhd'} kernel_id, ramdisk_id = self._get_k_r(image_meta) self.assertEqual(kernel_id, None) self.assertEqual(ramdisk_id, None) def test_ami_no_kernel(self): """If an 
ami is missing a kernel it should raise NotFound""" - image_meta = {'id': 1, 'status': 'active', - 'properties': {'disk_format': 'ami', 'ramdisk_id': 1}} + image_meta = {'id': 1, 'status': 'active', 'container_format': 'ami', + 'properties': {'ramdisk_id': 1}} self.assertRaises(exception.NotFound, self._get_k_r, image_meta) def test_ami_no_ramdisk(self): """If an ami is missing a ramdisk it should raise NotFound""" - image_meta = {'id': 1, 'status': 'active', - 'properties': {'disk_format': 'ami', 'kernel_id': 1}} + image_meta = {'id': 1, 'status': 'active', 'container_format': 'ami', + 'properties': {'kernel_id': 1}} self.assertRaises(exception.NotFound, self._get_k_r, image_meta) def test_ami_kernel_ramdisk_present(self): """Return IDs if both kernel and ramdisk are present""" - image_meta = {'id': 1, 'status': 'active', - 'properties': {'disk_format': 'ami', 'kernel_id': 1, - 'ramdisk_id': 2}} + image_meta = {'id': 1, 'status': 'active', 'container_format': 'ami', + 'properties': {'kernel_id': 1, 'ramdisk_id': 2}} kernel_id, ramdisk_id = self._get_k_r(image_meta) self.assertEqual(kernel_id, 1) self.assertEqual(ramdisk_id, 2) diff --git a/nova/tests/api/openstack/test_versions.py b/nova/tests/api/openstack/test_versions.py index 2640a4ddb..fd8d50904 100644 --- a/nova/tests/api/openstack/test_versions.py +++ b/nova/tests/api/openstack/test_versions.py @@ -47,8 +47,7 @@ class VersionsTest(test.TestCase): { "rel": "self", "href": "http://localhost/v1.1", - } - ], + }], }, { "id": "v1.0", @@ -57,8 +56,7 @@ class VersionsTest(test.TestCase): { "rel": "self", "href": "http://localhost/v1.0", - } - ], + }], }, ] self.assertEqual(versions, expected) diff --git a/nova/tests/api/openstack/test_zones.py b/nova/tests/api/openstack/test_zones.py index a3f191aaa..fa2e05033 100644 --- a/nova/tests/api/openstack/test_zones.py +++ b/nova/tests/api/openstack/test_zones.py @@ -20,6 +20,8 @@ import json import nova.db from nova import context +from nova import crypto +from nova 
import exception from nova import flags from nova import test from nova.api.openstack import zones @@ -75,10 +77,22 @@ def zone_get_all_db(context): ] -def zone_capabilities(method, context, params): +def zone_capabilities(method, context): return dict() +GLOBAL_BUILD_PLAN = [ + dict(name='host1', weight=10, ip='10.0.0.1', zone='zone1'), + dict(name='host2', weight=9, ip='10.0.0.2', zone='zone2'), + dict(name='host3', weight=8, ip='10.0.0.3', zone='zone3'), + dict(name='host4', weight=7, ip='10.0.0.4', zone='zone4'), + ] + + +def zone_select(context, specs): + return GLOBAL_BUILD_PLAN + + class ZonesTest(test.TestCase): def setUp(self): super(ZonesTest, self).setUp() @@ -190,3 +204,31 @@ class ZonesTest(test.TestCase): self.assertEqual(res_dict['zone']['name'], 'darksecret') self.assertEqual(res_dict['zone']['cap1'], 'a;b') self.assertEqual(res_dict['zone']['cap2'], 'c;d') + + def test_zone_select(self): + FLAGS.build_plan_encryption_key = 'c286696d887c9aa0611bbb3e2025a45a' + self.stubs.Set(api, 'select', zone_select) + + req = webob.Request.blank('/v1.0/zones/select') + + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + self.assertEqual(res.status_int, 200) + + self.assertTrue('weights' in res_dict) + + for item in res_dict['weights']: + blob = item['blob'] + decrypt = crypto.decryptor(FLAGS.build_plan_encryption_key) + secret_item = json.loads(decrypt(blob)) + found = False + for original_item in GLOBAL_BUILD_PLAN: + if original_item['name'] != secret_item['name']: + continue + found = True + for key in ('weight', 'ip', 'zone'): + self.assertEqual(secret_item[key], original_item[key]) + + self.assertTrue(found) + self.assertEqual(len(item), 2) + self.assertTrue('weight' in item) diff --git a/nova/tests/api/test_wsgi.py b/nova/tests/api/test_wsgi.py index 1ecdd1cfb..5820ecdc2 100644 --- a/nova/tests/api/test_wsgi.py +++ b/nova/tests/api/test_wsgi.py @@ -136,6 +136,12 @@ class RequestTest(test.TestCase): request.body = "asdf<br />" 
self.assertRaises(webob.exc.HTTPBadRequest, request.get_content_type) + def test_request_content_type_with_charset(self): + request = wsgi.Request.blank('/tests/123') + request.headers["Content-Type"] = "application/json; charset=UTF-8" + result = request.get_content_type() + self.assertEqual(result, "application/json") + def test_content_type_from_accept_xml(self): request = wsgi.Request.blank('/tests/123') request.headers["Accept"] = "application/xml" diff --git a/nova/tests/db/fakes.py b/nova/tests/db/fakes.py index 7ddfe377a..8bdea359a 100644 --- a/nova/tests/db/fakes.py +++ b/nova/tests/db/fakes.py @@ -28,29 +28,34 @@ def stub_out_db_instance_api(stubs, injected=True): """Stubs out the db API for creating Instances.""" INSTANCE_TYPES = { - 'm1.tiny': dict(memory_mb=512, + 'm1.tiny': dict(id=2, + memory_mb=512, vcpus=1, local_gb=0, flavorid=1, rxtx_cap=1), - 'm1.small': dict(memory_mb=2048, + 'm1.small': dict(id=5, + memory_mb=2048, vcpus=1, local_gb=20, flavorid=2, rxtx_cap=2), 'm1.medium': - dict(memory_mb=4096, + dict(id=1, + memory_mb=4096, vcpus=2, local_gb=40, flavorid=3, rxtx_cap=3), - 'm1.large': dict(memory_mb=8192, + 'm1.large': dict(id=3, + memory_mb=8192, vcpus=4, local_gb=80, flavorid=4, rxtx_cap=4), 'm1.xlarge': - dict(memory_mb=16384, + dict(id=4, + memory_mb=16384, vcpus=8, local_gb=160, flavorid=5, @@ -107,13 +112,18 @@ def stub_out_db_instance_api(stubs, injected=True): def fake_instance_type_get_by_name(context, name): return INSTANCE_TYPES[name] + def fake_instance_type_get_by_id(context, id): + for name, inst_type in INSTANCE_TYPES.iteritems(): + if str(inst_type['id']) == str(id): + return inst_type + return None + def fake_network_get_by_instance(context, instance_id): # Even instance numbers are on vlan networks if instance_id % 2 == 0: return FakeModel(vlan_network_fields) else: return FakeModel(flat_network_fields) - return FakeModel(network_fields) def fake_network_get_all_by_instance(context, instance_id): # Even instance numbers are 
on vlan networks @@ -136,6 +146,7 @@ def stub_out_db_instance_api(stubs, injected=True): fake_network_get_all_by_instance) stubs.Set(db, 'instance_type_get_all', fake_instance_type_get_all) stubs.Set(db, 'instance_type_get_by_name', fake_instance_type_get_by_name) + stubs.Set(db, 'instance_type_get_by_id', fake_instance_type_get_by_id) stubs.Set(db, 'instance_get_fixed_address', fake_instance_get_fixed_address) stubs.Set(db, 'instance_get_fixed_address_v6', diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py index 5d7ca98b5..ecefc464a 100644 --- a/nova/tests/fake_flags.py +++ b/nova/tests/fake_flags.py @@ -21,24 +21,24 @@ from nova import flags FLAGS = flags.FLAGS flags.DECLARE('volume_driver', 'nova.volume.manager') -FLAGS.volume_driver = 'nova.volume.driver.FakeISCSIDriver' -FLAGS.connection_type = 'fake' -FLAGS.fake_rabbit = True +FLAGS['volume_driver'].SetDefault('nova.volume.driver.FakeISCSIDriver') +FLAGS['connection_type'].SetDefault('fake') +FLAGS['fake_rabbit'].SetDefault(True) flags.DECLARE('auth_driver', 'nova.auth.manager') -FLAGS.auth_driver = 'nova.auth.dbdriver.DbDriver' +FLAGS['auth_driver'].SetDefault('nova.auth.dbdriver.DbDriver') flags.DECLARE('network_size', 'nova.network.manager') flags.DECLARE('num_networks', 'nova.network.manager') flags.DECLARE('fake_network', 'nova.network.manager') -FLAGS.network_size = 8 -FLAGS.num_networks = 2 -FLAGS.fake_network = True -FLAGS.image_service = 'nova.image.local.LocalImageService' +FLAGS['network_size'].SetDefault(8) +FLAGS['num_networks'].SetDefault(2) +FLAGS['fake_network'].SetDefault(True) +FLAGS['image_service'].SetDefault('nova.image.local.LocalImageService') flags.DECLARE('num_shelves', 'nova.volume.driver') flags.DECLARE('blades_per_shelf', 'nova.volume.driver') flags.DECLARE('iscsi_num_targets', 'nova.volume.driver') -FLAGS.num_shelves = 2 -FLAGS.blades_per_shelf = 4 -FLAGS.iscsi_num_targets = 8 -FLAGS.verbose = True -FLAGS.sqlite_db = "tests.sqlite" -FLAGS.use_ipv6 = True 
+FLAGS['num_shelves'].SetDefault(2) +FLAGS['blades_per_shelf'].SetDefault(4) +FLAGS['iscsi_num_targets'].SetDefault(8) +FLAGS['verbose'].SetDefault(True) +FLAGS['sqlite_db'].SetDefault("tests.sqlite") +FLAGS['use_ipv6'].SetDefault(True) diff --git a/nova/tests/image/test_glance.py b/nova/tests/image/test_glance.py index 9d0b14613..109905ded 100644 --- a/nova/tests/image/test_glance.py +++ b/nova/tests/image/test_glance.py @@ -209,17 +209,17 @@ class TestMutatorDateTimeTests(BaseGlanceTest): self.assertDateTimesEmpty(image_meta) def test_update_handles_datetimes(self): + self.client.images = {'image1': self._make_datetime_fixture()} self.client.update_response = self._make_datetime_fixture() - dummy_id = 'dummy_id' dummy_meta = {} - image_meta = self.service.update(self.context, 'dummy_id', dummy_meta) + image_meta = self.service.update(self.context, 'image1', dummy_meta) self.assertDateTimesFilled(image_meta) def test_update_handles_none_datetimes(self): + self.client.images = {'image1': self._make_datetime_fixture()} self.client.update_response = self._make_none_datetime_fixture() - dummy_id = 'dummy_id' dummy_meta = {} - image_meta = self.service.update(self.context, 'dummy_id', dummy_meta) + image_meta = self.service.update(self.context, 'image1', dummy_meta) self.assertDateTimesEmpty(image_meta) def _make_datetime_fixture(self): diff --git a/nova/tests/integrated/integrated_helpers.py b/nova/tests/integrated/integrated_helpers.py index 2e5d67017..bc98921f0 100644 --- a/nova/tests/integrated/integrated_helpers.py +++ b/nova/tests/integrated/integrated_helpers.py @@ -160,7 +160,7 @@ class _IntegratedTestBase(test.TestCase): #self.start_service('network') self.start_service('scheduler') - self.auth_url = self._start_api_service() + self._start_api_service() self.context = IntegratedUnitTestContext(self.auth_url) @@ -174,8 +174,10 @@ class _IntegratedTestBase(test.TestCase): if not api_service: raise Exception("API Service was None") - auth_url = 
'http://localhost:8774/v1.1' - return auth_url + self.api_service = api_service + + host, port = api_service.get_socket_info('osapi') + self.auth_url = 'http://%s:%s/v1.1' % (host, port) def tearDown(self): self.context.cleanup() @@ -184,6 +186,11 @@ class _IntegratedTestBase(test.TestCase): def _get_flags(self): """An opportunity to setup flags, before the services are started.""" f = {} + + # Auto-assign ports to allow concurrent tests + f['ec2_listen_port'] = 0 + f['osapi_listen_port'] = 0 + f['image_service'] = 'nova.image.fake.FakeImageService' f['fake_network'] = True return f diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py index 749ea8955..e89d0100a 100644 --- a/nova/tests/integrated/test_servers.py +++ b/nova/tests/integrated/test_servers.py @@ -134,50 +134,50 @@ class ServersTest(integrated_helpers._IntegratedTestBase): # Should be gone self.assertFalse(found_server) -# TODO(justinsb): Enable this unit test when the metadata bug is fixed -# def test_create_server_with_metadata(self): -# """Creates a server with metadata""" -# -# # Build the server data gradually, checking errors along the way -# server = self._build_minimal_create_server_request() -# -# for metadata_count in range(30): -# metadata = {} -# for i in range(metadata_count): -# metadata['key_%s' % i] = 'value_%s' % i -# server['metadata'] = metadata -# -# post = {'server': server} -# created_server = self.api.post_server(post) -# LOG.debug("created_server: %s" % created_server) -# self.assertTrue(created_server['id']) -# created_server_id = created_server['id'] -# # Reenable when bug fixed -# # self.assertEqual(metadata, created_server.get('metadata')) -# -# # Check it's there -# found_server = self.api.get_server(created_server_id) -# self.assertEqual(created_server_id, found_server['id']) -# self.assertEqual(metadata, found_server.get('metadata')) -# -# # The server should also be in the all-servers details list -# servers = 
self.api.get_servers(detail=True) -# server_map = dict((server['id'], server) for server in servers) -# found_server = server_map.get(created_server_id) -# self.assertTrue(found_server) -# # Details do include metadata -# self.assertEqual(metadata, found_server.get('metadata')) -# -# # The server should also be in the all-servers summary list -# servers = self.api.get_servers(detail=False) -# server_map = dict((server['id'], server) for server in servers) -# found_server = server_map.get(created_server_id) -# self.assertTrue(found_server) -# # Summary should not include metadata -# self.assertFalse(found_server.get('metadata')) -# -# # Cleanup -# self._delete_server(created_server_id) + def test_create_server_with_metadata(self): + """Creates a server with metadata.""" + + # Build the server data gradually, checking errors along the way + server = self._build_minimal_create_server_request() + + metadata = {} + for i in range(30): + metadata['key_%s' % i] = 'value_%s' % i + + server['metadata'] = metadata + + post = {'server': server} + created_server = self.api.post_server(post) + LOG.debug("created_server: %s" % created_server) + self.assertTrue(created_server['id']) + created_server_id = created_server['id'] + + # Reenable when bug fixed + self.assertEqual(metadata, created_server.get('metadata')) + # Check it's there + + found_server = self.api.get_server(created_server_id) + self.assertEqual(created_server_id, found_server['id']) + self.assertEqual(metadata, found_server.get('metadata')) + + # The server should also be in the all-servers details list + servers = self.api.get_servers(detail=True) + server_map = dict((server['id'], server) for server in servers) + found_server = server_map.get(created_server_id) + self.assertTrue(found_server) + # Details do include metadata + self.assertEqual(metadata, found_server.get('metadata')) + + # The server should also be in the all-servers summary list + servers = self.api.get_servers(detail=False) + server_map = 
dict((server['id'], server) for server in servers) + found_server = server_map.get(created_server_id) + self.assertTrue(found_server) + # Summary should not include metadata + self.assertFalse(found_server.get('metadata')) + + # Cleanup + self._delete_server(created_server_id) if __name__ == "__main__": diff --git a/nova/tests/network/base.py b/nova/tests/network/base.py index 988a1de72..b06271c99 100644 --- a/nova/tests/network/base.py +++ b/nova/tests/network/base.py @@ -25,6 +25,7 @@ from nova import context from nova import db from nova import exception from nova import flags +from nova import ipv6 from nova import log as logging from nova import test from nova import utils @@ -117,15 +118,15 @@ class NetworkTestCase(test.TestCase): context.get_admin_context(), instance_ref['id']) self.assertEqual(instance_ref['mac_address'], - utils.to_mac(address_v6)) + ipv6.to_mac(address_v6)) instance_ref2 = db.fixed_ip_get_instance_v6( context.get_admin_context(), address_v6) self.assertEqual(instance_ref['id'], instance_ref2['id']) self.assertEqual(address_v6, - utils.to_global_ipv6( - network_ref['cidr_v6'], - instance_ref['mac_address'])) + ipv6.to_global(network_ref['cidr_v6'], + instance_ref['mac_address'], + 'test')) self._deallocate_address(0, address) db.instance_destroy(context.get_admin_context(), instance_ref['id']) diff --git a/nova/tests/public_key/dummy.fingerprint b/nova/tests/public_key/dummy.fingerprint new file mode 100644 index 000000000..715bca27a --- /dev/null +++ b/nova/tests/public_key/dummy.fingerprint @@ -0,0 +1 @@ +1c:87:d1:d9:32:fd:62:3c:78:2b:c0:ad:c0:15:88:df diff --git a/nova/tests/public_key/dummy.pub b/nova/tests/public_key/dummy.pub new file mode 100644 index 000000000..d4cf2bc0d --- /dev/null +++ b/nova/tests/public_key/dummy.pub @@ -0,0 +1 @@ +ssh-dss 
AAAAB3NzaC1kc3MAAACBAMGJlY9XEIm2X234pdO5yFWMp2JuOQx8U0E815IVXhmKxYCBK9ZakgZOIQmPbXoGYyV+mziDPp6HJ0wKYLQxkwLEFr51fAZjWQvRss0SinURRuLkockDfGFtD4pYJthekr/rlqMKlBSDUSpGq8jUWW60UJ18FGooFpxR7ESqQRx/AAAAFQC96LRglaUeeP+E8U/yblEJocuiWwAAAIA3XiMR8Skiz/0aBm5K50SeQznQuMJTyzt9S9uaz5QZWiFu69hOyGSFGw8fqgxEkXFJIuHobQQpGYQubLW0NdaYRqyE/Vud3JUJUb8Texld6dz8vGemyB5d1YvtSeHIo8/BGv2msOqR3u5AZTaGCBD9DhpSGOKHEdNjTtvpPd8S8gAAAIBociGZ5jf09iHLVENhyXujJbxfGRPsyNTyARJfCOGl0oFV6hEzcQyw8U/ePwjgvjc2UizMWLl8tsb2FXKHRdc2v+ND3Us+XqKQ33X3ADP4FZ/+Oj213gMyhCmvFTP0u5FmHog9My4CB7YcIWRuUR42WlhQ2IfPvKwUoTk3R+T6Og== www-data@mk diff --git a/nova/tests/test_api.py b/nova/tests/test_api.py index fa0e56597..7c0331eff 100644 --- a/nova/tests/test_api.py +++ b/nova/tests/test_api.py @@ -28,10 +28,12 @@ import StringIO import webob from nova import context +from nova import exception from nova import test from nova.api import ec2 -from nova.api.ec2 import cloud from nova.api.ec2 import apirequest +from nova.api.ec2 import cloud +from nova.api.ec2 import ec2utils from nova.auth import manager @@ -101,6 +103,21 @@ class XmlConversionTestCase(test.TestCase): self.assertEqual(conv('-0'), 0) +class Ec2utilsTestCase(test.TestCase): + def test_ec2_id_to_id(self): + self.assertEqual(ec2utils.ec2_id_to_id('i-0000001e'), 30) + self.assertEqual(ec2utils.ec2_id_to_id('ami-1d'), 29) + + def test_bad_ec2_id(self): + self.assertRaises(exception.InvalidEc2Id, + ec2utils.ec2_id_to_id, + 'badone') + + def test_id_to_ec2_id(self): + self.assertEqual(ec2utils.id_to_ec2_id(30), 'i-0000001e') + self.assertEqual(ec2utils.id_to_ec2_id(29, 'ami-%08x'), 'ami-0000001d') + + class ApiEc2TestCase(test.TestCase): """Unit test for the cloud controller on an EC2 API""" def setUp(self): @@ -207,6 +224,29 @@ class ApiEc2TestCase(test.TestCase): self.manager.delete_project(project) self.manager.delete_user(user) + def test_create_duplicate_key_pair(self): + """Test that, after successfully generating a keypair, + requesting a second keypair with 
the same name fails sanely""" + self.expect_http() + self.mox.ReplayAll() + keyname = "".join(random.choice("sdiuisudfsdcnpaqwertasd") \ + for x in range(random.randint(4, 8))) + user = self.manager.create_user('fake', 'fake', 'fake') + project = self.manager.create_project('fake', 'fake', 'fake') + # NOTE(vish): create depends on pool, so call helper directly + self.ec2.create_key_pair('test') + + try: + self.ec2.create_key_pair('test') + except EC2ResponseError, e: + if e.code == 'KeyPairExists': + pass + else: + self.fail("Unexpected EC2ResponseError: %s " + "(expected KeyPairExists)" % e.code) + else: + self.fail('Exception not raised.') + def test_get_all_security_groups(self): """Test that we can retrieve security groups""" self.expect_http() diff --git a/nova/tests/test_auth.py b/nova/tests/test_auth.py index f8a1b1564..f02dd94b7 100644 --- a/nova/tests/test_auth.py +++ b/nova/tests/test_auth.py @@ -101,9 +101,43 @@ class _AuthManagerBaseTestCase(test.TestCase): self.assertEqual('private-party', u.access) def test_004_signature_is_valid(self): - #self.assertTrue(self.manager.authenticate(**boto.generate_url ...? 
)) - pass - #raise NotImplementedError + with user_generator(self.manager, name='admin', secret='admin', + access='admin'): + with project_generator(self.manager, name="admin", + manager_user='admin'): + accesskey = 'admin:admin' + expected_result = (self.manager.get_user('admin'), + self.manager.get_project('admin')) + # captured sig and query string using boto 1.9b/euca2ools 1.2 + sig = 'd67Wzd9Bwz8xid9QU+lzWXcF2Y3tRicYABPJgrqfrwM=' + auth_params = {'AWSAccessKeyId': 'admin:admin', + 'Action': 'DescribeAvailabilityZones', + 'SignatureMethod': 'HmacSHA256', + 'SignatureVersion': '2', + 'Timestamp': '2011-04-22T11:29:29', + 'Version': '2009-11-30'} + self.assertTrue(expected_result, self.manager.authenticate( + accesskey, + sig, + auth_params, + 'GET', + '127.0.0.1:8773', + '/services/Cloud/')) + # captured sig and query string using RightAWS 1.10.0 + sig = 'ECYLU6xdFG0ZqRVhQybPJQNJ5W4B9n8fGs6+/fuGD2c=' + auth_params = {'AWSAccessKeyId': 'admin:admin', + 'Action': 'DescribeAvailabilityZones', + 'SignatureMethod': 'HmacSHA256', + 'SignatureVersion': '2', + 'Timestamp': '2011-04-22T11:29:49.000Z', + 'Version': '2008-12-01'} + self.assertTrue(expected_result, self.manager.authenticate( + accesskey, + sig, + auth_params, + 'GET', + '127.0.0.1', + '/services/Cloud')) def test_005_can_get_credentials(self): return diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 5cb969979..54c0454de 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -36,6 +36,7 @@ from nova import rpc from nova import service from nova import test from nova import utils +from nova import exception from nova.auth import manager from nova.compute import power_state from nova.api.ec2 import cloud @@ -247,6 +248,57 @@ class CloudTestCase(test.TestCase): self.assertRaises(NotFound, describe_images, self.context, ['ami-fake']) + def test_describe_image_attribute(self): + describe_image_attribute = self.cloud.describe_image_attribute + + def fake_show(meh, context, 
id): + return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1, + 'type': 'machine'}, 'is_public': True} + + self.stubs.Set(local.LocalImageService, 'show', fake_show) + self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show) + result = describe_image_attribute(self.context, 'ami-00000001', + 'launchPermission') + self.assertEqual([{'group': 'all'}], result['launchPermission']) + + def test_modify_image_attribute(self): + modify_image_attribute = self.cloud.modify_image_attribute + + def fake_show(meh, context, id): + return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1, + 'type': 'machine'}, 'is_public': False} + + def fake_update(meh, context, image_id, metadata, data=None): + return metadata + + self.stubs.Set(local.LocalImageService, 'show', fake_show) + self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show) + self.stubs.Set(local.LocalImageService, 'update', fake_update) + result = modify_image_attribute(self.context, 'ami-00000001', + 'launchPermission', 'add', + user_group=['all']) + self.assertEqual(True, result['is_public']) + + def test_deregister_image(self): + deregister_image = self.cloud.deregister_image + + def fake_delete(self, context, id): + return None + + self.stubs.Set(local.LocalImageService, 'delete', fake_delete) + # valid image + result = deregister_image(self.context, 'ami-00000001') + self.assertEqual(result['imageId'], 'ami-00000001') + # invalid image + self.stubs.UnsetAll() + + def fake_detail_empty(self, context): + return [] + + self.stubs.Set(local.LocalImageService, 'detail', fake_detail_empty) + self.assertRaises(exception.ImageNotFound, deregister_image, + self.context, 'ami-bad001') + def test_console_output(self): instance_type = FLAGS.default_instance_type max_count = 1 @@ -258,7 +310,7 @@ class CloudTestCase(test.TestCase): instance_id = rv['instancesSet'][0]['instanceId'] output = self.cloud.get_console_output(context=self.context, instance_id=[instance_id]) - 
self.assertEquals(b64decode(output['output']), 'FAKE CONSOLE OUTPUT') + self.assertEquals(b64decode(output['output']), 'FAKE CONSOLE?OUTPUT') # TODO(soren): We need this until we can stop polling in the rpc code # for unit tests. greenthread.sleep(0.3) @@ -302,44 +354,52 @@ class CloudTestCase(test.TestCase): self.assertTrue(filter(lambda k: k['keyName'] == 'test1', keys)) self.assertTrue(filter(lambda k: k['keyName'] == 'test2', keys)) + def test_import_public_key(self): + # test when user provides all values + result1 = self.cloud.import_public_key(self.context, + 'testimportkey1', + 'mytestpubkey', + 'mytestfprint') + self.assertTrue(result1) + keydata = db.key_pair_get(self.context, + self.context.user.id, + 'testimportkey1') + self.assertEqual('mytestpubkey', keydata['public_key']) + self.assertEqual('mytestfprint', keydata['fingerprint']) + # test when user omits fingerprint + pubkey_path = os.path.join(os.path.dirname(__file__), 'public_key') + f = open(pubkey_path + '/dummy.pub', 'r') + dummypub = f.readline().rstrip() + f.close + f = open(pubkey_path + '/dummy.fingerprint', 'r') + dummyfprint = f.readline().rstrip() + f.close + result2 = self.cloud.import_public_key(self.context, + 'testimportkey2', + dummypub) + self.assertTrue(result2) + keydata = db.key_pair_get(self.context, + self.context.user.id, + 'testimportkey2') + self.assertEqual(dummypub, keydata['public_key']) + self.assertEqual(dummyfprint, keydata['fingerprint']) + def test_delete_key_pair(self): self._create_key('test') self.cloud.delete_key_pair(self.context, 'test') - def test_run_instances(self): - if FLAGS.connection_type == 'fake': - LOG.debug(_("Can't test instances without a real virtual env.")) - return - image_id = FLAGS.default_image - instance_type = FLAGS.default_instance_type - max_count = 1 - kwargs = {'image_id': image_id, - 'instance_type': instance_type, - 'max_count': max_count} - rv = self.cloud.run_instances(self.context, **kwargs) - # TODO: check for proper response - 
instance_id = rv['reservationSet'][0].keys()[0] - instance = rv['reservationSet'][0][instance_id][0] - LOG.debug(_("Need to watch instance %s until it's running..."), - instance['instance_id']) - while True: - greenthread.sleep(1) - info = self.cloud._get_instance(instance['instance_id']) - LOG.debug(info['state']) - if info['state'] == power_state.RUNNING: - break - self.assert_(rv) - - if FLAGS.connection_type != 'fake': - time.sleep(45) # Should use boto for polling here - for reservations in rv['reservationSet']: - # for res_id in reservations.keys(): - # LOG.debug(reservations[res_id]) - # for instance in reservations[res_id]: - for instance in reservations[reservations.keys()[0]]: - instance_id = instance['instance_id'] - LOG.debug(_("Terminating instance %s"), instance_id) - rv = self.compute.terminate_instance(instance_id) + def test_terminate_instances(self): + inst1 = db.instance_create(self.context, {'reservation_id': 'a', + 'image_id': 1, + 'host': 'host1'}) + terminate_instances = self.cloud.terminate_instances + # valid instance_id + result = terminate_instances(self.context, ['i-00000001']) + self.assertTrue(result) + # non-existing instance_id + self.assertRaises(exception.InstanceNotFound, terminate_instances, + self.context, ['i-2']) + db.instance_destroy(self.context, inst1['id']) def test_update_of_instance_display_fields(self): inst = db.instance_create(self.context, {}) diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 1b0f426d2..9170837b6 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -21,6 +21,7 @@ Tests For Compute import datetime import mox +import stubout from nova import compute from nova import context @@ -52,6 +53,10 @@ class FakeTime(object): self.counter += t +def nop_report_driver_status(self): + pass + + class ComputeTestCase(test.TestCase): """Test case for compute""" def setUp(self): @@ -84,7 +89,8 @@ class ComputeTestCase(test.TestCase): inst['launch_time'] = '10' 
inst['user_id'] = self.user.id inst['project_id'] = self.project.id - inst['instance_type'] = 'm1.tiny' + type_id = instance_types.get_instance_type_by_name('m1.tiny')['id'] + inst['instance_type_id'] = type_id inst['mac_address'] = utils.generate_mac() inst['ami_launch_index'] = 0 inst.update(params) @@ -132,7 +138,7 @@ class ComputeTestCase(test.TestCase): cases = [dict(), dict(display_name=None)] for instance in cases: ref = self.compute_api.create(self.context, - FLAGS.default_instance_type, None, **instance) + instance_types.get_default_instance_type(), None, **instance) try: self.assertNotEqual(ref[0]['display_name'], None) finally: @@ -143,7 +149,7 @@ class ComputeTestCase(test.TestCase): group = self._create_group() ref = self.compute_api.create( self.context, - instance_type=FLAGS.default_instance_type, + instance_type=instance_types.get_default_instance_type(), image_id=None, security_group=['testgroup']) try: @@ -161,7 +167,7 @@ class ComputeTestCase(test.TestCase): ref = self.compute_api.create( self.context, - instance_type=FLAGS.default_instance_type, + instance_type=instance_types.get_default_instance_type(), image_id=None, security_group=['testgroup']) try: @@ -177,7 +183,7 @@ class ComputeTestCase(test.TestCase): ref = self.compute_api.create( self.context, - instance_type=FLAGS.default_instance_type, + instance_type=instance_types.get_default_instance_type(), image_id=None, security_group=['testgroup']) @@ -328,6 +334,28 @@ class ComputeTestCase(test.TestCase): self.compute.terminate_instance(self.context, instance_id) + def test_finish_resize(self): + """Contrived test to ensure finish_resize doesn't raise anything""" + + def fake(*args, **kwargs): + pass + + self.stubs.Set(self.compute.driver, 'finish_resize', fake) + context = self.context.elevated() + instance_id = self._create_instance() + self.compute.prep_resize(context, instance_id, 1) + migration_ref = db.migration_get_by_instance_and_status(context, + instance_id, 'pre-migrating') + try: 
+ self.compute.finish_resize(context, instance_id, + int(migration_ref['id']), {}) + except KeyError, e: + # Only catch key errors. We want other reasons for the test to + # fail to actually error out so we don't obscure anything + self.fail() + + self.compute.terminate_instance(self.context, instance_id) + def test_resize_instance(self): """Ensure instance can be migrated/resized""" instance_id = self._create_instance() @@ -359,8 +387,9 @@ class ComputeTestCase(test.TestCase): instance_id = self._create_instance() self.compute.run_instance(self.context, instance_id) + inst_type = instance_types.get_instance_type_by_name('m1.xlarge') db.instance_update(self.context, instance_id, - {'instance_type': 'm1.xlarge'}) + {'instance_type_id': inst_type['id']}) self.assertRaises(exception.ApiError, self.compute_api.resize, context, instance_id, 1) @@ -380,8 +409,8 @@ class ComputeTestCase(test.TestCase): self.compute.terminate_instance(context, instance_id) def test_get_by_flavor_id(self): - type = instance_types.get_by_flavor_id(1) - self.assertEqual(type, 'm1.tiny') + type = instance_types.get_instance_type_by_flavor_id(1) + self.assertEqual(type['name'], 'm1.tiny') def test_resize_same_source_fails(self): """Ensure instance fails to migrate when source and destination are @@ -647,6 +676,10 @@ class ComputeTestCase(test.TestCase): def test_run_kill_vm(self): """Detect when a vm is terminated behind the scenes""" + self.stubs = stubout.StubOutForTesting() + self.stubs.Set(compute_manager.ComputeManager, + '_report_driver_status', nop_report_driver_status) + instance_id = self._create_instance() self.compute.run_instance(self.context, instance_id) @@ -664,4 +697,5 @@ class ComputeTestCase(test.TestCase): instances = db.instance_get_all(context.get_admin_context()) LOG.info(_("After force-killing instances: %s"), instances) - self.assertEqual(len(instances), 0) + self.assertEqual(len(instances), 1) + self.assertEqual(power_state.SHUTOFF, instances[0]['state']) diff --git 
a/nova/tests/test_console.py b/nova/tests/test_console.py index d47c70d88..1a9a867ee 100644 --- a/nova/tests/test_console.py +++ b/nova/tests/test_console.py @@ -62,7 +62,7 @@ class ConsoleTestCase(test.TestCase): inst['launch_time'] = '10' inst['user_id'] = self.user.id inst['project_id'] = self.project.id - inst['instance_type'] = 'm1.tiny' + inst['instance_type_id'] = 1 inst['mac_address'] = utils.generate_mac() inst['ami_launch_index'] = 0 return db.instance_create(self.context, inst)['id'] diff --git a/nova/tests/test_crypto.py b/nova/tests/test_crypto.py new file mode 100644 index 000000000..945d78794 --- /dev/null +++ b/nova/tests/test_crypto.py @@ -0,0 +1,48 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Tests for Crypto module. +""" + +from nova import crypto +from nova import test + + +class SymmetricKeyTestCase(test.TestCase): + """Test case for Encrypt/Decrypt""" + def test_encrypt_decrypt(self): + key = 'c286696d887c9aa0611bbb3e2025a45a' + plain_text = "The quick brown fox jumped over the lazy dog." + + # No IV supplied (all 0's) + encrypt = crypto.encryptor(key) + cipher_text = encrypt(plain_text) + self.assertNotEquals(plain_text, cipher_text) + + decrypt = crypto.decryptor(key) + plain = decrypt(cipher_text) + + self.assertEquals(plain_text, plain) + + # IV supplied ... 
+ iv = '562e17996d093d28ddb3ba695a2e6f58' + encrypt = crypto.encryptor(key, iv) + cipher_text = encrypt(plain_text) + self.assertNotEquals(plain_text, cipher_text) + + decrypt = crypto.decryptor(key, iv) + plain = decrypt(cipher_text) + + self.assertEquals(plain_text, plain) diff --git a/nova/tests/test_exception.py b/nova/tests/test_exception.py new file mode 100644 index 000000000..4d3b9cc73 --- /dev/null +++ b/nova/tests/test_exception.py @@ -0,0 +1,34 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from nova import test +from nova import exception + + +class ApiErrorTestCase(test.TestCase): + def test_return_valid_error(self): + # without 'code' arg + err = exception.ApiError('fake error') + self.assertEqual(err.__str__(), 'fake error') + self.assertEqual(err.code, None) + self.assertEqual(err.msg, 'fake error') + # with 'code' arg + err = exception.ApiError('fake error', 'blah code') + self.assertEqual(err.__str__(), 'blah code: fake error') + self.assertEqual(err.code, 'blah code') + self.assertEqual(err.msg, 'fake error') diff --git a/nova/tests/test_flags.py b/nova/tests/test_flags.py index 707300fcf..05319d91f 100644 --- a/nova/tests/test_flags.py +++ b/nova/tests/test_flags.py @@ -91,6 +91,20 @@ class FlagsTestCase(test.TestCase): self.assert_('runtime_answer' in self.global_FLAGS) self.assertEqual(self.global_FLAGS.runtime_answer, 60) + def test_long_vs_short_flags(self): + flags.DEFINE_string('duplicate_answer_long', 'val', 'desc', + flag_values=self.global_FLAGS) + argv = ['flags_test', '--duplicate_answer=60', 'extra_arg'] + args = self.global_FLAGS(argv) + + self.assert_('duplicate_answer' not in self.global_FLAGS) + self.assert_(self.global_FLAGS.duplicate_answer_long, 60) + + flags.DEFINE_integer('duplicate_answer', 60, 'desc', + flag_values=self.global_FLAGS) + self.assertEqual(self.global_FLAGS.duplicate_answer, 60) + self.assertEqual(self.global_FLAGS.duplicate_answer_long, 'val') + def test_flag_leak_left(self): self.assertEqual(FLAGS.flags_unittest, 'foo') FLAGS.flags_unittest = 'bar' diff --git a/nova/tests/test_host_filter.py b/nova/tests/test_host_filter.py new file mode 100644 index 000000000..c029d41e6 --- /dev/null +++ b/nova/tests/test_host_filter.py @@ -0,0 +1,208 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Tests For Scheduler Host Filter Drivers. +""" + +import json + +from nova import exception +from nova import flags +from nova import test +from nova.scheduler import host_filter + +FLAGS = flags.FLAGS + + +class FakeZoneManager: + pass + + +class HostFilterTestCase(test.TestCase): + """Test case for host filter drivers.""" + + def _host_caps(self, multiplier): + # Returns host capabilities in the following way: + # host1 = memory:free 10 (100max) + # disk:available 100 (1000max) + # hostN = memory:free 10 + 10N + # disk:available 100 + 100N + # in other words: hostN has more resources than host0 + # which means ... don't go above 10 hosts. 
+ return {'host_name-description': 'XenServer %s' % multiplier, + 'host_hostname': 'xs-%s' % multiplier, + 'host_memory_total': 100, + 'host_memory_overhead': 10, + 'host_memory_free': 10 + multiplier * 10, + 'host_memory_free-computed': 10 + multiplier * 10, + 'host_other-config': {}, + 'host_ip_address': '192.168.1.%d' % (100 + multiplier), + 'host_cpu_info': {}, + 'disk_available': 100 + multiplier * 100, + 'disk_total': 1000, + 'disk_used': 0, + 'host_uuid': 'xxx-%d' % multiplier, + 'host_name-label': 'xs-%s' % multiplier} + + def setUp(self): + self.old_flag = FLAGS.default_host_filter_driver + FLAGS.default_host_filter_driver = \ + 'nova.scheduler.host_filter.AllHostsFilter' + self.instance_type = dict(name='tiny', + memory_mb=50, + vcpus=10, + local_gb=500, + flavorid=1, + swap=500, + rxtx_quota=30000, + rxtx_cap=200) + + self.zone_manager = FakeZoneManager() + states = {} + for x in xrange(10): + states['host%02d' % (x + 1)] = {'compute': self._host_caps(x)} + self.zone_manager.service_states = states + + def tearDown(self): + FLAGS.default_host_filter_driver = self.old_flag + + def test_choose_driver(self): + # Test default driver ... + driver = host_filter.choose_driver() + self.assertEquals(driver._full_name(), + 'nova.scheduler.host_filter.AllHostsFilter') + # Test valid driver ... + driver = host_filter.choose_driver( + 'nova.scheduler.host_filter.FlavorFilter') + self.assertEquals(driver._full_name(), + 'nova.scheduler.host_filter.FlavorFilter') + # Test invalid driver ... 
+ try: + host_filter.choose_driver('does not exist') + self.fail("Should not find driver") + except exception.SchedulerHostFilterDriverNotFound: + pass + + def test_all_host_driver(self): + driver = host_filter.AllHostsFilter() + cooked = driver.instance_type_to_filter(self.instance_type) + hosts = driver.filter_hosts(self.zone_manager, cooked) + self.assertEquals(10, len(hosts)) + for host, capabilities in hosts: + self.assertTrue(host.startswith('host')) + + def test_flavor_driver(self): + driver = host_filter.FlavorFilter() + # filter all hosts that can support 50 ram and 500 disk + name, cooked = driver.instance_type_to_filter(self.instance_type) + self.assertEquals('nova.scheduler.host_filter.FlavorFilter', name) + hosts = driver.filter_hosts(self.zone_manager, cooked) + self.assertEquals(6, len(hosts)) + just_hosts = [host for host, caps in hosts] + just_hosts.sort() + self.assertEquals('host05', just_hosts[0]) + self.assertEquals('host10', just_hosts[5]) + + def test_json_driver(self): + driver = host_filter.JsonFilter() + # filter all hosts that can support 50 ram and 500 disk + name, cooked = driver.instance_type_to_filter(self.instance_type) + self.assertEquals('nova.scheduler.host_filter.JsonFilter', name) + hosts = driver.filter_hosts(self.zone_manager, cooked) + self.assertEquals(6, len(hosts)) + just_hosts = [host for host, caps in hosts] + just_hosts.sort() + self.assertEquals('host05', just_hosts[0]) + self.assertEquals('host10', just_hosts[5]) + + # Try some custom queries + + raw = ['or', + ['and', + ['<', '$compute.host_memory_free', 30], + ['<', '$compute.disk_available', 300] + ], + ['and', + ['>', '$compute.host_memory_free', 70], + ['>', '$compute.disk_available', 700] + ] + ] + cooked = json.dumps(raw) + hosts = driver.filter_hosts(self.zone_manager, cooked) + + self.assertEquals(5, len(hosts)) + just_hosts = [host for host, caps in hosts] + just_hosts.sort() + for index, host in zip([1, 2, 8, 9, 10], just_hosts): + 
self.assertEquals('host%02d' % index, host) + + raw = ['not', + ['=', '$compute.host_memory_free', 30], + ] + cooked = json.dumps(raw) + hosts = driver.filter_hosts(self.zone_manager, cooked) + + self.assertEquals(9, len(hosts)) + just_hosts = [host for host, caps in hosts] + just_hosts.sort() + for index, host in zip([1, 2, 4, 5, 6, 7, 8, 9, 10], just_hosts): + self.assertEquals('host%02d' % index, host) + + raw = ['in', '$compute.host_memory_free', 20, 40, 60, 80, 100] + cooked = json.dumps(raw) + hosts = driver.filter_hosts(self.zone_manager, cooked) + + self.assertEquals(5, len(hosts)) + just_hosts = [host for host, caps in hosts] + just_hosts.sort() + for index, host in zip([2, 4, 6, 8, 10], just_hosts): + self.assertEquals('host%02d' % index, host) + + # Try some bogus input ... + raw = ['unknown command', ] + cooked = json.dumps(raw) + try: + driver.filter_hosts(self.zone_manager, cooked) + self.fail("Should give KeyError") + except KeyError, e: + pass + + self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps([]))) + self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps({}))) + self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps( + ['not', True, False, True, False] + ))) + + try: + driver.filter_hosts(self.zone_manager, json.dumps( + 'not', True, False, True, False + )) + self.fail("Should give KeyError") + except KeyError, e: + pass + + self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps( + ['=', '$foo', 100] + ))) + self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps( + ['=', '$.....', 100] + ))) + self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps( + ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]] + ))) + + self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps( + ['=', {}, ['>', '$missing....foo']] + ))) diff --git a/nova/tests/test_instance_types.py b/nova/tests/test_instance_types.py index edc538879..ef271518c 100644 --- 
a/nova/tests/test_instance_types.py +++ b/nova/tests/test_instance_types.py @@ -40,7 +40,11 @@ class InstanceTypeTestCase(test.TestCase): max_flavorid = session.query(models.InstanceTypes).\ order_by("flavorid desc").\ first() + max_id = session.query(models.InstanceTypes).\ + order_by("id desc").\ + first() self.flavorid = max_flavorid["flavorid"] + 1 + self.id = max_id["id"] + 1 self.name = str(int(time.time())) def test_instance_type_create_then_delete(self): @@ -53,7 +57,7 @@ class InstanceTypeTestCase(test.TestCase): 'instance type was not created') instance_types.destroy(self.name) self.assertEqual(1, - instance_types.get_instance_type(self.name)["deleted"]) + instance_types.get_instance_type(self.id)["deleted"]) self.assertEqual(starting_inst_list, instance_types.get_all_types()) instance_types.purge(self.name) self.assertEqual(len(starting_inst_list), @@ -71,16 +75,25 @@ class InstanceTypeTestCase(test.TestCase): def test_invalid_create_args_should_fail(self): """Ensures that instance type creation fails with invalid args""" self.assertRaises( - exception.InvalidInputException, + exception.InvalidInput, instance_types.create, self.name, 0, 1, 120, self.flavorid) self.assertRaises( - exception.InvalidInputException, + exception.InvalidInput, instance_types.create, self.name, 256, -1, 120, self.flavorid) self.assertRaises( - exception.InvalidInputException, + exception.InvalidInput, instance_types.create, self.name, 256, 1, "aa", self.flavorid) def test_non_existant_inst_type_shouldnt_delete(self): """Ensures that instance type creation fails with invalid args""" self.assertRaises(exception.ApiError, instance_types.destroy, "sfsfsdfdfs") + + def test_repeated_inst_types_should_raise_api_error(self): + """Ensures that instance duplicates raises ApiError""" + new_name = self.name + "dup" + instance_types.create(new_name, 256, 1, 120, self.flavorid + 1) + instance_types.destroy(new_name) + self.assertRaises( + exception.ApiError, + instance_types.create, 
new_name, 256, 1, 120, self.flavorid) diff --git a/nova/tests/test_ipv6.py b/nova/tests/test_ipv6.py new file mode 100644 index 000000000..11dc2ec98 --- /dev/null +++ b/nova/tests/test_ipv6.py @@ -0,0 +1,60 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 OpenStack LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Test suite for IPv6.""" + +from nova import flags +from nova import ipv6 +from nova import log as logging +from nova import test + +LOG = logging.getLogger('nova.tests.test_ipv6') + +FLAGS = flags.FLAGS + +import sys + + +class IPv6RFC2462TestCase(test.TestCase): + """Unit tests for IPv6 rfc2462 backend operations.""" + def setUp(self): + super(IPv6RFC2462TestCase, self).setUp() + self.flags(ipv6_backend='rfc2462') + ipv6.reset_backend() + + def test_to_global(self): + addr = ipv6.to_global('2001:db8::', '02:16:3e:33:44:55', 'test') + self.assertEquals(addr, '2001:db8::16:3eff:fe33:4455') + + def test_to_mac(self): + mac = ipv6.to_mac('2001:db8::216:3eff:fe33:4455') + self.assertEquals(mac, '00:16:3e:33:44:55') + + +class IPv6AccountIdentiferTestCase(test.TestCase): + """Unit tests for IPv6 account_identifier backend operations.""" + def setUp(self): + super(IPv6AccountIdentiferTestCase, self).setUp() + self.flags(ipv6_backend='account_identifier') + ipv6.reset_backend() + + def test_to_global(self): + addr = ipv6.to_global('2001:db8::', '02:16:3e:33:44:55', 'test') + self.assertEquals(addr, '2001:db8::a94a:8fe5:ff33:4455') + + 
def test_to_mac(self): + mac = ipv6.to_mac('2001:db8::a94a:8fe5:ff33:4455') + self.assertEquals(mac, '02:16:3e:33:44:55') diff --git a/nova/tests/test_virt.py b/nova/tests/test_libvirt.py index 7c3dbf654..1743b09a2 100644 --- a/nova/tests/test_virt.py +++ b/nova/tests/test_libvirt.py @@ -31,10 +31,9 @@ from nova import test from nova import utils from nova.api.ec2 import cloud from nova.auth import manager -from nova.compute import manager as compute_manager from nova.compute import power_state -from nova.db.sqlalchemy import models -from nova.virt import libvirt_conn +from nova.virt.libvirt import connection +from nova.virt.libvirt import firewall libvirt = None FLAGS = flags.FLAGS @@ -46,6 +45,27 @@ def _concurrency(wait, done, target): done.send() +def _create_network_info(count=1, ipv6=None): + if ipv6 is None: + ipv6 = FLAGS.use_ipv6 + fake = 'fake' + fake_ip = '0.0.0.0/0' + fake_ip_2 = '0.0.0.1/0' + fake_ip_3 = '0.0.0.1/0' + network = {'gateway': fake, + 'gateway_v6': fake, + 'bridge': fake, + 'cidr': fake_ip, + 'cidr_v6': fake_ip} + mapping = {'mac': fake, + 'ips': [{'ip': fake_ip}, {'ip': fake_ip}]} + if ipv6: + mapping['ip6s'] = [{'ip': fake_ip}, + {'ip': fake_ip_2}, + {'ip': fake_ip_3}] + return [(network, mapping) for x in xrange(0, count)] + + class CacheConcurrencyTestCase(test.TestCase): def setUp(self): super(CacheConcurrencyTestCase, self).setUp() @@ -64,7 +84,7 @@ class CacheConcurrencyTestCase(test.TestCase): def test_same_fname_concurrency(self): """Ensures that the same fname cache runs at a sequentially""" - conn = libvirt_conn.LibvirtConnection + conn = connection.LibvirtConnection wait1 = eventlet.event.Event() done1 = eventlet.event.Event() eventlet.spawn(conn._cache_image, _concurrency, @@ -85,7 +105,7 @@ class CacheConcurrencyTestCase(test.TestCase): def test_different_fname_concurrency(self): """Ensures that two different fname caches are concurrent""" - conn = libvirt_conn.LibvirtConnection + conn = connection.LibvirtConnection wait1 = 
eventlet.event.Event() done1 = eventlet.event.Event() eventlet.spawn(conn._cache_image, _concurrency, @@ -106,7 +126,7 @@ class CacheConcurrencyTestCase(test.TestCase): class LibvirtConnTestCase(test.TestCase): def setUp(self): super(LibvirtConnTestCase, self).setUp() - libvirt_conn._late_load_cheetah() + connection._late_load_cheetah() self.flags(fake_call=True) self.manager = manager.AuthManager() @@ -140,7 +160,7 @@ class LibvirtConnTestCase(test.TestCase): 'vcpus': 2, 'project_id': 'fake', 'bridge': 'br101', - 'instance_type': 'm1.small'} + 'instance_type_id': '5'} # m1.small def lazy_load_library_exists(self): """check if libvirt is available.""" @@ -152,8 +172,8 @@ class LibvirtConnTestCase(test.TestCase): return False global libvirt libvirt = __import__('libvirt') - libvirt_conn.libvirt = __import__('libvirt') - libvirt_conn.libxml2 = __import__('libxml2') + connection.libvirt = __import__('libvirt') + connection.libxml2 = __import__('libxml2') return True def create_fake_libvirt_mock(self, **kwargs): @@ -163,7 +183,7 @@ class LibvirtConnTestCase(test.TestCase): class FakeLibvirtConnection(object): pass - # A fake libvirt_conn.IptablesFirewallDriver + # A fake connection.IptablesFirewallDriver class FakeIptablesFirewallDriver(object): def __init__(self, **kwargs): @@ -179,11 +199,11 @@ class LibvirtConnTestCase(test.TestCase): for key, val in kwargs.items(): fake.__setattr__(key, val) - # Inevitable mocks for libvirt_conn.LibvirtConnection - self.mox.StubOutWithMock(libvirt_conn.utils, 'import_class') - libvirt_conn.utils.import_class(mox.IgnoreArg()).AndReturn(fakeip) - self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn') - libvirt_conn.LibvirtConnection._conn = fake + # Inevitable mocks for connection.LibvirtConnection + self.mox.StubOutWithMock(connection.utils, 'import_class') + connection.utils.import_class(mox.IgnoreArg()).AndReturn(fakeip) + self.mox.StubOutWithMock(connection.LibvirtConnection, '_conn') + 
connection.LibvirtConnection._conn = fake def create_service(self, **kwargs): service_ref = {'host': kwargs.get('host', 'dummy'), @@ -194,6 +214,37 @@ class LibvirtConnTestCase(test.TestCase): return db.service_create(context.get_admin_context(), service_ref) + def test_preparing_xml_info(self): + conn = connection.LibvirtConnection(True) + instance_ref = db.instance_create(self.context, self.test_instance) + + result = conn._prepare_xml_info(instance_ref, False) + self.assertFalse(result['nics']) + + result = conn._prepare_xml_info(instance_ref, False, + _create_network_info()) + self.assertTrue(len(result['nics']) == 1) + + result = conn._prepare_xml_info(instance_ref, False, + _create_network_info(2)) + self.assertTrue(len(result['nics']) == 2) + + def test_get_nic_for_xml_v4(self): + conn = connection.LibvirtConnection(True) + network, mapping = _create_network_info()[0] + self.flags(use_ipv6=False) + params = conn._get_nic_for_xml(network, mapping)['extra_params'] + self.assertTrue(params.find('PROJNETV6') == -1) + self.assertTrue(params.find('PROJMASKV6') == -1) + + def test_get_nic_for_xml_v6(self): + conn = connection.LibvirtConnection(True) + network, mapping = _create_network_info()[0] + self.flags(use_ipv6=True) + params = conn._get_nic_for_xml(network, mapping)['extra_params'] + self.assertTrue(params.find('PROJNETV6') > -1) + self.assertTrue(params.find('PROJMASKV6') > -1) + def test_xml_and_uri_no_ramdisk_no_kernel(self): instance_data = dict(self.test_instance) self._check_xml_and_uri(instance_data, @@ -229,6 +280,22 @@ class LibvirtConnTestCase(test.TestCase): instance_data = dict(self.test_instance) self._check_xml_and_container(instance_data) + def test_multi_nic(self): + instance_data = dict(self.test_instance) + network_info = _create_network_info(2) + conn = connection.LibvirtConnection(True) + instance_ref = db.instance_create(self.context, instance_data) + xml = conn.to_xml(instance_ref, False, network_info) + tree = xml_to_tree(xml) + 
interfaces = tree.findall("./devices/interface") + self.assertEquals(len(interfaces), 2) + parameters = interfaces[0].findall('./filterref/parameter') + self.assertEquals(interfaces[0].get('type'), 'bridge') + self.assertEquals(parameters[0].get('name'), 'IP') + self.assertEquals(parameters[0].get('value'), '0.0.0.0/0') + self.assertEquals(parameters[1].get('name'), 'DHCPSERVER') + self.assertEquals(parameters[1].get('value'), 'fake') + def _check_xml_and_container(self, instance): user_context = context.RequestContext(project=self.project, user=self.user) @@ -247,7 +314,7 @@ class LibvirtConnTestCase(test.TestCase): 'instance_id': instance_ref['id']}) self.flags(libvirt_type='lxc') - conn = libvirt_conn.LibvirtConnection(True) + conn = connection.LibvirtConnection(True) uri = conn.get_uri() self.assertEquals(uri, 'lxc:///') @@ -327,19 +394,13 @@ class LibvirtConnTestCase(test.TestCase): check = (lambda t: t.find('./os/initrd'), None) check_list.append(check) + parameter = './devices/interface/filterref/parameter' common_checks = [ (lambda t: t.find('.').tag, 'domain'), - (lambda t: t.find( - './devices/interface/filterref/parameter').get('name'), 'IP'), - (lambda t: t.find( - './devices/interface/filterref/parameter').get( - 'value'), '10.11.12.13'), - (lambda t: t.findall( - './devices/interface/filterref/parameter')[1].get( - 'name'), 'DHCPSERVER'), - (lambda t: t.findall( - './devices/interface/filterref/parameter')[1].get( - 'value'), '10.0.0.1'), + (lambda t: t.find(parameter).get('name'), 'IP'), + (lambda t: t.find(parameter).get('value'), '10.11.12.13'), + (lambda t: t.findall(parameter)[1].get('name'), 'DHCPSERVER'), + (lambda t: t.findall(parameter)[1].get('value'), '10.0.0.1'), (lambda t: t.find('./devices/serial/source').get( 'path').split('/')[1], 'console.log'), (lambda t: t.find('./memory').text, '2097152')] @@ -359,7 +420,7 @@ class LibvirtConnTestCase(test.TestCase): for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems(): 
FLAGS.libvirt_type = libvirt_type - conn = libvirt_conn.LibvirtConnection(True) + conn = connection.LibvirtConnection(True) uri = conn.get_uri() self.assertEquals(uri, expected_uri) @@ -386,7 +447,7 @@ class LibvirtConnTestCase(test.TestCase): FLAGS.libvirt_uri = testuri for (libvirt_type, (expected_uri, checks)) in type_uri_map.iteritems(): FLAGS.libvirt_type = libvirt_type - conn = libvirt_conn.LibvirtConnection(True) + conn = connection.LibvirtConnection(True) uri = conn.get_uri() self.assertEquals(uri, testuri) db.instance_destroy(user_context, instance_ref['id']) @@ -410,13 +471,13 @@ class LibvirtConnTestCase(test.TestCase): self.create_fake_libvirt_mock(getVersion=getVersion, getType=getType, listDomainsID=listDomainsID) - self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, + self.mox.StubOutWithMock(connection.LibvirtConnection, 'get_cpu_info') - libvirt_conn.LibvirtConnection.get_cpu_info().AndReturn('cpuinfo') + connection.LibvirtConnection.get_cpu_info().AndReturn('cpuinfo') # Start test self.mox.ReplayAll() - conn = libvirt_conn.LibvirtConnection(False) + conn = connection.LibvirtConnection(False) conn.update_available_resource(self.context, 'dummy') service_ref = db.service_get(self.context, service_ref['id']) compute_node = service_ref['compute_node'][0] @@ -450,8 +511,8 @@ class LibvirtConnTestCase(test.TestCase): self.create_fake_libvirt_mock() self.mox.ReplayAll() - conn = libvirt_conn.LibvirtConnection(False) - self.assertRaises(exception.Invalid, + conn = connection.LibvirtConnection(False) + self.assertRaises(exception.ComputeServiceUnavailable, conn.update_available_resource, self.context, 'dummy') @@ -479,15 +540,16 @@ class LibvirtConnTestCase(test.TestCase): fake_timer = FakeTime() - self.create_fake_libvirt_mock(nwfilterLookupByName=fake_raise) + self.create_fake_libvirt_mock() instance_ref = db.instance_create(self.context, self.test_instance) # Start test self.mox.ReplayAll() try: - conn = libvirt_conn.LibvirtConnection(False) + conn 
= connection.LibvirtConnection(False) conn.firewall_driver.setattr('setup_basic_filtering', fake_none) conn.firewall_driver.setattr('prepare_instance_filter', fake_none) + conn.firewall_driver.setattr('instance_filter_exists', fake_none) conn.ensure_filtering_rules_for_instance(instance_ref, time=fake_timer) except exception.Error, e: @@ -533,7 +595,7 @@ class LibvirtConnTestCase(test.TestCase): # Start test self.mox.ReplayAll() - conn = libvirt_conn.LibvirtConnection(False) + conn = connection.LibvirtConnection(False) self.assertRaises(libvirt.libvirtError, conn._live_migration, self.context, instance_ref, 'dest', '', @@ -548,6 +610,48 @@ class LibvirtConnTestCase(test.TestCase): db.volume_destroy(self.context, volume_ref['id']) db.instance_destroy(self.context, instance_ref['id']) + def test_spawn_with_network_info(self): + # Skip if non-libvirt environment + if not self.lazy_load_library_exists(): + return + + # Preparing mocks + def fake_none(self, instance): + return + + self.create_fake_libvirt_mock() + instance = db.instance_create(self.context, self.test_instance) + + # Start test + self.mox.ReplayAll() + conn = connection.LibvirtConnection(False) + conn.firewall_driver.setattr('setup_basic_filtering', fake_none) + conn.firewall_driver.setattr('prepare_instance_filter', fake_none) + + network = db.project_get_network(context.get_admin_context(), + self.project.id) + ip_dict = {'ip': self.test_ip, + 'netmask': network['netmask'], + 'enabled': '1'} + mapping = {'label': network['label'], + 'gateway': network['gateway'], + 'mac': instance['mac_address'], + 'dns': [network['dns']], + 'ips': [ip_dict]} + network_info = [(network, mapping)] + + try: + conn.spawn(instance, network_info) + except Exception, e: + count = (0 <= str(e.message).find('Unexpected method call')) + + self.assertTrue(count) + + def test_get_host_ip_addr(self): + conn = connection.LibvirtConnection(False) + ip = conn.get_host_ip_addr() + self.assertEquals(ip, FLAGS.my_ip) + def 
tearDown(self): self.manager.delete_project(self.project) self.manager.delete_user(self.user) @@ -570,7 +674,7 @@ class IptablesFirewallTestCase(test.TestCase): """setup_basic_rules in nwfilter calls this.""" pass self.fake_libvirt_connection = FakeLibvirtConnection() - self.fw = libvirt_conn.IptablesFirewallDriver( + self.fw = firewall.IptablesFirewallDriver( get_connection=lambda: self.fake_libvirt_connection) def tearDown(self): @@ -615,11 +719,15 @@ class IptablesFirewallTestCase(test.TestCase): '# Completed on Tue Jan 18 23:47:56 2011', ] + def _create_instance_ref(self): + return db.instance_create(self.context, + {'user_id': 'fake', + 'project_id': 'fake', + 'mac_address': '56:12:12:12:12:12', + 'instance_type_id': 1}) + def test_static_filters(self): - instance_ref = db.instance_create(self.context, - {'user_id': 'fake', - 'project_id': 'fake', - 'mac_address': '56:12:12:12:12:12'}) + instance_ref = self._create_instance_ref() ip = '10.11.12.13' network_ref = db.project_get_network(self.context, @@ -730,6 +838,50 @@ class IptablesFirewallTestCase(test.TestCase): "TCP port 80/81 acceptance rule wasn't added") db.instance_destroy(admin_ctxt, instance_ref['id']) + def test_filters_for_instance_with_ip_v6(self): + self.flags(use_ipv6=True) + network_info = _create_network_info() + rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info) + self.assertEquals(len(rulesv4), 2) + self.assertEquals(len(rulesv6), 3) + + def test_filters_for_instance_without_ip_v6(self): + self.flags(use_ipv6=False) + network_info = _create_network_info() + rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info) + self.assertEquals(len(rulesv4), 2) + self.assertEquals(len(rulesv6), 0) + + def test_multinic_iptables(self): + ipv4_rules_per_network = 2 + ipv6_rules_per_network = 3 + networks_count = 5 + instance_ref = self._create_instance_ref() + network_info = _create_network_info(networks_count) + ipv4_len = len(self.fw.iptables.ipv4['filter'].rules) + 
ipv6_len = len(self.fw.iptables.ipv6['filter'].rules) + inst_ipv4, inst_ipv6 = self.fw.instance_rules(instance_ref, + network_info) + self.fw.add_filters_for_instance(instance_ref, network_info) + ipv4 = self.fw.iptables.ipv4['filter'].rules + ipv6 = self.fw.iptables.ipv6['filter'].rules + ipv4_network_rules = len(ipv4) - len(inst_ipv4) - ipv4_len + ipv6_network_rules = len(ipv6) - len(inst_ipv6) - ipv6_len + self.assertEquals(ipv4_network_rules, + ipv4_rules_per_network * networks_count) + self.assertEquals(ipv6_network_rules, + ipv6_rules_per_network * networks_count) + + def test_do_refresh_security_group_rules(self): + instance_ref = self._create_instance_ref() + self.mox.StubOutWithMock(self.fw, + 'add_filters_for_instance', + use_mock_anything=True) + self.fw.add_filters_for_instance(instance_ref, mox.IgnoreArg()) + self.fw.instances[instance_ref['id']] = instance_ref + self.mox.ReplayAll() + self.fw.do_refresh_security_group_rules("fake") + def test_provider_firewall_rules(self): # setup basic instance data instance_ref = db.instance_create(self.context, @@ -801,7 +953,7 @@ class NWFilterTestCase(test.TestCase): self.fake_libvirt_connection = Mock() - self.fw = libvirt_conn.NWFilterFirewall( + self.fw = firewall.NWFilterFirewall( lambda: self.fake_libvirt_connection) def tearDown(self): @@ -866,6 +1018,28 @@ class NWFilterTestCase(test.TestCase): return db.security_group_get_by_name(self.context, 'fake', 'testgroup') + def _create_instance(self): + return db.instance_create(self.context, + {'user_id': 'fake', + 'project_id': 'fake', + 'mac_address': '00:A0:C9:14:C8:29', + 'instance_type_id': 1}) + + def _create_instance_type(self, params={}): + """Create a test instance""" + context = self.context.elevated() + inst = {} + inst['name'] = 'm1.small' + inst['memory_mb'] = '1024' + inst['vcpus'] = '1' + inst['local_gb'] = '20' + inst['flavorid'] = '1' + inst['swap'] = '2048' + inst['rxtx_quota'] = 100 + inst['rxtx_cap'] = 200 + inst.update(params) + return 
db.instance_type_create(context, inst)['id'] + def test_creates_base_rule_first(self): # These come pre-defined by libvirt self.defined_filters = ['no-mac-spoofing', @@ -894,24 +1068,18 @@ class NWFilterTestCase(test.TestCase): self.fake_libvirt_connection.nwfilterDefineXML = _filterDefineXMLMock - instance_ref = db.instance_create(self.context, - {'user_id': 'fake', - 'project_id': 'fake', - 'mac_address': '00:A0:C9:14:C8:29'}) + instance_ref = self._create_instance() inst_id = instance_ref['id'] ip = '10.11.12.13' - network_ref = db.project_get_network(self.context, - 'fake') - - fixed_ip = {'address': ip, - 'network_id': network_ref['id']} + network_ref = db.project_get_network(self.context, 'fake') + fixed_ip = {'address': ip, 'network_id': network_ref['id']} admin_ctxt = context.get_admin_context() db.fixed_ip_create(admin_ctxt, fixed_ip) db.fixed_ip_update(admin_ctxt, ip, {'allocated': True, - 'instance_id': instance_ref['id']}) + 'instance_id': inst_id}) def _ensure_all_called(): instance_filter = 'nova-instance-%s-%s' % (instance_ref['name'], @@ -937,3 +1105,11 @@ class NWFilterTestCase(test.TestCase): _ensure_all_called() self.teardown_security_group() db.instance_destroy(admin_ctxt, instance_ref['id']) + + def test_create_network_filters(self): + instance_ref = self._create_instance() + network_info = _create_network_info(3) + result = self.fw._create_network_filters(instance_ref, + network_info, + "fake") + self.assertEquals(len(result), 3) diff --git a/nova/tests/test_misc.py b/nova/tests/test_misc.py index 4e17e1ce0..cf8f4c05e 100644 --- a/nova/tests/test_misc.py +++ b/nova/tests/test_misc.py @@ -29,11 +29,12 @@ from nova.utils import parse_mailmap, str_dict_replace class ProjectTestCase(test.TestCase): def test_authors_up_to_date(self): topdir = os.path.normpath(os.path.dirname(__file__) + '/../../') - if os.path.exists(os.path.join(topdir, '.bzr')): - contributors = set() - - mailmap = parse_mailmap(os.path.join(topdir, '.mailmap')) + missing = set() 
+ contributors = set() + mailmap = parse_mailmap(os.path.join(topdir, '.mailmap')) + authors_file = open(os.path.join(topdir, 'Authors'), 'r').read() + if os.path.exists(os.path.join(topdir, '.bzr')): import bzrlib.workingtree tree = bzrlib.workingtree.WorkingTree.open(topdir) tree.lock_read() @@ -47,22 +48,36 @@ class ProjectTestCase(test.TestCase): for r in revs: for author in r.get_apparent_authors(): email = author.split(' ')[-1] - contributors.add(str_dict_replace(email, mailmap)) + contributors.add(str_dict_replace(email, + mailmap)) + finally: + tree.unlock() - authors_file = open(os.path.join(topdir, 'Authors'), - 'r').read() + elif os.path.exists(os.path.join(topdir, '.git')): + import git + repo = git.Repo(topdir) + for commit in repo.head.commit.iter_parents(): + email = commit.author.email + if email is None: + email = commit.author.name + if 'nova-core' in email: + continue + if email.split(' ')[-1] == '<>': + email = email.split(' ')[-2] + email = '<' + email + '>' + contributors.add(str_dict_replace(email, mailmap)) - missing = set() - for contributor in contributors: - if contributor == 'nova-core': - continue - if not contributor in authors_file: - missing.add(contributor) + else: + return - self.assertTrue(len(missing) == 0, - '%r not listed in Authors' % missing) - finally: - tree.unlock() + for contributor in contributors: + if contributor == 'nova-core': + continue + if not contributor in authors_file: + missing.add(contributor) + + self.assertTrue(len(missing) == 0, + '%r not listed in Authors' % missing) class LockTestCase(test.TestCase): diff --git a/nova/tests/test_notifier.py b/nova/tests/test_notifier.py new file mode 100644 index 000000000..b6b0fcc68 --- /dev/null +++ b/nova/tests/test_notifier.py @@ -0,0 +1,117 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import nova + +from nova import context +from nova import flags +from nova import rpc +import nova.notifier.api +from nova.notifier.api import notify +from nova.notifier import no_op_notifier +from nova.notifier import rabbit_notifier +from nova import test + +import stubout + + +class NotifierTestCase(test.TestCase): + """Test case for notifications""" + def setUp(self): + super(NotifierTestCase, self).setUp() + self.stubs = stubout.StubOutForTesting() + + def tearDown(self): + self.stubs.UnsetAll() + super(NotifierTestCase, self).tearDown() + + def test_send_notification(self): + self.notify_called = False + + def mock_notify(cls, *args): + self.notify_called = True + + self.stubs.Set(nova.notifier.no_op_notifier, 'notify', + mock_notify) + + class Mock(object): + pass + notify('publisher_id', 'event_type', + nova.notifier.api.WARN, dict(a=3)) + self.assertEqual(self.notify_called, True) + + def test_verify_message_format(self): + """A test to ensure changing the message format is prohibitively + annoying""" + + def message_assert(message): + fields = [('publisher_id', 'publisher_id'), + ('event_type', 'event_type'), + ('priority', 'WARN'), + ('payload', dict(a=3))] + for k, v in fields: + self.assertEqual(message[k], v) + self.assertTrue(len(message['message_id']) > 0) + self.assertTrue(len(message['timestamp']) > 0) + + self.stubs.Set(nova.notifier.no_op_notifier, 'notify', + message_assert) + notify('publisher_id', 'event_type', + nova.notifier.api.WARN, dict(a=3)) + + def test_send_rabbit_notification(self): + self.stubs.Set(nova.flags.FLAGS, 
'notification_driver', + 'nova.notifier.rabbit_notifier') + self.mock_cast = False + + def mock_cast(cls, *args): + self.mock_cast = True + + class Mock(object): + pass + + self.stubs.Set(nova.rpc, 'cast', mock_cast) + notify('publisher_id', 'event_type', + nova.notifier.api.WARN, dict(a=3)) + + self.assertEqual(self.mock_cast, True) + + def test_invalid_priority(self): + def mock_cast(cls, *args): + pass + + class Mock(object): + pass + + self.stubs.Set(nova.rpc, 'cast', mock_cast) + self.assertRaises(nova.notifier.api.BadPriorityException, + notify, 'publisher_id', + 'event_type', 'not a priority', dict(a=3)) + + def test_rabbit_priority_queue(self): + self.stubs.Set(nova.flags.FLAGS, 'notification_driver', + 'nova.notifier.rabbit_notifier') + self.stubs.Set(nova.flags.FLAGS, 'notification_topic', + 'testnotify') + + self.test_topic = None + + def mock_cast(context, topic, msg): + self.test_topic = topic + + self.stubs.Set(nova.rpc, 'cast', mock_cast) + notify('publisher_id', + 'event_type', 'DEBUG', dict(a=3)) + self.assertEqual(self.test_topic, 'testnotify.debug') diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py index c65bc459d..916fca55e 100644 --- a/nova/tests/test_quota.py +++ b/nova/tests/test_quota.py @@ -67,7 +67,7 @@ class QuotaTestCase(test.TestCase): inst['reservation_id'] = 'r-fakeres' inst['user_id'] = self.user.id inst['project_id'] = self.project.id - inst['instance_type'] = 'm1.large' + inst['instance_type_id'] = '3' # m1.large inst['vcpus'] = cores inst['mac_address'] = utils.generate_mac() return db.instance_create(self.context, inst)['id'] @@ -96,39 +96,133 @@ class QuotaTestCase(test.TestCase): num_instances = quota.allowed_instances(self.context, 100, self._get_instance_type('m1.small')) self.assertEqual(num_instances, 2) - db.quota_create(self.context, {'project_id': self.project.id, - 'instances': 10}) + db.quota_create(self.context, self.project.id, 'instances', 10) num_instances = quota.allowed_instances(self.context, 100, 
self._get_instance_type('m1.small')) self.assertEqual(num_instances, 4) - db.quota_update(self.context, self.project.id, {'cores': 100}) + db.quota_create(self.context, self.project.id, 'cores', 100) num_instances = quota.allowed_instances(self.context, 100, self._get_instance_type('m1.small')) self.assertEqual(num_instances, 10) + db.quota_create(self.context, self.project.id, 'ram', 3 * 2048) + num_instances = quota.allowed_instances(self.context, 100, + self._get_instance_type('m1.small')) + self.assertEqual(num_instances, 3) # metadata_items too_many_items = FLAGS.quota_metadata_items + 1000 num_metadata_items = quota.allowed_metadata_items(self.context, too_many_items) self.assertEqual(num_metadata_items, FLAGS.quota_metadata_items) - db.quota_update(self.context, self.project.id, {'metadata_items': 5}) + db.quota_create(self.context, self.project.id, 'metadata_items', 5) num_metadata_items = quota.allowed_metadata_items(self.context, too_many_items) self.assertEqual(num_metadata_items, 5) # Cleanup - db.quota_destroy(self.context, self.project.id) + db.quota_destroy_all_by_project(self.context, self.project.id) + + def test_unlimited_instances(self): + FLAGS.quota_instances = 2 + FLAGS.quota_ram = -1 + FLAGS.quota_cores = -1 + instance_type = self._get_instance_type('m1.small') + num_instances = quota.allowed_instances(self.context, 100, + instance_type) + self.assertEqual(num_instances, 2) + db.quota_create(self.context, self.project.id, 'instances', None) + num_instances = quota.allowed_instances(self.context, 100, + instance_type) + self.assertEqual(num_instances, 100) + num_instances = quota.allowed_instances(self.context, 101, + instance_type) + self.assertEqual(num_instances, 101) + + def test_unlimited_ram(self): + FLAGS.quota_instances = -1 + FLAGS.quota_ram = 2 * 2048 + FLAGS.quota_cores = -1 + instance_type = self._get_instance_type('m1.small') + num_instances = quota.allowed_instances(self.context, 100, + instance_type) + 
self.assertEqual(num_instances, 2) + db.quota_create(self.context, self.project.id, 'ram', None) + num_instances = quota.allowed_instances(self.context, 100, + instance_type) + self.assertEqual(num_instances, 100) + num_instances = quota.allowed_instances(self.context, 101, + instance_type) + self.assertEqual(num_instances, 101) + + def test_unlimited_cores(self): + FLAGS.quota_instances = -1 + FLAGS.quota_ram = -1 + FLAGS.quota_cores = 2 + instance_type = self._get_instance_type('m1.small') + num_instances = quota.allowed_instances(self.context, 100, + instance_type) + self.assertEqual(num_instances, 2) + db.quota_create(self.context, self.project.id, 'cores', None) + num_instances = quota.allowed_instances(self.context, 100, + instance_type) + self.assertEqual(num_instances, 100) + num_instances = quota.allowed_instances(self.context, 101, + instance_type) + self.assertEqual(num_instances, 101) + + def test_unlimited_volumes(self): + FLAGS.quota_volumes = 10 + FLAGS.quota_gigabytes = -1 + volumes = quota.allowed_volumes(self.context, 100, 1) + self.assertEqual(volumes, 10) + db.quota_create(self.context, self.project.id, 'volumes', None) + volumes = quota.allowed_volumes(self.context, 100, 1) + self.assertEqual(volumes, 100) + volumes = quota.allowed_volumes(self.context, 101, 1) + self.assertEqual(volumes, 101) + + def test_unlimited_gigabytes(self): + FLAGS.quota_volumes = -1 + FLAGS.quota_gigabytes = 10 + volumes = quota.allowed_volumes(self.context, 100, 1) + self.assertEqual(volumes, 10) + db.quota_create(self.context, self.project.id, 'gigabytes', None) + volumes = quota.allowed_volumes(self.context, 100, 1) + self.assertEqual(volumes, 100) + volumes = quota.allowed_volumes(self.context, 101, 1) + self.assertEqual(volumes, 101) + + def test_unlimited_floating_ips(self): + FLAGS.quota_floating_ips = 10 + floating_ips = quota.allowed_floating_ips(self.context, 100) + self.assertEqual(floating_ips, 10) + db.quota_create(self.context, self.project.id, 
'floating_ips', None) + floating_ips = quota.allowed_floating_ips(self.context, 100) + self.assertEqual(floating_ips, 100) + floating_ips = quota.allowed_floating_ips(self.context, 101) + self.assertEqual(floating_ips, 101) + + def test_unlimited_metadata_items(self): + FLAGS.quota_metadata_items = 10 + items = quota.allowed_metadata_items(self.context, 100) + self.assertEqual(items, 10) + db.quota_create(self.context, self.project.id, 'metadata_items', None) + items = quota.allowed_metadata_items(self.context, 100) + self.assertEqual(items, 100) + items = quota.allowed_metadata_items(self.context, 101) + self.assertEqual(items, 101) def test_too_many_instances(self): instance_ids = [] for i in range(FLAGS.quota_instances): instance_id = self._create_instance() instance_ids.append(instance_id) + inst_type = instance_types.get_instance_type_by_name('m1.small') self.assertRaises(quota.QuotaError, compute.API().create, self.context, min_count=1, max_count=1, - instance_type='m1.small', + instance_type=inst_type, image_id=1) for instance_id in instance_ids: db.instance_destroy(self.context, instance_id) @@ -137,11 +231,12 @@ class QuotaTestCase(test.TestCase): instance_ids = [] instance_id = self._create_instance(cores=4) instance_ids.append(instance_id) + inst_type = instance_types.get_instance_type_by_name('m1.small') self.assertRaises(quota.QuotaError, compute.API().create, self.context, min_count=1, max_count=1, - instance_type='m1.small', + instance_type=inst_type, image_id=1) for instance_id in instance_ids: db.instance_destroy(self.context, instance_id) @@ -192,28 +287,68 @@ class QuotaTestCase(test.TestCase): metadata = {} for i in range(FLAGS.quota_metadata_items + 1): metadata['key%s' % i] = 'value%s' % i + inst_type = instance_types.get_instance_type_by_name('m1.small') self.assertRaises(quota.QuotaError, compute.API().create, self.context, min_count=1, max_count=1, - instance_type='m1.small', + instance_type=inst_type, image_id='fake', metadata=metadata) - 
def test_allowed_injected_files(self): - self.assertEqual( - quota.allowed_injected_files(self.context), - FLAGS.quota_max_injected_files) + def test_default_allowed_injected_files(self): + FLAGS.quota_max_injected_files = 55 + self.assertEqual(quota.allowed_injected_files(self.context, 100), 55) + + def test_overridden_allowed_injected_files(self): + FLAGS.quota_max_injected_files = 5 + db.quota_create(self.context, self.project.id, 'injected_files', 77) + self.assertEqual(quota.allowed_injected_files(self.context, 100), 77) + + def test_unlimited_default_allowed_injected_files(self): + FLAGS.quota_max_injected_files = -1 + self.assertEqual(quota.allowed_injected_files(self.context, 100), 100) + + def test_unlimited_db_allowed_injected_files(self): + FLAGS.quota_max_injected_files = 5 + db.quota_create(self.context, self.project.id, 'injected_files', None) + self.assertEqual(quota.allowed_injected_files(self.context, 100), 100) + + def test_default_allowed_injected_file_content_bytes(self): + FLAGS.quota_max_injected_file_content_bytes = 12345 + limit = quota.allowed_injected_file_content_bytes(self.context, 23456) + self.assertEqual(limit, 12345) + + def test_overridden_allowed_injected_file_content_bytes(self): + FLAGS.quota_max_injected_file_content_bytes = 12345 + db.quota_create(self.context, self.project.id, + 'injected_file_content_bytes', 5678) + limit = quota.allowed_injected_file_content_bytes(self.context, 23456) + self.assertEqual(limit, 5678) + + def test_unlimited_default_allowed_injected_file_content_bytes(self): + FLAGS.quota_max_injected_file_content_bytes = -1 + limit = quota.allowed_injected_file_content_bytes(self.context, 23456) + self.assertEqual(limit, 23456) + + def test_unlimited_db_allowed_injected_file_content_bytes(self): + FLAGS.quota_max_injected_file_content_bytes = 12345 + db.quota_create(self.context, self.project.id, + 'injected_file_content_bytes', None) + limit = quota.allowed_injected_file_content_bytes(self.context, 23456) + 
self.assertEqual(limit, 23456) def _create_with_injected_files(self, files): api = compute.API(image_service=self.StubImageService()) + inst_type = instance_types.get_instance_type_by_name('m1.small') api.create(self.context, min_count=1, max_count=1, - instance_type='m1.small', image_id='fake', + instance_type=inst_type, image_id='fake', injected_files=files) def test_no_injected_files(self): api = compute.API(image_service=self.StubImageService()) - api.create(self.context, instance_type='m1.small', image_id='fake') + inst_type = instance_types.get_instance_type_by_name('m1.small') + api.create(self.context, instance_type=inst_type, image_id='fake') def test_max_injected_files(self): files = [] @@ -228,11 +363,6 @@ class QuotaTestCase(test.TestCase): self.assertRaises(quota.QuotaError, self._create_with_injected_files, files) - def test_allowed_injected_file_content_bytes(self): - self.assertEqual( - quota.allowed_injected_file_content_bytes(self.context), - FLAGS.quota_max_injected_file_content_bytes) - def test_max_injected_file_content_bytes(self): max = FLAGS.quota_max_injected_file_content_bytes content = ''.join(['a' for i in xrange(max)]) diff --git a/nova/tests/test_scheduler.py b/nova/tests/test_scheduler.py index 6df74dd61..54b3f80fb 100644 --- a/nova/tests/test_scheduler.py +++ b/nova/tests/test_scheduler.py @@ -120,12 +120,11 @@ class SchedulerTestCase(test.TestCase): dest = 'dummydest' ctxt = context.get_admin_context() - try: - scheduler.show_host_resources(ctxt, dest) - except exception.NotFound, e: - c1 = (e.message.find(_("does not exist or is not a " - "compute node.")) >= 0) - self.assertTrue(c1) + self.assertRaises(exception.NotFound, scheduler.show_host_resources, + ctxt, dest) + #TODO(bcwaldon): reimplement this functionality + #c1 = (e.message.find(_("does not exist or is not a " + # "compute node.")) >= 0) def _dic_is_equal(self, dic1, dic2, keys=None): """Compares 2 dictionary contents(Helper method)""" @@ -263,7 +262,7 @@ class 
SimpleDriverTestCase(test.TestCase): inst['reservation_id'] = 'r-fakeres' inst['user_id'] = self.user.id inst['project_id'] = self.project.id - inst['instance_type'] = 'm1.tiny' + inst['instance_type_id'] = '1' inst['mac_address'] = utils.generate_mac() inst['vcpus'] = kwargs.get('vcpus', 1) inst['ami_launch_index'] = 0 @@ -698,14 +697,10 @@ class SimpleDriverTestCase(test.TestCase): 'topic': 'volume', 'report_count': 0} s_ref = db.service_create(self.context, dic) - try: - self.scheduler.driver.schedule_live_migration(self.context, - instance_id, - i_ref['host']) - except exception.Invalid, e: - c = (e.message.find('volume node is not alive') >= 0) + self.assertRaises(exception.VolumeServiceUnavailable, + self.scheduler.driver.schedule_live_migration, + self.context, instance_id, i_ref['host']) - self.assertTrue(c) db.instance_destroy(self.context, instance_id) db.service_destroy(self.context, s_ref['id']) db.volume_destroy(self.context, v_ref['id']) @@ -718,13 +713,10 @@ class SimpleDriverTestCase(test.TestCase): s_ref = self._create_compute_service(created_at=t, updated_at=t, host=i_ref['host']) - try: - self.scheduler.driver._live_migration_src_check(self.context, - i_ref) - except exception.Invalid, e: - c = (e.message.find('is not alive') >= 0) + self.assertRaises(exception.ComputeServiceUnavailable, + self.scheduler.driver._live_migration_src_check, + self.context, i_ref) - self.assertTrue(c) db.instance_destroy(self.context, instance_id) db.service_destroy(self.context, s_ref['id']) @@ -737,7 +729,7 @@ class SimpleDriverTestCase(test.TestCase): ret = self.scheduler.driver._live_migration_src_check(self.context, i_ref) - self.assertTrue(ret == None) + self.assertTrue(ret is None) db.instance_destroy(self.context, instance_id) db.service_destroy(self.context, s_ref['id']) @@ -749,14 +741,10 @@ class SimpleDriverTestCase(test.TestCase): s_ref = self._create_compute_service(created_at=t, updated_at=t, host=i_ref['host']) - try: - 
self.scheduler.driver._live_migration_dest_check(self.context, - i_ref, - i_ref['host']) - except exception.Invalid, e: - c = (e.message.find('is not alive') >= 0) + self.assertRaises(exception.ComputeServiceUnavailable, + self.scheduler.driver._live_migration_dest_check, + self.context, i_ref, i_ref['host']) - self.assertTrue(c) db.instance_destroy(self.context, instance_id) db.service_destroy(self.context, s_ref['id']) @@ -766,14 +754,10 @@ class SimpleDriverTestCase(test.TestCase): i_ref = db.instance_get(self.context, instance_id) s_ref = self._create_compute_service(host=i_ref['host']) - try: - self.scheduler.driver._live_migration_dest_check(self.context, - i_ref, - i_ref['host']) - except exception.Invalid, e: - c = (e.message.find('choose other host') >= 0) + self.assertRaises(exception.UnableToMigrateToSelf, + self.scheduler.driver._live_migration_dest_check, + self.context, i_ref, i_ref['host']) - self.assertTrue(c) db.instance_destroy(self.context, instance_id) db.service_destroy(self.context, s_ref['id']) @@ -784,14 +768,10 @@ class SimpleDriverTestCase(test.TestCase): s_ref = self._create_compute_service(host='somewhere', memory_mb_used=12) - try: - self.scheduler.driver._live_migration_dest_check(self.context, - i_ref, - 'somewhere') - except exception.NotEmpty, e: - c = (e.message.find('Unable to migrate') >= 0) + self.assertRaises(exception.MigrationError, + self.scheduler.driver._live_migration_dest_check, + self.context, i_ref, 'somewhere') - self.assertTrue(c) db.instance_destroy(self.context, instance_id) db.service_destroy(self.context, s_ref['id']) @@ -805,7 +785,7 @@ class SimpleDriverTestCase(test.TestCase): ret = self.scheduler.driver._live_migration_dest_check(self.context, i_ref, 'somewhere') - self.assertTrue(ret == None) + self.assertTrue(ret is None) db.instance_destroy(self.context, instance_id) db.service_destroy(self.context, s_ref['id']) @@ -837,14 +817,10 @@ class SimpleDriverTestCase(test.TestCase): "args": {'filename': fpath}}) 
self.mox.ReplayAll() - try: - self.scheduler.driver._live_migration_common_check(self.context, - i_ref, - dest) - except exception.Invalid, e: - c = (e.message.find('does not exist') >= 0) + self.assertRaises(exception.SourceHostUnavailable, + self.scheduler.driver._live_migration_common_check, + self.context, i_ref, dest) - self.assertTrue(c) db.instance_destroy(self.context, instance_id) db.service_destroy(self.context, s_ref['id']) @@ -865,14 +841,10 @@ class SimpleDriverTestCase(test.TestCase): driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest) self.mox.ReplayAll() - try: - self.scheduler.driver._live_migration_common_check(self.context, - i_ref, - dest) - except exception.Invalid, e: - c = (e.message.find(_('Different hypervisor type')) >= 0) + self.assertRaises(exception.InvalidHypervisorType, + self.scheduler.driver._live_migration_common_check, + self.context, i_ref, dest) - self.assertTrue(c) db.instance_destroy(self.context, instance_id) db.service_destroy(self.context, s_ref['id']) db.service_destroy(self.context, s_ref2['id']) @@ -895,14 +867,10 @@ class SimpleDriverTestCase(test.TestCase): driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest) self.mox.ReplayAll() - try: - self.scheduler.driver._live_migration_common_check(self.context, - i_ref, - dest) - except exception.Invalid, e: - c = (e.message.find(_('Older hypervisor version')) >= 0) + self.assertRaises(exception.DestinationHypervisorTooOld, + self.scheduler.driver._live_migration_common_check, + self.context, i_ref, dest) - self.assertTrue(c) db.instance_destroy(self.context, instance_id) db.service_destroy(self.context, s_ref['id']) db.service_destroy(self.context, s_ref2['id']) @@ -944,7 +912,8 @@ class SimpleDriverTestCase(test.TestCase): class FakeZone(object): - def __init__(self, api_url, username, password): + def __init__(self, id, api_url, username, password): + self.id = id self.api_url = api_url self.username = username self.password = password @@ 
-952,7 +921,7 @@ class FakeZone(object): def zone_get_all(context): return [ - FakeZone('http://example.com', 'bob', 'xxx'), + FakeZone(1, 'http://example.com', 'bob', 'xxx'), ] @@ -968,7 +937,7 @@ class FakeRerouteCompute(api.reroute_compute): def go_boom(self, context, instance): - raise exception.InstanceNotFound("boom message", instance) + raise exception.InstanceNotFound(instance_id=instance) def found_instance(self, context, instance): @@ -1017,11 +986,8 @@ class ZoneRedirectTest(test.TestCase): def test_routing_flags(self): FLAGS.enable_zone_routing = False decorator = FakeRerouteCompute("foo") - try: - result = decorator(go_boom)(None, None, 1) - self.assertFail(_("Should have thrown exception.")) - except exception.InstanceNotFound, e: - self.assertEquals(e.message, 'boom message') + self.assertRaises(exception.InstanceNotFound, decorator(go_boom), + None, None, 1) def test_get_collection_context_and_id(self): decorator = api.reroute_compute("foo") @@ -1072,7 +1038,7 @@ class FakeNovaClient(object): class DynamicNovaClientTest(test.TestCase): def test_issue_novaclient_command_found(self): - zone = FakeZone('http://example.com', 'bob', 'xxx') + zone = FakeZone(1, 'http://example.com', 'bob', 'xxx') self.assertEquals(api._issue_novaclient_command( FakeNovaClient(FakeServerCollection()), zone, "servers", "get", 100).a, 10) @@ -1086,7 +1052,7 @@ class DynamicNovaClientTest(test.TestCase): zone, "servers", "pause", 100), None) def test_issue_novaclient_command_not_found(self): - zone = FakeZone('http://example.com', 'bob', 'xxx') + zone = FakeZone(1, 'http://example.com', 'bob', 'xxx') self.assertEquals(api._issue_novaclient_command( FakeNovaClient(FakeEmptyServerCollection()), zone, "servers", "get", 100), None) @@ -1098,3 +1064,55 @@ class DynamicNovaClientTest(test.TestCase): self.assertEquals(api._issue_novaclient_command( FakeNovaClient(FakeEmptyServerCollection()), zone, "servers", "any", "name"), None) + + +class FakeZonesProxy(object): + def 
do_something(*args, **kwargs): + return 42 + + def raises_exception(*args, **kwargs): + raise Exception('testing') + + +class FakeNovaClientOpenStack(object): + def __init__(self, *args, **kwargs): + self.zones = FakeZonesProxy() + + def authenticate(self): + pass + + +class CallZoneMethodTest(test.TestCase): + def setUp(self): + super(CallZoneMethodTest, self).setUp() + self.stubs = stubout.StubOutForTesting() + self.stubs.Set(db, 'zone_get_all', zone_get_all) + self.stubs.Set(novaclient, 'OpenStack', FakeNovaClientOpenStack) + + def tearDown(self): + self.stubs.UnsetAll() + super(CallZoneMethodTest, self).tearDown() + + def test_call_zone_method(self): + context = {} + method = 'do_something' + results = api.call_zone_method(context, method) + expected = [(1, 42)] + self.assertEqual(expected, results) + + def test_call_zone_method_not_present(self): + context = {} + method = 'not_present' + self.assertRaises(AttributeError, api.call_zone_method, + context, method) + + def test_call_zone_method_generates_exception(self): + context = {} + method = 'raises_exception' + results = api.call_zone_method(context, method) + + # FIXME(sirp): for now the _error_trap code is catching errors and + # converting them to a ("ERROR", "string") tuples. The code (and this + # test) should eventually handle real exceptions. 
+ expected = [(1, ('ERROR', 'testing'))] + self.assertEqual(expected, results) diff --git a/nova/tests/test_utils.py b/nova/tests/test_utils.py index e08d229b0..8f7e83c3e 100644 --- a/nova/tests/test_utils.py +++ b/nova/tests/test_utils.py @@ -17,9 +17,9 @@ import os import tempfile +from nova import exception from nova import test from nova import utils -from nova import exception class ExecuteTestCase(test.TestCase): @@ -250,3 +250,28 @@ class GetFromPathTestCase(test.TestCase): input = {'a': [1, 2, {'b': 'b_1'}]} self.assertEquals([1, 2, {'b': 'b_1'}], f(input, "a")) self.assertEquals(['b_1'], f(input, "a/b")) + + +class GenericUtilsTestCase(test.TestCase): + def test_parse_server_string(self): + result = utils.parse_server_string('::1') + self.assertEqual(('::1', ''), result) + result = utils.parse_server_string('[::1]:8773') + self.assertEqual(('::1', '8773'), result) + result = utils.parse_server_string('2001:db8::192.168.1.1') + self.assertEqual(('2001:db8::192.168.1.1', ''), result) + result = utils.parse_server_string('[2001:db8::192.168.1.1]:8773') + self.assertEqual(('2001:db8::192.168.1.1', '8773'), result) + result = utils.parse_server_string('192.168.1.1') + self.assertEqual(('192.168.1.1', ''), result) + result = utils.parse_server_string('192.168.1.2:8773') + self.assertEqual(('192.168.1.2', '8773'), result) + result = utils.parse_server_string('192.168.1.3') + self.assertEqual(('192.168.1.3', ''), result) + result = utils.parse_server_string('www.example.com:8443') + self.assertEqual(('www.example.com', '8443'), result) + result = utils.parse_server_string('www.example.com') + self.assertEqual(('www.example.com', ''), result) + # error case + result = utils.parse_server_string('www.exa:mple.com:8443') + self.assertEqual(('', ''), result) diff --git a/nova/tests/test_volume.py b/nova/tests/test_volume.py index d71b75f3f..236d12434 100644 --- a/nova/tests/test_volume.py +++ b/nova/tests/test_volume.py @@ -106,7 +106,7 @@ class 
VolumeTestCase(test.TestCase): inst['launch_time'] = '10' inst['user_id'] = 'fake' inst['project_id'] = 'fake' - inst['instance_type'] = 'm1.tiny' + inst['instance_type_id'] = '2' # m1.tiny inst['mac_address'] = utils.generate_mac() inst['ami_launch_index'] = 0 instance_id = db.instance_create(self.context, inst)['id'] @@ -142,7 +142,7 @@ class VolumeTestCase(test.TestCase): self.assertEqual(vol['status'], "available") self.volume.delete_volume(self.context, volume_id) - self.assertRaises(exception.Error, + self.assertRaises(exception.VolumeNotFound, db.volume_get, self.context, volume_id) diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index 17e3f55e9..be1e35697 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -16,7 +16,9 @@ """Test suite for XenAPI.""" +import eventlet import functools +import json import os import re import stubout @@ -80,7 +82,7 @@ class XenAPIVolumeTestCase(test.TestCase): 'image_id': 1, 'kernel_id': 2, 'ramdisk_id': 3, - 'instance_type': 'm1.large', + 'instance_type_id': '3', # m1.large 'mac_address': 'aa:bb:cc:dd:ee:ff', 'os_type': 'linux'} @@ -197,6 +199,28 @@ class XenAPIVMTestCase(test.TestCase): self.context = context.RequestContext('fake', 'fake', False) self.conn = xenapi_conn.get_connection(False) + def test_parallel_builds(self): + stubs.stubout_loopingcall_delay(self.stubs) + + def _do_build(id, proj, user, *args): + values = { + 'id': id, + 'project_id': proj, + 'user_id': user, + 'image_id': 1, + 'kernel_id': 2, + 'ramdisk_id': 3, + 'instance_type_id': '3', # m1.large + 'mac_address': 'aa:bb:cc:dd:ee:ff', + 'os_type': 'linux'} + instance = db.instance_create(self.context, values) + self.conn.spawn(instance) + + gt1 = eventlet.spawn(_do_build, 1, self.project.id, self.user.id) + gt2 = eventlet.spawn(_do_build, 2, self.project.id, self.user.id) + gt1.wait() + gt2.wait() + def test_list_instances_0(self): instances = self.conn.list_instances() self.assertEquals(instances, []) @@ -289,11 
+313,11 @@ class XenAPIVMTestCase(test.TestCase): 'enabled':'1'}], 'ip6s': [{'ip': 'fe80::a8bb:ccff:fedd:eeff', 'netmask': '120', - 'enabled': '1', - 'gateway': 'fe80::a00:1'}], + 'enabled': '1'}], 'mac': 'aa:bb:cc:dd:ee:ff', 'dns': ['10.0.0.2'], - 'gateway': '10.0.0.1'}) + 'gateway': '10.0.0.1', + 'gateway6': 'fe80::a00:1'}) def check_vm_params_for_windows(self): self.assertEquals(self.vm['platform']['nx'], 'true') @@ -328,7 +352,7 @@ class XenAPIVMTestCase(test.TestCase): self.assertEquals(self.vm['HVM_boot_policy'], '') def _test_spawn(self, image_id, kernel_id, ramdisk_id, - instance_type="m1.large", os_type="linux", + instance_type_id="3", os_type="linux", instance_id=1, check_injection=False): stubs.stubout_loopingcall_start(self.stubs) values = {'id': instance_id, @@ -337,7 +361,7 @@ class XenAPIVMTestCase(test.TestCase): 'image_id': image_id, 'kernel_id': kernel_id, 'ramdisk_id': ramdisk_id, - 'instance_type': instance_type, + 'instance_type_id': instance_type_id, 'mac_address': 'aa:bb:cc:dd:ee:ff', 'os_type': os_type} instance = db.instance_create(self.context, values) @@ -349,7 +373,7 @@ class XenAPIVMTestCase(test.TestCase): FLAGS.xenapi_image_service = 'glance' self.assertRaises(Exception, self._test_spawn, - 1, 2, 3, "m1.xlarge") + 1, 2, 3, "4") # m1.xlarge def test_spawn_raw_objectstore(self): FLAGS.xenapi_image_service = 'objectstore' @@ -523,7 +547,7 @@ class XenAPIVMTestCase(test.TestCase): 'image_id': 1, 'kernel_id': 2, 'ramdisk_id': 3, - 'instance_type': 'm1.large', + 'instance_type_id': '3', # m1.large 'mac_address': 'aa:bb:cc:dd:ee:ff', 'os_type': 'linux'} instance = db.instance_create(self.context, values) @@ -580,7 +604,7 @@ class XenAPIMigrateInstance(test.TestCase): 'kernel_id': None, 'ramdisk_id': None, 'local_gb': 5, - 'instance_type': 'm1.large', + 'instance_type_id': '3', # m1.large 'mac_address': 'aa:bb:cc:dd:ee:ff', 'os_type': 'linux'} @@ -665,3 +689,52 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase): 
self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_VHD self.fake_instance.kernel_id = None self.assert_disk_type(vm_utils.ImageType.DISK_VHD) + + +class FakeXenApi(object): + """Fake XenApi for testing HostState.""" + + class FakeSR(object): + def get_record(self, ref): + return {'virtual_allocation': 10000, + 'physical_utilisation': 20000} + + SR = FakeSR() + + +class FakeSession(object): + """Fake Session class for HostState testing.""" + + def async_call_plugin(self, *args): + return None + + def wait_for_task(self, *args): + vm = {'total': 10, + 'overhead': 20, + 'free': 30, + 'free-computed': 40} + return json.dumps({'host_memory': vm}) + + def get_xenapi(self): + return FakeXenApi() + + +class HostStateTestCase(test.TestCase): + """Tests HostState, which holds metrics from XenServer that get + reported back to the Schedulers.""" + + def _fake_safe_find_sr(self, session): + """None SR ref since we're ignoring it in FakeSR.""" + return None + + def test_host_state(self): + self.stubs = stubout.StubOutForTesting() + self.stubs.Set(vm_utils, 'safe_find_sr', self._fake_safe_find_sr) + host_state = xenapi_conn.HostState(FakeSession()) + stats = host_state._stats + self.assertEquals(stats['disk_total'], 10000) + self.assertEquals(stats['disk_used'], 20000) + self.assertEquals(stats['host_memory_total'], 10) + self.assertEquals(stats['host_memory_overhead'], 20) + self.assertEquals(stats['host_memory_free'], 30) + self.assertEquals(stats['host_memory_free_computed'], 40) diff --git a/nova/tests/test_zone_aware_scheduler.py b/nova/tests/test_zone_aware_scheduler.py new file mode 100644 index 000000000..fdcde34c9 --- /dev/null +++ b/nova/tests/test_zone_aware_scheduler.py @@ -0,0 +1,119 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Tests For Zone Aware Scheduler. +""" + +from nova import test +from nova.scheduler import driver +from nova.scheduler import zone_aware_scheduler +from nova.scheduler import zone_manager + + +class FakeZoneAwareScheduler(zone_aware_scheduler.ZoneAwareScheduler): + def filter_hosts(self, num, specs): + # NOTE(sirp): this is returning [(hostname, services)] + return self.zone_manager.service_states.items() + + def weigh_hosts(self, num, specs, hosts): + fake_weight = 99 + weighted = [] + for hostname, caps in hosts: + weighted.append(dict(weight=fake_weight, name=hostname)) + return weighted + + +class FakeZoneManager(zone_manager.ZoneManager): + def __init__(self): + self.service_states = { + 'host1': { + 'compute': {'ram': 1000} + }, + 'host2': { + 'compute': {'ram': 2000} + }, + 'host3': { + 'compute': {'ram': 3000} + } + } + + +class FakeEmptyZoneManager(zone_manager.ZoneManager): + def __init__(self): + self.service_states = {} + + +def fake_empty_call_zone_method(context, method, specs): + return [] + + +def fake_call_zone_method(context, method, specs): + return [ + ('zone1', [ + dict(weight=1, blob='AAAAAAA'), + dict(weight=111, blob='BBBBBBB'), + dict(weight=112, blob='CCCCCCC'), + dict(weight=113, blob='DDDDDDD'), + ]), + ('zone2', [ + dict(weight=120, blob='EEEEEEE'), + dict(weight=2, blob='FFFFFFF'), + dict(weight=122, blob='GGGGGGG'), + dict(weight=123, blob='HHHHHHH'), + ]), + ('zone3', [ + dict(weight=130, blob='IIIIIII'), + dict(weight=131, blob='JJJJJJJ'), + dict(weight=132, blob='KKKKKKK'), + dict(weight=3, blob='LLLLLLL'), + ]), + ] + 
+ +class ZoneAwareSchedulerTestCase(test.TestCase): + """Test case for Zone Aware Scheduler.""" + + def test_zone_aware_scheduler(self): + """ + Create a nested set of FakeZones, ensure that a select call returns the + appropriate build plan. + """ + sched = FakeZoneAwareScheduler() + self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method) + + zm = FakeZoneManager() + sched.set_zone_manager(zm) + + fake_context = {} + build_plan = sched.select(fake_context, {}) + + self.assertEqual(15, len(build_plan)) + + hostnames = [plan_item['name'] + for plan_item in build_plan if 'name' in plan_item] + self.assertEqual(3, len(hostnames)) + + def test_empty_zone_aware_scheduler(self): + """ + Ensure empty hosts & child_zones result in NoValidHosts exception. + """ + sched = FakeZoneAwareScheduler() + self.stubs.Set(sched, '_call_zone_method', fake_empty_call_zone_method) + + zm = FakeEmptyZoneManager() + sched.set_zone_manager(zm) + + fake_context = {} + self.assertRaises(driver.NoValidHost, sched.schedule, fake_context, {}) diff --git a/nova/tests/test_zones.py b/nova/tests/test_zones.py index 688dc704d..e132809dc 100644 --- a/nova/tests/test_zones.py +++ b/nova/tests/test_zones.py @@ -78,38 +78,32 @@ class ZoneManagerTestCase(test.TestCase): def test_service_capabilities(self): zm = zone_manager.ZoneManager() - caps = zm.get_zone_capabilities(self, None) + caps = zm.get_zone_capabilities(None) self.assertEquals(caps, {}) zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2)) - caps = zm.get_zone_capabilities(self, None) + caps = zm.get_zone_capabilities(None) self.assertEquals(caps, dict(svc1_a=(1, 1), svc1_b=(2, 2))) zm.update_service_capabilities("svc1", "host1", dict(a=2, b=3)) - caps = zm.get_zone_capabilities(self, None) + caps = zm.get_zone_capabilities(None) self.assertEquals(caps, dict(svc1_a=(2, 2), svc1_b=(3, 3))) zm.update_service_capabilities("svc1", "host2", dict(a=20, b=30)) - caps = zm.get_zone_capabilities(self, None) + caps = 
zm.get_zone_capabilities(None) self.assertEquals(caps, dict(svc1_a=(2, 20), svc1_b=(3, 30))) zm.update_service_capabilities("svc10", "host1", dict(a=99, b=99)) - caps = zm.get_zone_capabilities(self, None) + caps = zm.get_zone_capabilities(None) self.assertEquals(caps, dict(svc1_a=(2, 20), svc1_b=(3, 30), svc10_a=(99, 99), svc10_b=(99, 99))) zm.update_service_capabilities("svc1", "host3", dict(c=5)) - caps = zm.get_zone_capabilities(self, None) + caps = zm.get_zone_capabilities(None) self.assertEquals(caps, dict(svc1_a=(2, 20), svc1_b=(3, 30), svc1_c=(5, 5), svc10_a=(99, 99), svc10_b=(99, 99))) - caps = zm.get_zone_capabilities(self, 'svc1') - self.assertEquals(caps, dict(svc1_a=(2, 20), svc1_b=(3, 30), - svc1_c=(5, 5))) - caps = zm.get_zone_capabilities(self, 'svc10') - self.assertEquals(caps, dict(svc10_a=(99, 99), svc10_b=(99, 99))) - def test_refresh_from_db_replace_existing(self): zm = zone_manager.ZoneManager() zone_state = zone_manager.ZoneState() diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py index 205f6c902..4833ccb07 100644 --- a/nova/tests/xenapi/stubs.py +++ b/nova/tests/xenapi/stubs.py @@ -16,6 +16,7 @@ """Stubouts, mocks and fixtures for the test suite""" +import eventlet from nova.virt import xenapi_conn from nova.virt.xenapi import fake from nova.virt.xenapi import volume_utils @@ -28,29 +29,6 @@ def stubout_instance_snapshot(stubs): @classmethod def fake_fetch_image(cls, session, instance_id, image, user, project, type): - # Stubout wait_for_task - def fake_wait_for_task(self, task, id): - class FakeEvent: - - def send(self, value): - self.rv = value - - def wait(self): - return self.rv - - done = FakeEvent() - self._poll_task(id, task, done) - rv = done.wait() - return rv - - def fake_loop(self): - pass - - stubs.Set(xenapi_conn.XenAPISession, 'wait_for_task', - fake_wait_for_task) - - stubs.Set(xenapi_conn.XenAPISession, '_stop_loop', fake_loop) - from nova.virt.xenapi.fake import create_vdi name_label = "instance-%s" % 
instance_id #TODO: create fake SR record @@ -63,11 +41,6 @@ def stubout_instance_snapshot(stubs): stubs.Set(vm_utils.VMHelper, 'fetch_image', fake_fetch_image) - def fake_parse_xmlrpc_value(val): - return val - - stubs.Set(xenapi_conn, '_parse_xmlrpc_value', fake_parse_xmlrpc_value) - def fake_wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref, original_parent_uuid): from nova.virt.xenapi.fake import create_vdi @@ -144,6 +117,16 @@ def stubout_loopingcall_start(stubs): stubs.Set(utils.LoopingCall, 'start', fake_start) +def stubout_loopingcall_delay(stubs): + def fake_start(self, interval, now=True): + self._running = True + eventlet.sleep(1) + self.f(*self.args, **self.kw) + # This would fail before parallel xenapi calls were fixed + assert self._running == False + stubs.Set(utils.LoopingCall, 'start', fake_start) + + class FakeSessionForVMTests(fake.SessionBase): """ Stubs out a XenAPISession for VM tests """ def __init__(self, uri): diff --git a/nova/utils.py b/nova/utils.py index 3f6f9fc8a..361fc9873 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -17,9 +17,7 @@ # License for the specific language governing permissions and limitations # under the License. -""" -System-level utilities and helper functions. 
-""" +"""Utilities and helper functions.""" import base64 import datetime @@ -43,9 +41,8 @@ from eventlet import event from eventlet import greenthread from eventlet import semaphore from eventlet.green import subprocess -None + from nova import exception -from nova.exception import ProcessExecutionError from nova import flags from nova import log as logging @@ -56,18 +53,18 @@ FLAGS = flags.FLAGS def import_class(import_str): - """Returns a class from a string including module and class""" + """Returns a class from a string including module and class.""" mod_str, _sep, class_str = import_str.rpartition('.') try: __import__(mod_str) return getattr(sys.modules[mod_str], class_str) except (ImportError, ValueError, AttributeError), exc: LOG.debug(_('Inner Exception: %s'), exc) - raise exception.NotFound(_('Class %s cannot be found') % class_str) + raise exception.ClassNotFound(class_name=class_str) def import_object(import_str): - """Returns an object including a module or module and class""" + """Returns an object including a module or module and class.""" try: __import__(import_str) return sys.modules[import_str] @@ -99,11 +96,12 @@ def vpn_ping(address, port, timeout=0.05, session_id=None): cli_id = 64 bit identifier ? 
= unknown, probably flags/padding bit 9 was 1 and the rest were 0 in testing + """ if session_id is None: session_id = random.randint(0, 0xffffffffffffffff) sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - data = struct.pack("!BQxxxxxx", 0x38, session_id) + data = struct.pack('!BQxxxxxx', 0x38, session_id) sock.sendto(data, (address, port)) sock.settimeout(timeout) try: @@ -112,7 +110,7 @@ def vpn_ping(address, port, timeout=0.05, session_id=None): return False finally: sock.close() - fmt = "!BQxxxxxQxxxx" + fmt = '!BQxxxxxQxxxx' if len(received) != struct.calcsize(fmt): print struct.calcsize(fmt) return False @@ -122,15 +120,8 @@ def vpn_ping(address, port, timeout=0.05, session_id=None): def fetchfile(url, target): - LOG.debug(_("Fetching %s") % url) -# c = pycurl.Curl() -# fp = open(target, "wb") -# c.setopt(c.URL, url) -# c.setopt(c.WRITEDATA, fp) -# c.perform() -# c.close() -# fp.close() - execute("curl", "--fail", url, "-o", target) + LOG.debug(_('Fetching %s') % url) + execute('curl', '--fail', url, '-o', target) def execute(*cmd, **kwargs): @@ -147,7 +138,7 @@ def execute(*cmd, **kwargs): while attempts > 0: attempts -= 1 try: - LOG.debug(_("Running cmd (subprocess): %s"), ' '.join(cmd)) + LOG.debug(_('Running cmd (subprocess): %s'), ' '.join(cmd)) env = os.environ.copy() if addl_env: env.update(addl_env) @@ -157,26 +148,27 @@ def execute(*cmd, **kwargs): stderr=subprocess.PIPE, env=env) result = None - if process_input != None: + if process_input is not None: result = obj.communicate(process_input) else: result = obj.communicate() obj.stdin.close() if obj.returncode: - LOG.debug(_("Result was %s") % obj.returncode) + LOG.debug(_('Result was %s') % obj.returncode) if type(check_exit_code) == types.IntType \ and obj.returncode != check_exit_code: (stdout, stderr) = result - raise ProcessExecutionError(exit_code=obj.returncode, - stdout=stdout, - stderr=stderr, - cmd=' '.join(cmd)) + raise exception.ProcessExecutionError( + exit_code=obj.returncode, 
+ stdout=stdout, + stderr=stderr, + cmd=' '.join(cmd)) return result - except ProcessExecutionError: + except exception.ProcessExecutionError: if not attempts: raise else: - LOG.debug(_("%r failed. Retrying."), cmd) + LOG.debug(_('%r failed. Retrying.'), cmd) if delay_on_retry: greenthread.sleep(random.randint(20, 200) / 100.0) finally: @@ -188,13 +180,13 @@ def execute(*cmd, **kwargs): def ssh_execute(ssh, cmd, process_input=None, addl_env=None, check_exit_code=True): - LOG.debug(_("Running cmd (SSH): %s"), ' '.join(cmd)) + LOG.debug(_('Running cmd (SSH): %s'), ' '.join(cmd)) if addl_env: - raise exception.Error("Environment not supported over SSH") + raise exception.Error(_('Environment not supported over SSH')) if process_input: # This is (probably) fixable if we need it... - raise exception.Error("process_input not supported over SSH") + raise exception.Error(_('process_input not supported over SSH')) stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd) channel = stdout_stream.channel @@ -212,7 +204,7 @@ def ssh_execute(ssh, cmd, process_input=None, # exit_status == -1 if no exit code was returned if exit_status != -1: - LOG.debug(_("Result was %s") % exit_status) + LOG.debug(_('Result was %s') % exit_status) if check_exit_code and exit_status != 0: raise exception.ProcessExecutionError(exit_code=exit_status, stdout=stdout, @@ -240,9 +232,12 @@ def default_flagfile(filename='nova.conf'): # turn relative filename into an absolute path script_dir = os.path.dirname(inspect.stack()[-1][1]) filename = os.path.abspath(os.path.join(script_dir, filename)) - if os.path.exists(filename): - flagfile = ['--flagfile=%s' % filename] - sys.argv = sys.argv[:1] + flagfile + sys.argv[1:] + if not os.path.exists(filename): + filename = "./nova.conf" + if not os.path.exists(filename): + filename = '/etc/nova/nova.conf' + flagfile = ['--flagfile=%s' % filename] + sys.argv = sys.argv[:1] + flagfile + sys.argv[1:] def debug(arg): @@ -251,7 +246,7 @@ def debug(arg): def 
runthis(prompt, *cmd, **kwargs): - LOG.debug(_("Running %s"), (" ".join(cmd))) + LOG.debug(_('Running %s'), (' '.join(cmd))) rv, err = execute(*cmd, **kwargs) @@ -266,68 +261,49 @@ def generate_mac(): random.randint(0x00, 0x7f), random.randint(0x00, 0xff), random.randint(0x00, 0xff)] - return ':'.join(map(lambda x: "%02x" % x, mac)) + return ':'.join(map(lambda x: '%02x' % x, mac)) # Default symbols to use for passwords. Avoids visually confusing characters. # ~6 bits per symbol -DEFAULT_PASSWORD_SYMBOLS = ("23456789" # Removed: 0,1 - "ABCDEFGHJKLMNPQRSTUVWXYZ" # Removed: I, O - "abcdefghijkmnopqrstuvwxyz") # Removed: l +DEFAULT_PASSWORD_SYMBOLS = ('23456789' # Removed: 0,1 + 'ABCDEFGHJKLMNPQRSTUVWXYZ' # Removed: I, O + 'abcdefghijkmnopqrstuvwxyz') # Removed: l # ~5 bits per symbol -EASIER_PASSWORD_SYMBOLS = ("23456789" # Removed: 0, 1 - "ABCDEFGHJKLMNPQRSTUVWXYZ") # Removed: I, O +EASIER_PASSWORD_SYMBOLS = ('23456789' # Removed: 0, 1 + 'ABCDEFGHJKLMNPQRSTUVWXYZ') # Removed: I, O def generate_password(length=20, symbols=DEFAULT_PASSWORD_SYMBOLS): """Generate a random password from the supplied symbols. Believed to be reasonably secure (with a reasonable password length!) 
+ """ r = random.SystemRandom() - return "".join([r.choice(symbols) for _i in xrange(length)]) + return ''.join([r.choice(symbols) for _i in xrange(length)]) def last_octet(address): - return int(address.split(".")[-1]) + return int(address.split('.')[-1]) def get_my_linklocal(interface): try: - if_str = execute("ip", "-f", "inet6", "-o", "addr", "show", interface) - condition = "\s+inet6\s+([0-9a-f:]+)/\d+\s+scope\s+link" + if_str = execute('ip', '-f', 'inet6', '-o', 'addr', 'show', interface) + condition = '\s+inet6\s+([0-9a-f:]+)/\d+\s+scope\s+link' links = [re.search(condition, x) for x in if_str[0].split('\n')] address = [w.group(1) for w in links if w is not None] if address[0] is not None: return address[0] else: - raise exception.Error(_("Link Local address is not found.:%s") + raise exception.Error(_('Link Local address is not found.:%s') % if_str) except Exception as ex: raise exception.Error(_("Couldn't get Link Local IP of %(interface)s" - " :%(ex)s") % locals()) - - -def to_global_ipv6(prefix, mac): - try: - mac64 = netaddr.EUI(mac).eui64().words - int_addr = int(''.join(['%02x' % i for i in mac64]), 16) - mac64_addr = netaddr.IPAddress(int_addr) - maskIP = netaddr.IPNetwork(prefix).ip - return (mac64_addr ^ netaddr.IPAddress('::0200:0:0:0') | maskIP).\ - format() - except TypeError: - raise TypeError(_("Bad mac for to_global_ipv6: %s") % mac) - - -def to_mac(ipv6_address): - address = netaddr.IPAddress(ipv6_address) - mask1 = netaddr.IPAddress("::ffff:ffff:ffff:ffff") - mask2 = netaddr.IPAddress("::0200:0:0:0") - mac64 = netaddr.EUI(int(address & mask1 ^ mask2)).words - return ":".join(["%02x" % i for i in mac64[0:3] + mac64[5:8]]) + " :%(ex)s") % locals()) def utcnow(): @@ -341,7 +317,7 @@ utcnow.override_time = None def is_older_than(before, seconds): - """Return True if before is older than seconds""" + """Return True if before is older than seconds.""" return utcnow() - before > datetime.timedelta(seconds=seconds) @@ -379,7 +355,7 @@ def 
isotime(at=None): def parse_isotime(timestr): - """Turn an iso formatted time back into a datetime""" + """Turn an iso formatted time back into a datetime.""" return datetime.datetime.strptime(timestr, TIME_FORMAT) @@ -433,16 +409,19 @@ class LazyPluggable(object): class LoopingCallDone(Exception): - """The poll-function passed to LoopingCall can raise this exception to + """Exception to break out and stop a LoopingCall. + + The poll-function passed to LoopingCall can raise this exception to break out of the loop normally. This is somewhat analogous to StopIteration. An optional return-value can be included as the argument to the exception; this return-value will be returned by LoopingCall.wait() + """ def __init__(self, retvalue=True): - """:param retvalue: Value that LoopingCall.wait() should return""" + """:param retvalue: Value that LoopingCall.wait() should return.""" self.retvalue = retvalue @@ -463,6 +442,8 @@ class LoopingCall(object): try: while self._running: self.f(*self.args, **self.kw) + if not self._running: + break greenthread.sleep(interval) except LoopingCallDone, e: self.stop() @@ -493,7 +474,7 @@ def xhtml_escape(value): http://github.com/facebook/tornado/blob/master/tornado/escape.py """ - return saxutils.escape(value, {'"': """}) + return saxutils.escape(value, {'"': '"'}) def utf8(value): @@ -504,7 +485,7 @@ def utf8(value): """ if isinstance(value, unicode): - return value.encode("utf-8") + return value.encode('utf-8') assert isinstance(value, str) return value @@ -554,7 +535,7 @@ class _NoopContextManager(object): def synchronized(name, external=False): - """Synchronization decorator + """Synchronization decorator. Decorating a method like so: @synchronized('mylock') @@ -578,6 +559,7 @@ def synchronized(name, external=False): multiple processes. This means that if two different workers both run a a method decorated with @synchronized('mylock', external=True), only one of them will execute at a time. 
+ """ def wrap(f): @@ -590,13 +572,13 @@ def synchronized(name, external=False): _semaphores[name] = semaphore.Semaphore() sem = _semaphores[name] LOG.debug(_('Attempting to grab semaphore "%(lock)s" for method ' - '"%(method)s"...' % {"lock": name, - "method": f.__name__})) + '"%(method)s"...' % {'lock': name, + 'method': f.__name__})) with sem: if external: LOG.debug(_('Attempting to grab file lock "%(lock)s" for ' 'method "%(method)s"...' % - {"lock": name, "method": f.__name__})) + {'lock': name, 'method': f.__name__})) lock_file_path = os.path.join(FLAGS.lock_path, 'nova-%s.lock' % name) lock = lockfile.FileLock(lock_file_path) @@ -617,21 +599,23 @@ def synchronized(name, external=False): def get_from_path(items, path): - """ Returns a list of items matching the specified path. Takes an - XPath-like expression e.g. prop1/prop2/prop3, and for each item in items, - looks up items[prop1][prop2][prop3]. Like XPath, if any of the + """Returns a list of items matching the specified path. + + Takes an XPath-like expression e.g. prop1/prop2/prop3, and for each item + in items, looks up items[prop1][prop2][prop3]. Like XPath, if any of the intermediate results are lists it will treat each list item individually. A 'None' in items or any child expressions will be ignored, this function will not throw because of None (anywhere) in items. The returned list - will contain no None values.""" + will contain no None values. 
+ """ if path is None: - raise exception.Error("Invalid mini_xpath") + raise exception.Error('Invalid mini_xpath') - (first_token, sep, remainder) = path.partition("/") + (first_token, sep, remainder) = path.partition('/') - if first_token == "": - raise exception.Error("Invalid mini_xpath") + if first_token == '': + raise exception.Error('Invalid mini_xpath') results = [] @@ -645,7 +629,7 @@ def get_from_path(items, path): for item in items: if item is None: continue - get_method = getattr(item, "get", None) + get_method = getattr(item, 'get', None) if get_method is None: continue child = get_method(first_token) @@ -666,7 +650,7 @@ def get_from_path(items, path): def flatten_dict(dict_, flattened=None): - """Recursively flatten a nested dictionary""" + """Recursively flatten a nested dictionary.""" flattened = flattened or {} for key, value in dict_.iteritems(): if hasattr(value, 'iteritems'): @@ -677,9 +661,7 @@ def flatten_dict(dict_, flattened=None): def partition_dict(dict_, keys): - """Return two dicts, one containing only `keys` the other containing - everything but `keys` - """ + """Return two dicts, one with `keys` the other with everything else.""" intersection = {} difference = {} for key, value in dict_.iteritems(): @@ -691,9 +673,7 @@ def partition_dict(dict_, keys): def map_dict_keys(dict_, key_map): - """Return a dictionary in which the dictionaries keys are mapped to - new keys. 
- """ + """Return a dict in which the dictionaries keys are mapped to new keys.""" mapped = {} for key, value in dict_.iteritems(): mapped_key = key_map[key] if key in key_map else key @@ -702,15 +682,45 @@ def map_dict_keys(dict_, key_map): def subset_dict(dict_, keys): - """Return a dict that only contains a subset of keys""" + """Return a dict that only contains a subset of keys.""" subset = partition_dict(dict_, keys)[0] return subset def check_isinstance(obj, cls): - """Checks that obj is of type cls, and lets PyLint infer types""" + """Checks that obj is of type cls, and lets PyLint infer types.""" if isinstance(obj, cls): return obj - raise Exception(_("Expected object of type: %s") % (str(cls))) + raise Exception(_('Expected object of type: %s') % (str(cls))) # TODO(justinsb): Can we make this better?? return cls() # Ugly PyLint hack + + +def parse_server_string(server_str): + """ + Parses the given server_string and returns a list of host and port. + If it's not a combination of host part and port, the port element + is a null string. If the input is invalid expression, return a null + list. + """ + try: + # First of all, exclude pure IPv6 address (w/o port). + if netaddr.valid_ipv6(server_str): + return (server_str, '') + + # Next, check if this is IPv6 address with a port number combination. 
+ if server_str.find("]:") != -1: + (address, port) = server_str.replace('[', '', 1).split(']:') + return (address, port) + + # Third, check if this is a combination of an address and a port + if server_str.find(':') == -1: + return (server_str, '') + + # This must be a combination of an address and a port + (address, port) = server_str.split(':') + return (address, port) + + except: + LOG.debug(_('Invalid server_string: %s' % server_str)) + return ('', '') diff --git a/nova/version.py b/nova/version.py index c3ecc2245..1f8d08e8c 100644 --- a/nova/version.py +++ b/nova/version.py @@ -21,9 +21,9 @@ except ImportError: 'revision_id': 'LOCALREVISION', 'revno': 0} -NOVA_VERSION = ['2011', '2'] -YEAR, COUNT = NOVA_VERSION +NOVA_VERSION = ['2011', '3'] +YEAR, COUNT = NOVA_VERSION FINAL = False # This becomes true at Release Candidate time @@ -39,8 +39,8 @@ def version_string(): def vcs_version_string(): - return "%s:%s" % (version_info['branch_nick'], version_info['revision_id']) + return '%s:%s' % (version_info['branch_nick'], version_info['revision_id']) def version_string_with_vcs(): - return "%s-%s" % (canonical_version_string(), vcs_version_string()) + return '%s-%s' % (canonical_version_string(), vcs_version_string()) diff --git a/nova/virt/connection.py b/nova/virt/connection.py index 99a8849f1..aeec17c98 100644 --- a/nova/virt/connection.py +++ b/nova/virt/connection.py @@ -27,9 +27,9 @@ from nova import utils from nova.virt import driver from nova.virt import fake from nova.virt import hyperv -from nova.virt import libvirt_conn from nova.virt import vmwareapi_conn from nova.virt import xenapi_conn +from nova.virt.libvirt import connection as libvirt_conn LOG = logging.getLogger("nova.virt.connection") diff --git a/nova/virt/disk.py b/nova/virt/disk.py index ddea1a1f7..f8aea1f34 100644 --- a/nova/virt/disk.py +++ b/nova/virt/disk.py @@ -81,34 +81,36 @@ def inject_data(image, key=None, net=None, partition=None, nbd=False): else: mapped_device = device - # We can 
only loopback mount raw images. If the device isn't there, - # it's normally because it's a .vmdk or a .vdi etc - if not os.path.exists(mapped_device): - raise exception.Error('Mapped device was not found (we can' - ' only inject raw disk images): %s' % - mapped_device) - - # Configure ext2fs so that it doesn't auto-check every N boots - out, err = utils.execute('sudo', 'tune2fs', - '-c', 0, '-i', 0, mapped_device) - - tmpdir = tempfile.mkdtemp() try: - # mount loopback to dir - out, err = utils.execute( - 'sudo', 'mount', mapped_device, tmpdir) - if err: - raise exception.Error(_('Failed to mount filesystem: %s') - % err) - + # We can only loopback mount raw images. If the device isn't there, + # it's normally because it's a .vmdk or a .vdi etc + if not os.path.exists(mapped_device): + raise exception.Error('Mapped device was not found (we can' + ' only inject raw disk images): %s' % + mapped_device) + + # Configure ext2fs so that it doesn't auto-check every N boots + out, err = utils.execute('sudo', 'tune2fs', + '-c', 0, '-i', 0, mapped_device) + + tmpdir = tempfile.mkdtemp() try: - inject_data_into_fs(tmpdir, key, net, utils.execute) + # mount loopback to dir + out, err = utils.execute( + 'sudo', 'mount', mapped_device, tmpdir) + if err: + raise exception.Error(_('Failed to mount filesystem: %s') + % err) + + try: + inject_data_into_fs(tmpdir, key, net, utils.execute) + finally: + # unmount device + utils.execute('sudo', 'umount', mapped_device) finally: - # unmount device - utils.execute('sudo', 'umount', mapped_device) + # remove temporary directory + utils.execute('rmdir', tmpdir) finally: - # remove temporary directory - utils.execute('rmdir', tmpdir) if not partition is None: # remove partitions utils.execute('sudo', 'kpartx', '-d', device) diff --git a/nova/virt/fake.py b/nova/virt/fake.py index 3f030833e..970434c6c 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -288,8 +288,7 @@ class FakeConnection(driver.ComputeDriver): knowledge of the 
instance """ if instance_name not in self.instances: - raise exception.NotFound(_("Instance %s Not Found") - % instance_name) + raise exception.InstanceNotFound(instance_id=instance_name) i = self.instances[instance_name] return {'state': i.state, 'max_mem': 0, @@ -368,7 +367,7 @@ class FakeConnection(driver.ComputeDriver): return [0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L] def get_console_output(self, instance): - return 'FAKE CONSOLE OUTPUT' + return 'FAKE CONSOLE\xffOUTPUT' def get_ajax_console(self, instance): return {'token': 'FAKETOKEN', diff --git a/nova/virt/hyperv.py b/nova/virt/hyperv.py index 13f403a66..1142e97a4 100644 --- a/nova/virt/hyperv.py +++ b/nova/virt/hyperv.py @@ -143,8 +143,7 @@ class HyperVConnection(driver.ComputeDriver): """ Create a new VM and start it.""" vm = self._lookup(instance.name) if vm is not None: - raise exception.Duplicate(_('Attempt to create duplicate vm %s') % - instance.name) + raise exception.InstanceExists(name=instance.name) user = manager.AuthManager().get_user(instance['user_id']) project = manager.AuthManager().get_project(instance['project_id']) @@ -368,7 +367,7 @@ class HyperVConnection(driver.ComputeDriver): """Reboot the specified instance.""" vm = self._lookup(instance.name) if vm is None: - raise exception.NotFound('instance not present %s' % instance.name) + raise exception.InstanceNotFound(instance_id=instance.id) self._set_vm_state(instance.name, 'Reboot') def destroy(self, instance): @@ -412,7 +411,7 @@ class HyperVConnection(driver.ComputeDriver): """Get information about the VM""" vm = self._lookup(instance_id) if vm is None: - raise exception.NotFound('instance not present %s' % instance_id) + raise exception.InstanceNotFound(instance_id=instance_id) vm = self._conn.Msvm_ComputerSystem(ElementName=instance_id)[0] vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] vmsettings = vm.associators( @@ -474,14 +473,12 @@ class HyperVConnection(driver.ComputeDriver): def attach_volume(self, instance_name, 
device_path, mountpoint): vm = self._lookup(instance_name) if vm is None: - raise exception.NotFound('Cannot attach volume to missing %s vm' - % instance_name) + raise exception.InstanceNotFound(instance_id=instance_name) def detach_volume(self, instance_name, mountpoint): vm = self._lookup(instance_name) if vm is None: - raise exception.NotFound('Cannot detach volume from missing %s ' - % instance_name) + raise exception.InstanceNotFound(instance_id=instance_name) def poll_rescued_instances(self, timeout): pass @@ -489,3 +486,11 @@ class HyperVConnection(driver.ComputeDriver): def update_available_resource(self, ctxt, host): """This method is supported only by libvirt.""" return + + def update_host_status(self): + """See xenapi_conn.py implementation.""" + pass + + def get_host_stats(self, refresh=False): + """See xenapi_conn.py implementation.""" + pass diff --git a/nova/virt/images.py b/nova/virt/images.py index 2e3f2ee4d..02c898fda 100644 --- a/nova/virt/images.py +++ b/nova/virt/images.py @@ -21,19 +21,10 @@ Handling of VM disk images. """ -import os.path -import shutil -import sys -import time -import urllib2 -import urlparse - from nova import context from nova import flags from nova import log as logging from nova import utils -from nova.auth import manager -from nova.auth import signer FLAGS = flags.FLAGS @@ -52,66 +43,6 @@ def fetch(image_id, path, _user, _project): return metadata -# NOTE(vish): The methods below should be unnecessary, but I'm leaving -# them in case the glance client does not work on windows. 
-def _fetch_image_no_curl(url, path, headers): - request = urllib2.Request(url) - for (k, v) in headers.iteritems(): - request.add_header(k, v) - - def urlretrieve(urlfile, fpath): - chunk = 1 * 1024 * 1024 - f = open(fpath, "wb") - while 1: - data = urlfile.read(chunk) - if not data: - break - f.write(data) - - urlopened = urllib2.urlopen(request) - urlretrieve(urlopened, path) - LOG.debug(_("Finished retreving %(url)s -- placed in %(path)s") % locals()) - - -def _fetch_s3_image(image, path, user, project): - url = image_url(image) - - # This should probably move somewhere else, like e.g. a download_as - # method on User objects and at the same time get rewritten to use - # a web client. - headers = {} - headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime()) - - (_, _, url_path, _, _, _) = urlparse.urlparse(url) - access = manager.AuthManager().get_access_key(user, project) - signature = signer.Signer(user.secret.encode()).s3_authorization(headers, - 'GET', - url_path) - headers['Authorization'] = 'AWS %s:%s' % (access, signature) - - if sys.platform.startswith('win'): - return _fetch_image_no_curl(url, path, headers) - else: - cmd = ['/usr/bin/curl', '--fail', '--silent', url] - for (k, v) in headers.iteritems(): - cmd += ['-H', '\'%s: %s\'' % (k, v)] - - cmd += ['-o', path] - return utils.execute(*cmd) - - -def _fetch_local_image(image, path, user, project): - source = _image_path(os.path.join(image, 'image')) - if sys.platform.startswith('win'): - return shutil.copy(source, path) - else: - return utils.execute('cp', source, path) - - -def _image_path(path): - return os.path.join(FLAGS.images_path, path) - - # TODO(vish): xenapi should use the glance client code directly instead # of retrieving the image using this method. 
def image_url(image): diff --git a/nova/virt/libvirt/__init__.py b/nova/virt/libvirt/__init__.py new file mode 100644 index 000000000..e69de29bb --- /dev/null +++ b/nova/virt/libvirt/__init__.py diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt/connection.py index ae9e84120..dc2dd1219 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt/connection.py @@ -58,7 +58,6 @@ from nova import db from nova import exception from nova import flags from nova import log as logging -#from nova import test from nova import utils from nova import vnc from nova.auth import manager @@ -67,20 +66,23 @@ from nova.compute import power_state from nova.virt import disk from nova.virt import driver from nova.virt import images +from nova.virt.libvirt import netutils + libvirt = None libxml2 = None Template = None + LOG = logging.getLogger('nova.virt.libvirt_conn') + FLAGS = flags.FLAGS flags.DECLARE('live_migration_retry_count', 'nova.compute.manager') # TODO(vish): These flags should probably go into a shared location flags.DEFINE_string('rescue_image_id', 'ami-rescue', 'Rescue ami image') flags.DEFINE_string('rescue_kernel_id', 'aki-rescue', 'Rescue aki image') flags.DEFINE_string('rescue_ramdisk_id', 'ari-rescue', 'Rescue ari image') - flags.DEFINE_string('libvirt_xml_template', utils.abspath('virt/libvirt.xml.template'), 'Libvirt XML Template') @@ -102,7 +104,7 @@ flags.DEFINE_string('ajaxterm_portrange', '10000-12000', 'Range of ports that ajaxterm should randomly try to bind') flags.DEFINE_string('firewall_driver', - 'nova.virt.libvirt_conn.IptablesFirewallDriver', + 'nova.virt.libvirt.firewall.IptablesFirewallDriver', 'Firewall driver (defaults to iptables)') flags.DEFINE_string('cpuinfo_xml_template', utils.abspath('virt/cpuinfo.xml.template'), @@ -117,6 +119,8 @@ flags.DEFINE_integer('live_migration_bandwidth', 0, 'Define live migration behavior') flags.DEFINE_string('qemu_img', 'qemu-img', 'binary to use for qemu-img commands') 
+flags.DEFINE_bool('start_guests_on_host_boot', False, + 'Whether to restart guests when the host reboots') def get_connection(read_only): @@ -142,66 +146,6 @@ def _late_load_cheetah(): Template = t.Template -def _get_net_and_mask(cidr): - net = IPy.IP(cidr) - return str(net.net()), str(net.netmask()) - - -def _get_net_and_prefixlen(cidr): - net = IPy.IP(cidr) - return str(net.net()), str(net.prefixlen()) - - -def _get_ip_version(cidr): - net = IPy.IP(cidr) - return int(net.version()) - - -def _get_network_info(instance): - # TODO(adiantum) If we will keep this function - # we should cache network_info - admin_context = context.get_admin_context() - - ip_addresses = db.fixed_ip_get_all_by_instance(admin_context, - instance['id']) - - networks = db.network_get_all_by_instance(admin_context, - instance['id']) - network_info = [] - - def ip_dict(ip): - return { - "ip": ip.address, - "netmask": network["netmask"], - "enabled": "1"} - - def ip6_dict(ip6): - prefix = ip6.network.cidr_v6 - mac = instance.mac_address - return { - "ip": utils.to_global_ipv6(prefix, mac), - "netmask": ip6.network.netmask_v6, - "gateway": ip6.network.gateway_v6, - "enabled": "1"} - - for network in networks: - network_ips = [ip for ip in ip_addresses - if ip.network_id == network.id] - - mapping = { - 'label': network['label'], - 'gateway': network['gateway'], - 'mac': instance.mac_address, - 'dns': [network['dns']], - 'ips': [ip_dict(ip) for ip in network_ips]} - - if FLAGS.use_ipv6: - mapping['ip6s'] = [ip6_dict(ip) for ip in network_ips] - - network_info.append((network, mapping)) - return network_info - - class LibvirtConnection(driver.ComputeDriver): def __init__(self, read_only): @@ -209,7 +153,6 @@ class LibvirtConnection(driver.ComputeDriver): self.libvirt_uri = self.get_uri() self.libvirt_xml = open(FLAGS.libvirt_xml_template).read() - self.interfaces_xml = open(FLAGS.injected_network_template).read() self.cpuinfo_xml = open(FLAGS.cpuinfo_xml_template).read() self._wrapped_conn = 
None self.read_only = read_only @@ -231,12 +174,8 @@ class LibvirtConnection(driver.ComputeDriver): {'name': instance['name'], 'state': state}) db.instance_set_state(ctxt, instance['id'], state) - if state == power_state.SHUTOFF: - # TODO(soren): This is what the compute manager does when you - # terminate # an instance. At some point I figure we'll have a - # "terminated" state and some sort of cleanup job that runs - # occasionally, cleaning them out. - db.instance_destroy(ctxt, instance['id']) + # NOTE(justinsb): We no longer delete SHUTOFF instances, + # the user may want to power them back on if state != power_state.RUNNING: continue @@ -311,29 +250,62 @@ class LibvirtConnection(driver.ComputeDriver): return infos def destroy(self, instance, cleanup=True): + instance_name = instance['name'] + try: - virt_dom = self._conn.lookupByName(instance['name']) - virt_dom.destroy() - except Exception as _err: - pass - # If the instance is already terminated, we're still happy - - # We'll save this for when we do shutdown, - # instead of destroy - but destroy returns immediately - timer = utils.LoopingCall(f=None) + virt_dom = self._lookup_by_name(instance_name) + except exception.NotFound: + virt_dom = None - while True: + # If the instance is already terminated, we're still happy + # Otherwise, destroy it + if virt_dom is not None: try: - state = self.get_info(instance['name'])['state'] - db.instance_set_state(context.get_admin_context(), - instance['id'], state) - if state == power_state.SHUTDOWN: - break - except Exception: - db.instance_set_state(context.get_admin_context(), - instance['id'], - power_state.SHUTDOWN) - break + virt_dom.destroy() + except libvirt.libvirtError as e: + is_okay = False + errcode = e.get_error_code() + if errcode == libvirt.VIR_ERR_OPERATION_INVALID: + # If the instance if already shut off, we get this: + # Code=55 Error=Requested operation is not valid: + # domain is not running + (state, _max_mem, _mem, _cpus, _t) = virt_dom.info() + if 
state == power_state.SHUTOFF: + is_okay = True + + if not is_okay: + LOG.warning(_("Error from libvirt during destroy of " + "%(instance_name)s. Code=%(errcode)s " + "Error=%(e)s") % + locals()) + raise + + try: + # NOTE(justinsb): We remove the domain definition. We probably + # would do better to keep it if cleanup=False (e.g. volumes?) + # (e.g. #2 - not losing machines on failure) + virt_dom.undefine() + except libvirt.libvirtError as e: + errcode = e.get_error_code() + LOG.warning(_("Error from libvirt during undefine of " + "%(instance_name)s. Code=%(errcode)s " + "Error=%(e)s") % + locals()) + raise + + def _wait_for_destroy(): + """Called at an interval until the VM is gone.""" + instance_name = instance['name'] + + try: + state = self.get_info(instance_name)['state'] + except exception.NotFound: + msg = _("Instance %s destroyed successfully.") % instance_name + LOG.info(msg) + raise utils.LoopingCallDone + + timer = utils.LoopingCall(_wait_for_destroy) + timer.start(interval=0.5, now=True) self.firewall_driver.unfilter_instance(instance) @@ -354,7 +326,7 @@ class LibvirtConnection(driver.ComputeDriver): @exception.wrap_exception def attach_volume(self, instance_name, device_path, mountpoint): - virt_dom = self._conn.lookupByName(instance_name) + virt_dom = self._lookup_by_name(instance_name) mount_device = mountpoint.rpartition("/")[2] if device_path.startswith('/dev/'): xml = """<disk type='block'> @@ -372,7 +344,7 @@ class LibvirtConnection(driver.ComputeDriver): name, mount_device) else: - raise exception.Invalid(_("Invalid device path %s") % device_path) + raise exception.InvalidDevicePath(path=device_path) virt_dom.attachDevice(xml) @@ -391,18 +363,18 @@ class LibvirtConnection(driver.ComputeDriver): if child.prop('dev') == device: return str(node) finally: - if ctx != None: + if ctx is not None: ctx.xpathFreeContext() - if doc != None: + if doc is not None: doc.freeDoc() @exception.wrap_exception def detach_volume(self, instance_name, mountpoint): - 
virt_dom = self._conn.lookupByName(instance_name) + virt_dom = self._lookup_by_name(instance_name) mount_device = mountpoint.rpartition("/")[2] xml = self._get_disk_xml(virt_dom.XMLDesc(0), mount_device) if not xml: - raise exception.NotFound(_("No disk at %s") % mount_device) + raise exception.DiskNotFound(location=mount_device) virt_dom.detachDevice(xml) @exception.wrap_exception @@ -415,7 +387,7 @@ class LibvirtConnection(driver.ComputeDriver): """ image_service = utils.import_object(FLAGS.image_service) - virt_dom = self._conn.lookupByName(instance['name']) + virt_dom = self._lookup_by_name(instance['name']) elevated = context.get_admin_context() base = image_service.show(elevated, instance['image_id']) @@ -423,9 +395,8 @@ class LibvirtConnection(driver.ComputeDriver): metadata = {'disk_format': base['disk_format'], 'container_format': base['container_format'], 'is_public': False, + 'name': '%s.%s' % (base['name'], image_id), 'properties': {'architecture': base['architecture'], - 'type': base['type'], - 'name': '%s.%s' % (base['name'], image_id), 'kernel_id': instance['kernel_id'], 'image_location': 'snapshot', 'image_state': 'available', @@ -452,12 +423,17 @@ class LibvirtConnection(driver.ComputeDriver): # Export the snapshot to a raw image temp_dir = tempfile.mkdtemp() out_path = os.path.join(temp_dir, snapshot_name) - qemu_img_cmd = '%s convert -f qcow2 -O raw -s %s %s %s' % ( - FLAGS.qemu_img, - snapshot_name, - disk_path, - out_path) - utils.execute(qemu_img_cmd) + qemu_img_cmd = (FLAGS.qemu_img, + 'convert', + '-f', + 'qcow2', + '-O', + 'raw', + '-s', + snapshot_name, + disk_path, + out_path) + utils.execute(*qemu_img_cmd) # Upload that image to the image service with open(out_path) as image_file: @@ -471,31 +447,43 @@ class LibvirtConnection(driver.ComputeDriver): @exception.wrap_exception def reboot(self, instance): + """Reboot a virtual machine, given an instance reference. 
+ + This method actually destroys and re-creates the domain to ensure the + reboot happens, as the guest OS cannot ignore this action. + + """ + virt_dom = self._conn.lookupByName(instance['name']) + # NOTE(itoumsn): Use XML delived from the running instance + # instead of using to_xml(instance). This is almost the ultimate + # stupid workaround. + xml = virt_dom.XMLDesc(0) + # NOTE(itoumsn): self.shutdown() and wait instead of self.destroy() is + # better because we cannot ensure flushing dirty buffers + # in the guest OS. But, in case of KVM, shutdown() does not work... self.destroy(instance, False) - xml = self.to_xml(instance) self.firewall_driver.setup_basic_filtering(instance) self.firewall_driver.prepare_instance_filter(instance) - self._conn.createXML(xml, 0) + self._create_new_domain(xml) self.firewall_driver.apply_instance_filter(instance) - timer = utils.LoopingCall(f=None) - def _wait_for_reboot(): + """Called at an interval until the VM is running again.""" + instance_name = instance['name'] + try: - state = self.get_info(instance['name'])['state'] - db.instance_set_state(context.get_admin_context(), - instance['id'], state) - if state == power_state.RUNNING: - LOG.debug(_('instance %s: rebooted'), instance['name']) - timer.stop() - except Exception, exn: - LOG.exception(_('_wait_for_reboot failed: %s'), exn) - db.instance_set_state(context.get_admin_context(), - instance['id'], - power_state.SHUTDOWN) - timer.stop() + state = self.get_info(instance_name)['state'] + except exception.NotFound: + msg = _("During reboot, %s disappeared.") % instance_name + LOG.error(msg) + raise utils.LoopingCallDone + + if state == power_state.RUNNING: + msg = _("Instance %s rebooted successfully.") % instance_name + LOG.info(msg) + raise utils.LoopingCallDone - timer.f = _wait_for_reboot + timer = utils.LoopingCall(_wait_for_reboot) return timer.start(interval=0.5, now=True) @exception.wrap_exception @@ -515,7 +503,15 @@ class LibvirtConnection(driver.ComputeDriver): 
raise exception.ApiError("resume not supported for libvirt") @exception.wrap_exception - def rescue(self, instance, callback=None): + def rescue(self, instance): + """Loads a VM using rescue images. + + A rescue is normally performed when something goes wrong with the + primary images and data needs to be corrected/recovered. Rescuing + should not edit or over-ride the original image, only allow for + data recovery. + + """ self.destroy(instance, False) xml = self.to_xml(instance, rescue=True) @@ -523,31 +519,35 @@ class LibvirtConnection(driver.ComputeDriver): 'kernel_id': FLAGS.rescue_kernel_id, 'ramdisk_id': FLAGS.rescue_ramdisk_id} self._create_image(instance, xml, '.rescue', rescue_images) - self._conn.createXML(xml, 0) - - timer = utils.LoopingCall(f=None) + self._create_new_domain(xml) def _wait_for_rescue(): + """Called at an interval until the VM is running again.""" + instance_name = instance['name'] + try: - state = self.get_info(instance['name'])['state'] - db.instance_set_state(None, instance['id'], state) - if state == power_state.RUNNING: - LOG.debug(_('instance %s: rescued'), instance['name']) - timer.stop() - except Exception, exn: - LOG.exception(_('_wait_for_rescue failed: %s'), exn) - db.instance_set_state(None, - instance['id'], - power_state.SHUTDOWN) - timer.stop() + state = self.get_info(instance_name)['state'] + except exception.NotFound: + msg = _("During reboot, %s disappeared.") % instance_name + LOG.error(msg) + raise utils.LoopingCallDone - timer.f = _wait_for_rescue + if state == power_state.RUNNING: + msg = _("Instance %s rescued successfully.") % instance_name + LOG.info(msg) + raise utils.LoopingCallDone + + timer = utils.LoopingCall(_wait_for_rescue) return timer.start(interval=0.5, now=True) @exception.wrap_exception - def unrescue(self, instance, callback=None): - # NOTE(vish): Because reboot destroys and recreates an instance using - # the normal xml file, we can just call reboot here + def unrescue(self, instance): + """Reboot 
the VM which is being rescued back into primary images. + + Because reboot destroys and re-creates instances, unresue should + simply call reboot. + + """ self.reboot(instance) @exception.wrap_exception @@ -558,37 +558,36 @@ class LibvirtConnection(driver.ComputeDriver): # for xenapi(tr3buchet) @exception.wrap_exception def spawn(self, instance, network_info=None): - xml = self.to_xml(instance, network_info) - db.instance_set_state(context.get_admin_context(), - instance['id'], - power_state.NOSTATE, - 'launching') + xml = self.to_xml(instance, False, network_info) self.firewall_driver.setup_basic_filtering(instance, network_info) self.firewall_driver.prepare_instance_filter(instance, network_info) - self._create_image(instance, xml, network_info) - self._conn.createXML(xml, 0) + self._create_image(instance, xml, network_info=network_info) + domain = self._create_new_domain(xml) LOG.debug(_("instance %s: is running"), instance['name']) self.firewall_driver.apply_instance_filter(instance) - timer = utils.LoopingCall(f=None) + if FLAGS.start_guests_on_host_boot: + LOG.debug(_("instance %s: setting autostart ON") % + instance['name']) + domain.setAutostart(1) def _wait_for_boot(): + """Called at an interval until the VM is running.""" + instance_name = instance['name'] + try: - state = self.get_info(instance['name'])['state'] - db.instance_set_state(context.get_admin_context(), - instance['id'], state) - if state == power_state.RUNNING: - LOG.debug(_('instance %s: booted'), instance['name']) - timer.stop() - except: - LOG.exception(_('instance %s: failed to boot'), - instance['name']) - db.instance_set_state(context.get_admin_context(), - instance['id'], - power_state.SHUTDOWN) - timer.stop() + state = self.get_info(instance_name)['state'] + except exception.NotFound: + msg = _("During reboot, %s disappeared.") % instance_name + LOG.error(msg) + raise utils.LoopingCallDone + + if state == power_state.RUNNING: + msg = _("Instance %s spawned successfully.") % 
instance_name + LOG.info(msg) + raise utils.LoopingCallDone - timer.f = _wait_for_boot + timer = utils.LoopingCall(_wait_for_boot) return timer.start(interval=0.5, now=True) def _flush_xen_console(self, virsh_output): @@ -654,7 +653,7 @@ class LibvirtConnection(driver.ComputeDriver): raise Exception(_('Unable to find an open port')) def get_pty_for_instance(instance_name): - virt_dom = self._conn.lookupByName(instance_name) + virt_dom = self._lookup_by_name(instance_name) xml = virt_dom.XMLDesc(0) dom = minidom.parseString(xml) @@ -676,10 +675,13 @@ class LibvirtConnection(driver.ComputeDriver): subprocess.Popen(cmd, shell=True) return {'token': token, 'host': host, 'port': port} + def get_host_ip_addr(self): + return FLAGS.my_ip + @exception.wrap_exception def get_vnc_console(self, instance): def get_vnc_port_for_instance(instance_name): - virt_dom = self._conn.lookupByName(instance_name) + virt_dom = self._lookup_by_name(instance_name) xml = virt_dom.XMLDesc(0) # TODO: use etree instead of minidom dom = minidom.parseString(xml) @@ -743,7 +745,7 @@ class LibvirtConnection(driver.ComputeDriver): def _create_image(self, inst, libvirt_xml, suffix='', disk_images=None, network_info=None): if not network_info: - network_info = _get_network_info(inst) + network_info = netutils.get_network_info(inst) if not suffix: suffix = '' @@ -797,7 +799,10 @@ class LibvirtConnection(driver.ComputeDriver): root_fname = '%08x' % int(disk_images['image_id']) size = FLAGS.minimum_root_size - if inst['instance_type'] == 'm1.tiny' or suffix == '.rescue': + + inst_type_id = inst['instance_type_id'] + inst_type = instance_types.get_instance_type(inst_type_id) + if inst_type['name'] == 'm1.tiny' or suffix == '.rescue': size = None root_fname += "_sm" @@ -809,14 +814,13 @@ class LibvirtConnection(driver.ComputeDriver): user=user, project=project, size=size) - type_data = instance_types.get_instance_type(inst['instance_type']) - if type_data['local_gb']: + if inst_type['local_gb']: 
self._cache_image(fn=self._create_local, target=basepath('disk.local'), - fname="local_%s" % type_data['local_gb'], + fname="local_%s" % inst_type['local_gb'], cow=FLAGS.use_cow_images, - local_gb=type_data['local_gb']) + local_gb=inst_type['local_gb']) # For now, we assume that if we're not using a kernel, we're using a # partitioned disk image where the target partition is the first @@ -899,26 +903,16 @@ class LibvirtConnection(driver.ComputeDriver): mac_id = mapping['mac'].replace(':', '') if FLAGS.allow_project_net_traffic: + template = "<parameter name=\"%s\"value=\"%s\" />\n" + net, mask = netutils.get_net_and_mask(network['cidr']) + values = [("PROJNET", net), ("PROJMASK", mask)] if FLAGS.use_ipv6: - net, mask = _get_net_and_mask(network['cidr']) - net_v6, prefixlen_v6 = _get_net_and_prefixlen( + net_v6, prefixlen_v6 = netutils.get_net_and_prefixlen( network['cidr_v6']) - extra_params = ("<parameter name=\"PROJNET\" " - "value=\"%s\" />\n" - "<parameter name=\"PROJMASK\" " - "value=\"%s\" />\n" - "<parameter name=\"PROJNETV6\" " - "value=\"%s\" />\n" - "<parameter name=\"PROJMASKV6\" " - "value=\"%s\" />\n") % \ - (net, mask, net_v6, prefixlen_v6) - else: - net, mask = _get_net_and_mask(network['cidr']) - extra_params = ("<parameter name=\"PROJNET\" " - "value=\"%s\" />\n" - "<parameter name=\"PROJMASK\" " - "value=\"%s\" />\n") % \ - (net, mask) + values.extend([("PROJNETV6", net_v6), + ("PROJMASKV6", prefixlen_v6)]) + + extra_params = "".join([template % value for value in values]) else: extra_params = "\n" @@ -936,22 +930,18 @@ class LibvirtConnection(driver.ComputeDriver): return result - def to_xml(self, instance, rescue=False, network_info=None): - # TODO(termie): cache? 
- LOG.debug(_('instance %s: starting toXML method'), instance['name']) - + def _prepare_xml_info(self, instance, rescue=False, network_info=None): # TODO(adiantum) remove network_info creation code # when multinics will be completed if not network_info: - network_info = _get_network_info(instance) + network_info = netutils.get_network_info(instance) nics = [] for (network, mapping) in network_info: - nics.append(self._get_nic_for_xml(network, - mapping)) + nics.append(self._get_nic_for_xml(network, mapping)) # FIXME(vish): stick this in db - instance_type_name = instance['instance_type'] - instance_type = instance_types.get_instance_type(instance_type_name) + inst_type_id = instance['instance_type_id'] + inst_type = instance_types.get_instance_type(inst_type_id) if FLAGS.use_cow_images: driver_type = 'qcow2' @@ -962,10 +952,10 @@ class LibvirtConnection(driver.ComputeDriver): 'name': instance['name'], 'basepath': os.path.join(FLAGS.instances_path, instance['name']), - 'memory_kb': instance_type['memory_mb'] * 1024, - 'vcpus': instance_type['vcpus'], + 'memory_kb': inst_type['memory_mb'] * 1024, + 'vcpus': inst_type['vcpus'], 'rescue': rescue, - 'local': instance_type['local_gb'], + 'local': inst_type['local_gb'], 'driver_type': driver_type, 'nics': nics} @@ -980,18 +970,43 @@ class LibvirtConnection(driver.ComputeDriver): xml_info['ramdisk'] = xml_info['basepath'] + "/ramdisk" xml_info['disk'] = xml_info['basepath'] + "/disk" + return xml_info + def to_xml(self, instance, rescue=False, network_info=None): + # TODO(termie): cache? 
+ LOG.debug(_('instance %s: starting toXML method'), instance['name']) + xml_info = self._prepare_xml_info(instance, rescue, network_info) xml = str(Template(self.libvirt_xml, searchList=[xml_info])) - LOG.debug(_('instance %s: finished toXML method'), - instance['name']) + LOG.debug(_('instance %s: finished toXML method'), instance['name']) return xml - def get_info(self, instance_name): + def _lookup_by_name(self, instance_name): + """Retrieve libvirt domain object given an instance name. + + All libvirt error handling should be handled in this method and + relevant nova exceptions should be raised in response. + + """ try: - virt_dom = self._conn.lookupByName(instance_name) - except: - raise exception.NotFound(_("Instance %s not found") - % instance_name) + return self._conn.lookupByName(instance_name) + except libvirt.libvirtError as ex: + error_code = ex.get_error_code() + if error_code == libvirt.VIR_ERR_NO_DOMAIN: + raise exception.InstanceNotFound(instance_id=instance_name) + + msg = _("Error from libvirt while looking up %(instance_name)s: " + "[Error Code %(error_code)s] %(ex)s") % locals() + raise exception.Error(msg) + + def get_info(self, instance_name): + """Retrieve information from libvirt for a specific instance name. + + If a libvirt error is encountered during lookup, we might raise a + NotFound exception or Error exception depending on how severe the + libvirt error is. + + """ + virt_dom = self._lookup_by_name(instance_name) (state, max_mem, mem, num_cpu, cpu_time) = virt_dom.info() return {'state': state, 'max_mem': max_mem, @@ -999,6 +1014,24 @@ class LibvirtConnection(driver.ComputeDriver): 'num_cpu': num_cpu, 'cpu_time': cpu_time} + def _create_new_domain(self, xml, persistent=True, launch_flags=0): + # NOTE(justinsb): libvirt has two types of domain: + # * a transient domain disappears when the guest is shutdown + # or the host is rebooted. 
+ # * a permanent domain is not automatically deleted + # NOTE(justinsb): Even for ephemeral instances, transient seems risky + + if persistent: + # To create a persistent domain, first define it, then launch it. + domain = self._conn.defineXML(xml) + + domain.createWithFlags(launch_flags) + else: + # createXML call creates a transient domain + domain = self._conn.createXML(xml, launch_flags) + + return domain + def get_diagnostics(self, instance_name): raise exception.ApiError(_("diagnostics are not supported " "for libvirt")) @@ -1010,7 +1043,7 @@ class LibvirtConnection(driver.ComputeDriver): Returns a list of all block devices for this domain. """ - domain = self._conn.lookupByName(instance_name) + domain = self._lookup_by_name(instance_name) # TODO(devcamcar): Replace libxml2 with etree. xml = domain.XMLDesc(0) doc = None @@ -1033,14 +1066,14 @@ class LibvirtConnection(driver.ComputeDriver): if child.name == 'target': devdst = child.prop('dev') - if devdst == None: + if devdst is None: continue disks.append(devdst) finally: - if ctx != None: + if ctx is not None: ctx.xpathFreeContext() - if doc != None: + if doc is not None: doc.freeDoc() return disks @@ -1052,7 +1085,7 @@ class LibvirtConnection(driver.ComputeDriver): Returns a list of all network interfaces for this instance. """ - domain = self._conn.lookupByName(instance_name) + domain = self._lookup_by_name(instance_name) # TODO(devcamcar): Replace libxml2 with etree. 
xml = domain.XMLDesc(0) doc = None @@ -1075,14 +1108,14 @@ class LibvirtConnection(driver.ComputeDriver): if child.name == 'target': devdst = child.prop('dev') - if devdst == None: + if devdst is None: continue interfaces.append(devdst) finally: - if ctx != None: + if ctx is not None: ctx.xpathFreeContext() - if doc != None: + if doc is not None: doc.freeDoc() return interfaces @@ -1218,9 +1251,9 @@ class LibvirtConnection(driver.ComputeDriver): xml = libxml2.parseDoc(xml) nodes = xml.xpathEval('//host/cpu') if len(nodes) != 1: - raise exception.Invalid(_("Invalid xml. '<cpu>' must be 1," - "but %d\n") % len(nodes) - + xml.serialize()) + reason = _("'<cpu>' must be 1, but %d\n") % len(nodes) + reason += xml.serialize() + raise exception.InvalidCPUInfo(reason=reason) cpu_info = dict() @@ -1249,9 +1282,8 @@ class LibvirtConnection(driver.ComputeDriver): tkeys = topology.keys() if set(tkeys) != set(keys): ks = ', '.join(keys) - raise exception.Invalid(_("Invalid xml: topology" - "(%(topology)s) must have " - "%(ks)s") % locals()) + reason = _("topology (%(topology)s) must have %(ks)s") + raise exception.InvalidCPUInfo(reason=reason % locals()) feature_nodes = xml.xpathEval('//host/cpu/feature') features = list() @@ -1267,7 +1299,7 @@ class LibvirtConnection(driver.ComputeDriver): Note that this function takes an instance name, not an Instance, so that it can be called by monitor. """ - domain = self._conn.lookupByName(instance_name) + domain = self._lookup_by_name(instance_name) return domain.blockStats(disk) def interface_stats(self, instance_name, interface): @@ -1275,7 +1307,7 @@ class LibvirtConnection(driver.ComputeDriver): Note that this function takes an instance name, not an Instance, so that it can be called by monitor. 
""" - domain = self._conn.lookupByName(instance_name) + domain = self._lookup_by_name(instance_name) return domain.interfaceStats(interface) def get_console_pool_info(self, console_type): @@ -1309,9 +1341,7 @@ class LibvirtConnection(driver.ComputeDriver): try: service_ref = db.service_get_all_compute_by_host(ctxt, host)[0] except exception.NotFound: - raise exception.Invalid(_("Cannot update compute manager " - "specific info, because no service " - "record was found.")) + raise exception.ComputeServiceUnavailable(host=host) # Updating host information dic = {'vcpus': self.get_vcpu_total(), @@ -1364,7 +1394,7 @@ class LibvirtConnection(driver.ComputeDriver): raise if ret <= 0: - raise exception.Invalid(m % locals()) + raise exception.InvalidCPUInfo(reason=m % locals()) return @@ -1405,18 +1435,13 @@ class LibvirtConnection(driver.ComputeDriver): # wait for completion timeout_count = range(FLAGS.live_migration_retry_count) while timeout_count: - try: - filter_name = 'nova-instance-%s' % instance_ref.name - self._conn.nwfilterLookupByName(filter_name) + if self.firewall_driver.instance_filter_exists(instance_ref): break - except libvirt.libvirtError: - timeout_count.pop() - if len(timeout_count) == 0: - ec2_id = instance_ref['hostname'] - iname = instance_ref.name - msg = _('Timeout migrating for %(ec2_id)s(%(iname)s)') - raise exception.Error(msg % locals()) - time.sleep(1) + timeout_count.pop() + if len(timeout_count) == 0: + msg = _('Timeout migrating for %s. nwfilter not found.') + raise exception.Error(msg % instance_ref.name) + time.sleep(1) def live_migration(self, ctxt, instance_ref, dest, post_method, recover_method): @@ -1479,7 +1504,7 @@ class LibvirtConnection(driver.ComputeDriver): FLAGS.live_migration_bandwidth) except Exception: - recover_method(ctxt, instance_ref) + recover_method(ctxt, instance_ref, dest=dest) raise # Waiting for completion of live_migration. 
@@ -1500,725 +1525,11 @@ class LibvirtConnection(driver.ComputeDriver): """See comments of same method in firewall_driver.""" self.firewall_driver.unfilter_instance(instance_ref) - -class FirewallDriver(object): - def prepare_instance_filter(self, instance, network_info=None): - """Prepare filters for the instance. - - At this point, the instance isn't running yet.""" - raise NotImplementedError() - - def unfilter_instance(self, instance): - """Stop filtering instance""" - raise NotImplementedError() - - def apply_instance_filter(self, instance): - """Apply instance filter. - - Once this method returns, the instance should be firewalled - appropriately. This method should as far as possible be a - no-op. It's vastly preferred to get everything set up in - prepare_instance_filter. - """ - raise NotImplementedError() - - def refresh_security_group_rules(self, security_group_id): - """Refresh security group rules from data store - - Gets called when a rule has been added to or removed from - the security group.""" - raise NotImplementedError() - - def refresh_security_group_members(self, security_group_id): - """Refresh security group members from data store - - Gets called when an instance gets added to or removed from - the security group.""" - raise NotImplementedError() - - def refresh_provider_fw_rules(self): - """Refresh common rules for all hosts/instances from data store. - - Gets called when a rule has been added to or removed from - the list of rules (via admin api). - - """ - raise NotImplementedError() - - def setup_basic_filtering(self, instance, network_info=None): - """Create rules to block spoofing and allow dhcp. - - This gets called when spawning an instance, before - :method:`prepare_instance_filter`. - - """ - raise NotImplementedError() - - -class NWFilterFirewall(FirewallDriver): - """ - This class implements a network filtering mechanism versatile - enough for EC2 style Security Group filtering by leveraging - libvirt's nwfilter. 
- - First, all instances get a filter ("nova-base-filter") applied. - This filter provides some basic security such as protection against - MAC spoofing, IP spoofing, and ARP spoofing. - - This filter drops all incoming ipv4 and ipv6 connections. - Outgoing connections are never blocked. - - Second, every security group maps to a nwfilter filter(*). - NWFilters can be updated at runtime and changes are applied - immediately, so changes to security groups can be applied at - runtime (as mandated by the spec). - - Security group rules are named "nova-secgroup-<id>" where <id> - is the internal id of the security group. They're applied only on - hosts that have instances in the security group in question. - - Updates to security groups are done by updating the data model - (in response to API calls) followed by a request sent to all - the nodes with instances in the security group to refresh the - security group. - - Each instance has its own NWFilter, which references the above - mentioned security group NWFilters. This was done because - interfaces can only reference one filter while filters can - reference multiple other filters. This has the added benefit of - actually being able to add and remove security groups from an - instance at run time. This functionality is not exposed anywhere, - though. - - Outstanding questions: - - The name is unique, so would there be any good reason to sync - the uuid across the nodes (by assigning it from the datamodel)? - - - (*) This sentence brought to you by the redundancy department of - redundancy. - - """ - - def __init__(self, get_connection, **kwargs): - self._libvirt_get_connection = get_connection - self.static_filters_configured = False - self.handle_security_groups = False - - def apply_instance_filter(self, instance): - """No-op. 
Everything is done in prepare_instance_filter""" + def update_host_status(self): + """See xenapi_conn.py implementation.""" pass - def _get_connection(self): - return self._libvirt_get_connection() - _conn = property(_get_connection) - - def nova_dhcp_filter(self): - """The standard allow-dhcp-server filter is an <ip> one, so it uses - ebtables to allow traffic through. Without a corresponding rule in - iptables, it'll get blocked anyway.""" - - return '''<filter name='nova-allow-dhcp-server' chain='ipv4'> - <uuid>891e4787-e5c0-d59b-cbd6-41bc3c6b36fc</uuid> - <rule action='accept' direction='out' - priority='100'> - <udp srcipaddr='0.0.0.0' - dstipaddr='255.255.255.255' - srcportstart='68' - dstportstart='67'/> - </rule> - <rule action='accept' direction='in' - priority='100'> - <udp srcipaddr='$DHCPSERVER' - srcportstart='67' - dstportstart='68'/> - </rule> - </filter>''' - - def nova_ra_filter(self): - return '''<filter name='nova-allow-ra-server' chain='root'> - <uuid>d707fa71-4fb5-4b27-9ab7-ba5ca19c8804</uuid> - <rule action='accept' direction='inout' - priority='100'> - <icmpv6 srcipaddr='$RASERVER'/> - </rule> - </filter>''' - - def setup_basic_filtering(self, instance, network_info=None): - """Set up basic filtering (MAC, IP, and ARP spoofing protection)""" - logging.info('called setup_basic_filtering in nwfilter') - - if not network_info: - network_info = _get_network_info(instance) - - if self.handle_security_groups: - # No point in setting up a filter set that we'll be overriding - # anyway. - return - - logging.info('ensuring static filters') - self._ensure_static_filters() - - for (network, mapping) in network_info: - nic_id = mapping['mac'].replace(':', '') - instance_filter_name = self._instance_filter_name(instance, nic_id) - self._define_filter(self._filter_container(instance_filter_name, - ['nova-base'])) - - def _ensure_static_filters(self): - """Static filters are filters that have no need to be IP aware. 
- - There is no configuration or tuneability of these filters, so they - can be set up once and forgotten about. - - """ - - if self.static_filters_configured: - return - - self._define_filter(self._filter_container('nova-base', - ['no-mac-spoofing', - 'no-ip-spoofing', - 'no-arp-spoofing', - 'allow-dhcp-server'])) - self._define_filter(self.nova_base_ipv4_filter) - self._define_filter(self.nova_base_ipv6_filter) - self._define_filter(self.nova_dhcp_filter) - self._define_filter(self.nova_ra_filter) - self._define_filter(self.nova_vpn_filter) - if FLAGS.allow_project_net_traffic: - self._define_filter(self.nova_project_filter) - if FLAGS.use_ipv6: - self._define_filter(self.nova_project_filter_v6) - - self.static_filters_configured = True - - def _filter_container(self, name, filters): - xml = '''<filter name='%s' chain='root'>%s</filter>''' % ( - name, - ''.join(["<filterref filter='%s'/>" % (f,) for f in filters])) - return xml - - nova_vpn_filter = '''<filter name='nova-vpn' chain='root'> - <uuid>2086015e-cf03-11df-8c5d-080027c27973</uuid> - <filterref filter='allow-dhcp-server'/> - <filterref filter='nova-allow-dhcp-server'/> - <filterref filter='nova-base-ipv4'/> - <filterref filter='nova-base-ipv6'/> - </filter>''' - - def nova_base_ipv4_filter(self): - retval = "<filter name='nova-base-ipv4' chain='ipv4'>" - for protocol in ['tcp', 'udp', 'icmp']: - for direction, action, priority in [('out', 'accept', 399), - ('in', 'drop', 400)]: - retval += """<rule action='%s' direction='%s' priority='%d'> - <%s /> - </rule>""" % (action, direction, - priority, protocol) - retval += '</filter>' - return retval - - def nova_base_ipv6_filter(self): - retval = "<filter name='nova-base-ipv6' chain='ipv6'>" - for protocol in ['tcp-ipv6', 'udp-ipv6', 'icmpv6']: - for direction, action, priority in [('out', 'accept', 399), - ('in', 'drop', 400)]: - retval += """<rule action='%s' direction='%s' priority='%d'> - <%s /> - </rule>""" % (action, direction, - priority, protocol) - 
retval += '</filter>' - return retval - - def nova_project_filter(self): - retval = "<filter name='nova-project' chain='ipv4'>" - for protocol in ['tcp', 'udp', 'icmp']: - retval += """<rule action='accept' direction='in' priority='200'> - <%s srcipaddr='$PROJNET' srcipmask='$PROJMASK' /> - </rule>""" % protocol - retval += '</filter>' - return retval - - def nova_project_filter_v6(self): - retval = "<filter name='nova-project-v6' chain='ipv6'>" - for protocol in ['tcp-ipv6', 'udp-ipv6', 'icmpv6']: - retval += """<rule action='accept' direction='inout' - priority='200'> - <%s srcipaddr='$PROJNETV6' - srcipmask='$PROJMASKV6' /> - </rule>""" % (protocol) - retval += '</filter>' - return retval - - def _define_filter(self, xml): - if callable(xml): - xml = xml() - # execute in a native thread and block current greenthread until done - tpool.execute(self._conn.nwfilterDefineXML, xml) - - def unfilter_instance(self, instance): - # Nothing to do + def get_host_stats(self, refresh=False): + """See xenapi_conn.py implementation.""" pass - def prepare_instance_filter(self, instance, network_info=None): - """Creates an NWFilter for the given instance. - - In the process, it makes sure the filters for the provider blocks, - security groups, and base filter are all in place. 
- - """ - if not network_info: - network_info = _get_network_info(instance) - if instance['image_id'] == FLAGS.vpn_image_id: - base_filter = 'nova-vpn' - else: - base_filter = 'nova-base' - - self.refresh_provider_fw_rules() - - ctxt = context.get_admin_context() - - instance_secgroup_filter_name = \ - '%s-secgroup' % (self._instance_filter_name(instance)) - - instance_secgroup_filter_children = ['nova-base-ipv4', - 'nova-base-ipv6', - 'nova-allow-dhcp-server'] - - for security_group in \ - db.security_group_get_by_instance(ctxt, instance['id']): - - self.refresh_security_group_rules(security_group['id']) - - instance_secgroup_filter_children += [('nova-secgroup-%s' % - security_group['id'])] - - self._define_filter( - self._filter_container(instance_secgroup_filter_name, - instance_secgroup_filter_children)) - - for (network, mapping) in network_info: - nic_id = mapping['mac'].replace(':', '') - instance_filter_name = self._instance_filter_name(instance, nic_id) - instance_filter_children = [base_filter, 'nova-provider-rules', - instance_secgroup_filter_name] - - if FLAGS.use_ipv6: - gateway_v6 = network['gateway_v6'] - - if gateway_v6: - instance_secgroup_filter_children += \ - ['nova-allow-ra-server'] - - if FLAGS.allow_project_net_traffic: - instance_filter_children += ['nova-project'] - if FLAGS.use_ipv6: - instance_filter_children += ['nova-project-v6'] - - self._define_filter( - self._filter_container(instance_filter_name, - instance_filter_children)) - - return - - def refresh_security_group_rules(self, security_group_id): - return self._define_filter( - self.security_group_to_nwfilter_xml(security_group_id)) - - def refresh_provider_fw_rules(self): - """Update rules for all instances. - - This is part of the FirewallDriver API and is called when the - provider firewall rules change in the database. 
In the - `prepare_instance_filter` we add a reference to the - 'nova-provider-rules' filter for each instance's firewall, and - by changing that filter we update them all. - - """ - xml = self.provider_fw_to_nwfilter_xml() - return self._define_filter(xml) - - def security_group_to_nwfilter_xml(self, security_group_id): - security_group = db.security_group_get(context.get_admin_context(), - security_group_id) - rule_xml = "" - v6protocol = {'tcp': 'tcp-ipv6', 'udp': 'udp-ipv6', 'icmp': 'icmpv6'} - for rule in security_group.rules: - rule_xml += "<rule action='accept' direction='in' priority='300'>" - if rule.cidr: - version = _get_ip_version(rule.cidr) - if(FLAGS.use_ipv6 and version == 6): - net, prefixlen = _get_net_and_prefixlen(rule.cidr) - rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \ - (v6protocol[rule.protocol], net, prefixlen) - else: - net, mask = _get_net_and_mask(rule.cidr) - rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \ - (rule.protocol, net, mask) - if rule.protocol in ['tcp', 'udp']: - rule_xml += "dstportstart='%s' dstportend='%s' " % \ - (rule.from_port, rule.to_port) - elif rule.protocol == 'icmp': - LOG.info('rule.protocol: %r, rule.from_port: %r, ' - 'rule.to_port: %r', rule.protocol, - rule.from_port, rule.to_port) - if rule.from_port != -1: - rule_xml += "type='%s' " % rule.from_port - if rule.to_port != -1: - rule_xml += "code='%s' " % rule.to_port - - rule_xml += '/>\n' - rule_xml += "</rule>\n" - xml = "<filter name='nova-secgroup-%s' " % security_group_id - if(FLAGS.use_ipv6): - xml += "chain='root'>%s</filter>" % rule_xml - else: - xml += "chain='ipv4'>%s</filter>" % rule_xml - return xml - - def provider_fw_to_nwfilter_xml(self): - """Compose a filter of drop rules from specified cidrs.""" - rule_xml = "" - v6protocol = {'tcp': 'tcp-ipv6', 'udp': 'udp-ipv6', 'icmp': 'icmpv6'} - rules = db.provider_fw_rule_get_all(context.get_admin_context()) - for rule in rules: - rule_xml += "<rule action='block' direction='in' 
priority='150'>" - version = _get_ip_version(rule.cidr) - if(FLAGS.use_ipv6 and version == 6): - net, prefixlen = _get_net_and_prefixlen(rule.cidr) - rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \ - (v6protocol[rule.protocol], net, prefixlen) - else: - net, mask = _get_net_and_mask(rule.cidr) - rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \ - (rule.protocol, net, mask) - if rule.protocol in ['tcp', 'udp']: - rule_xml += "dstportstart='%s' dstportend='%s' " % \ - (rule.from_port, rule.to_port) - elif rule.protocol == 'icmp': - LOG.info('rule.protocol: %r, rule.from_port: %r, ' - 'rule.to_port: %r', rule.protocol, - rule.from_port, rule.to_port) - if rule.from_port != -1: - rule_xml += "type='%s' " % rule.from_port - if rule.to_port != -1: - rule_xml += "code='%s' " % rule.to_port - - rule_xml += '/>\n' - rule_xml += "</rule>\n" - xml = "<filter name='nova-provider-rules' " - if(FLAGS.use_ipv6): - xml += "chain='root'>%s</filter>" % rule_xml - else: - xml += "chain='ipv4'>%s</filter>" % rule_xml - return xml - - def _instance_filter_name(self, instance, nic_id=None): - if not nic_id: - return 'nova-instance-%s' % (instance['name']) - return 'nova-instance-%s-%s' % (instance['name'], nic_id) - - -class IptablesFirewallDriver(FirewallDriver): - def __init__(self, execute=None, **kwargs): - from nova.network import linux_net - self.iptables = linux_net.iptables_manager - self.instances = {} - self.nwfilter = NWFilterFirewall(kwargs['get_connection']) - self.basicly_filtered = False - - self.iptables.ipv4['filter'].add_chain('sg-fallback') - self.iptables.ipv4['filter'].add_rule('sg-fallback', '-j DROP') - self.iptables.ipv6['filter'].add_chain('sg-fallback') - self.iptables.ipv6['filter'].add_rule('sg-fallback', '-j DROP') - - def setup_basic_filtering(self, instance, network_info=None): - """Set up provider rules and basic NWFilter.""" - if not network_info: - network_info = _get_network_info(instance) - self.nwfilter.setup_basic_filtering(instance, 
network_info) - if not self.basicly_filtered: - LOG.debug("Setup Basic Filtering") - self.refresh_provider_fw_rules() - self.basicly_filtered = True - - def apply_instance_filter(self, instance): - """No-op. Everything is done in prepare_instance_filter""" - pass - - def unfilter_instance(self, instance): - if self.instances.pop(instance['id'], None): - self.remove_filters_for_instance(instance) - self.iptables.apply() - else: - LOG.info(_('Attempted to unfilter instance %s which is not ' - 'filtered'), instance['id']) - - def prepare_instance_filter(self, instance, network_info=None): - if not network_info: - network_info = _get_network_info(instance) - self.instances[instance['id']] = instance - self.add_filters_for_instance(instance, network_info) - self.iptables.apply() - - def add_filters_for_instance(self, instance, network_info=None): - if not network_info: - network_info = _get_network_info(instance) - chain_name = self._instance_chain_name(instance) - - self.iptables.ipv4['filter'].add_chain(chain_name) - self.iptables.ipv4['filter'].empty_chain(chain_name) - - ips_v4 = [ip['ip'] for (_, mapping) in network_info - for ip in mapping['ips']] - - for ipv4_address in ips_v4: - self.iptables.ipv4['filter'].add_rule('local', - '-d %s -j $%s' % - (ipv4_address, chain_name)) - - if FLAGS.use_ipv6: - self.iptables.ipv6['filter'].add_chain(chain_name) - self.iptables.ipv6['filter'].empty_chain(chain_name) - ips_v6 = [ip['ip'] for (_, mapping) in network_info - for ip in mapping['ip6s']] - - for ipv6_address in ips_v6: - self.iptables.ipv6['filter'].add_rule('local', - '-d %s -j $%s' % - (ipv6_address, - chain_name)) - - ipv4_rules, ipv6_rules = self.instance_rules(instance, network_info) - for rule in ipv4_rules: - self.iptables.ipv4['filter'].add_rule(chain_name, rule) - - if FLAGS.use_ipv6: - for rule in ipv6_rules: - self.iptables.ipv6['filter'].add_rule(chain_name, rule) - - def remove_filters_for_instance(self, instance): - chain_name = 
self._instance_chain_name(instance) - - self.iptables.ipv4['filter'].remove_chain(chain_name) - if FLAGS.use_ipv6: - self.iptables.ipv6['filter'].remove_chain(chain_name) - - def instance_rules(self, instance, network_info=None): - if not network_info: - network_info = _get_network_info(instance) - ctxt = context.get_admin_context() - - ipv4_rules = [] - ipv6_rules = [] - - # Always drop invalid packets - ipv4_rules += ['-m state --state ' 'INVALID -j DROP'] - ipv6_rules += ['-m state --state ' 'INVALID -j DROP'] - - # Pass through provider-wide drops - ipv4_rules += ['-j $provider'] - ipv6_rules += ['-j $provider'] - - # Allow established connections - ipv4_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT'] - ipv6_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT'] - - dhcp_servers = [network['gateway'] for (network, _m) in network_info] - - for dhcp_server in dhcp_servers: - ipv4_rules.append('-s %s -p udp --sport 67 --dport 68 ' - '-j ACCEPT' % (dhcp_server,)) - - #Allow project network traffic - if FLAGS.allow_project_net_traffic: - cidrs = [network['cidr'] for (network, _m) in network_info] - for cidr in cidrs: - ipv4_rules.append('-s %s -j ACCEPT' % (cidr,)) - - # We wrap these in FLAGS.use_ipv6 because they might cause - # a DB lookup. The other ones are just list operations, so - # they're not worth the clutter. 
- if FLAGS.use_ipv6: - # Allow RA responses - gateways_v6 = [network['gateway_v6'] for (network, _m) in - network_info] - for gateway_v6 in gateways_v6: - ipv6_rules.append( - '-s %s/128 -p icmpv6 -j ACCEPT' % (gateway_v6,)) - - #Allow project network traffic - if FLAGS.allow_project_net_traffic: - cidrv6s = [network['cidr_v6'] for (network, _m) - in network_info] - - for cidrv6 in cidrv6s: - ipv6_rules.append('-s %s -j ACCEPT' % (cidrv6,)) - - security_groups = db.security_group_get_by_instance(ctxt, - instance['id']) - - # then, security group chains and rules - for security_group in security_groups: - rules = db.security_group_rule_get_by_security_group(ctxt, - security_group['id']) - - for rule in rules: - LOG.debug(_("Adding security group rule: %r"), rule) - - if not rule.cidr: - # Eventually, a mechanism to grant access for security - # groups will turn up here. It'll use ipsets. - continue - - version = _get_ip_version(rule.cidr) - if version == 4: - fw_rules = ipv4_rules - else: - fw_rules = ipv6_rules - - protocol = rule.protocol - if version == 6 and rule.protocol == 'icmp': - protocol = 'icmpv6' - - args = ['-p', protocol, '-s', rule.cidr] - - if rule.protocol in ['udp', 'tcp']: - if rule.from_port == rule.to_port: - args += ['--dport', '%s' % (rule.from_port,)] - else: - args += ['-m', 'multiport', - '--dports', '%s:%s' % (rule.from_port, - rule.to_port)] - elif rule.protocol == 'icmp': - icmp_type = rule.from_port - icmp_code = rule.to_port - - if icmp_type == -1: - icmp_type_arg = None - else: - icmp_type_arg = '%s' % icmp_type - if not icmp_code == -1: - icmp_type_arg += '/%s' % icmp_code - - if icmp_type_arg: - if version == 4: - args += ['-m', 'icmp', '--icmp-type', - icmp_type_arg] - elif version == 6: - args += ['-m', 'icmp6', '--icmpv6-type', - icmp_type_arg] - - args += ['-j ACCEPT'] - fw_rules += [' '.join(args)] - - ipv4_rules += ['-j $sg-fallback'] - ipv6_rules += ['-j $sg-fallback'] - - return ipv4_rules, ipv6_rules - - def 
refresh_security_group_members(self, security_group): - pass - - def refresh_security_group_rules(self, security_group): - self.do_refresh_security_group_rules(security_group) - self.iptables.apply() - - @utils.synchronized('iptables', external=True) - def do_refresh_security_group_rules(self, security_group): - for instance in self.instances.values(): - self.remove_filters_for_instance(instance) - self.add_filters_for_instance(instance) - - def refresh_provider_fw_rules(self): - """See class:FirewallDriver: docs.""" - self._do_refresh_provider_fw_rules() - self.iptables.apply() - - @utils.synchronized('iptables', external=True) - def _do_refresh_provider_fw_rules(self): - """Internal, synchronized version of refresh_provider_fw_rules.""" - self._purge_provider_fw_rules() - self._build_provider_fw_rules() - - def _purge_provider_fw_rules(self): - """Remove all rules from the provider chains.""" - self.iptables.ipv4['filter'].empty_chain('provider') - if FLAGS.use_ipv6: - self.iptables.ipv6['filter'].empty_chain('provider') - - def _build_provider_fw_rules(self): - """Create all rules for the provider IP DROPs.""" - self.iptables.ipv4['filter'].add_chain('provider') - if FLAGS.use_ipv6: - self.iptables.ipv6['filter'].add_chain('provider') - ipv4_rules, ipv6_rules = self._provider_rules() - for rule in ipv4_rules: - self.iptables.ipv4['filter'].add_rule('provider', rule) - - if FLAGS.use_ipv6: - for rule in ipv6_rules: - self.iptables.ipv6['filter'].add_rule('provider', rule) - - def _provider_rules(self): - """Generate a list of rules from provider for IP4 & IP6.""" - ctxt = context.get_admin_context() - ipv4_rules = [] - ipv6_rules = [] - rules = db.provider_fw_rule_get_all(ctxt) - for rule in rules: - LOG.debug(_('Adding prvider rule: %r'), rule) - - if not rule.cidr: - # Eventually, a mechanism to grant access for security - # groups will turn up here. It'll use ipsets. 
- continue - - version = _get_ip_version(rule.cidr) - if version == 4: - fw_rules = ipv4_rules - else: - fw_rules = ipv6_rules - - protocol = rule.protocol - if version == 6 and protocol == 'icmp': - protocol = 'icmpv6' - - args = ['-p', protocol, '-s', rule.cidr] - - if protocol in ['udp', 'tcp']: - if rule.from_port == rule.to_port: - args += ['--dport', '%s' % (rule.from_port,)] - else: - args += ['-m', 'multiport', - '--dports', '%s:%s' % (rule.from_port, - rule.to_port)] - elif protocol == 'icmp': - icmp_type = rule.from_port - icmp_code = rule.to_port - - if icmp_type == -1: - icmp_type_arg = None - else: - icmp_type_arg = '%s' % icmp_type - if not icmp_code == -1: - icmp_type_arg += '/%s' % icmp_code - - if icmp_type_arg: - if version == 4: - args += ['-m', 'icmp', '--icmp-type', - icmp_type_arg] - elif version == 6: - args += ['-m', 'icmp6', '--icmpv6-type', - icmp_type_arg] - args += ['-j DROP'] - fw_rules += [' '.join(args)] - return ipv4_rules, ipv6_rules - - def _security_group_chain_name(self, security_group_id): - return 'nova-sg-%s' % (security_group_id,) - - def _instance_chain_name(self, instance): - return 'inst-%s' % (instance['id'],) diff --git a/nova/virt/libvirt/firewall.py b/nova/virt/libvirt/firewall.py new file mode 100644 index 000000000..fd4c120a6 --- /dev/null +++ b/nova/virt/libvirt/firewall.py @@ -0,0 +1,798 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# Copyright (c) 2010 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from eventlet import tpool + +from nova import context +from nova import db +from nova import flags +from nova import log as logging +from nova import utils +from nova.virt.libvirt import netutils + + +LOG = logging.getLogger("nova.virt.libvirt.firewall") +FLAGS = flags.FLAGS + + +try: + import libvirt +except ImportError: + LOG.warn(_("Libvirt module could not be loaded. NWFilterFirewall will " + "not work correctly.")) + + +class FirewallDriver(object): + def prepare_instance_filter(self, instance, network_info=None): + """Prepare filters for the instance. + + At this point, the instance isn't running yet.""" + raise NotImplementedError() + + def unfilter_instance(self, instance): + """Stop filtering instance""" + raise NotImplementedError() + + def apply_instance_filter(self, instance): + """Apply instance filter. + + Once this method returns, the instance should be firewalled + appropriately. This method should as far as possible be a + no-op. It's vastly preferred to get everything set up in + prepare_instance_filter. 
+ """ + raise NotImplementedError() + + def refresh_security_group_rules(self, + security_group_id, + network_info=None): + """Refresh security group rules from data store + + Gets called when a rule has been added to or removed from + the security group.""" + raise NotImplementedError() + + def refresh_security_group_members(self, security_group_id): + """Refresh security group members from data store + + Gets called when an instance gets added to or removed from + the security group.""" + raise NotImplementedError() + + def refresh_provider_fw_rules(self): + """Refresh common rules for all hosts/instances from data store. + + Gets called when a rule has been added to or removed from + the list of rules (via admin api). + + """ + raise NotImplementedError() + + def setup_basic_filtering(self, instance, network_info=None): + """Create rules to block spoofing and allow dhcp. + + This gets called when spawning an instance, before + :method:`prepare_instance_filter`. + + """ + raise NotImplementedError() + + def instance_filter_exists(self, instance): + """Check nova-instance-instance-xxx exists""" + raise NotImplementedError() + + +class NWFilterFirewall(FirewallDriver): + """ + This class implements a network filtering mechanism versatile + enough for EC2 style Security Group filtering by leveraging + libvirt's nwfilter. + + First, all instances get a filter ("nova-base-filter") applied. + This filter provides some basic security such as protection against + MAC spoofing, IP spoofing, and ARP spoofing. + + This filter drops all incoming ipv4 and ipv6 connections. + Outgoing connections are never blocked. + + Second, every security group maps to a nwfilter filter(*). + NWFilters can be updated at runtime and changes are applied + immediately, so changes to security groups can be applied at + runtime (as mandated by the spec). + + Security group rules are named "nova-secgroup-<id>" where <id> + is the internal id of the security group. 
They're applied only on + hosts that have instances in the security group in question. + + Updates to security groups are done by updating the data model + (in response to API calls) followed by a request sent to all + the nodes with instances in the security group to refresh the + security group. + + Each instance has its own NWFilter, which references the above + mentioned security group NWFilters. This was done because + interfaces can only reference one filter while filters can + reference multiple other filters. This has the added benefit of + actually being able to add and remove security groups from an + instance at run time. This functionality is not exposed anywhere, + though. + + Outstanding questions: + + The name is unique, so would there be any good reason to sync + the uuid across the nodes (by assigning it from the datamodel)? + + + (*) This sentence brought to you by the redundancy department of + redundancy. + + """ + + def __init__(self, get_connection, **kwargs): + self._libvirt_get_connection = get_connection + self.static_filters_configured = False + self.handle_security_groups = False + + def apply_instance_filter(self, instance): + """No-op. Everything is done in prepare_instance_filter""" + pass + + def _get_connection(self): + return self._libvirt_get_connection() + _conn = property(_get_connection) + + def nova_dhcp_filter(self): + """The standard allow-dhcp-server filter is an <ip> one, so it uses + ebtables to allow traffic through. 
Without a corresponding rule in + iptables, it'll get blocked anyway.""" + + return '''<filter name='nova-allow-dhcp-server' chain='ipv4'> + <uuid>891e4787-e5c0-d59b-cbd6-41bc3c6b36fc</uuid> + <rule action='accept' direction='out' + priority='100'> + <udp srcipaddr='0.0.0.0' + dstipaddr='255.255.255.255' + srcportstart='68' + dstportstart='67'/> + </rule> + <rule action='accept' direction='in' + priority='100'> + <udp srcipaddr='$DHCPSERVER' + srcportstart='67' + dstportstart='68'/> + </rule> + </filter>''' + + def nova_ra_filter(self): + return '''<filter name='nova-allow-ra-server' chain='root'> + <uuid>d707fa71-4fb5-4b27-9ab7-ba5ca19c8804</uuid> + <rule action='accept' direction='inout' + priority='100'> + <icmpv6 srcipaddr='$RASERVER'/> + </rule> + </filter>''' + + def setup_basic_filtering(self, instance, network_info=None): + """Set up basic filtering (MAC, IP, and ARP spoofing protection)""" + logging.info('called setup_basic_filtering in nwfilter') + + if not network_info: + network_info = netutils.get_network_info(instance) + + if self.handle_security_groups: + # No point in setting up a filter set that we'll be overriding + # anyway. + return + + logging.info('ensuring static filters') + self._ensure_static_filters() + + if instance['image_id'] == str(FLAGS.vpn_image_id): + base_filter = 'nova-vpn' + else: + base_filter = 'nova-base' + + for (network, mapping) in network_info: + nic_id = mapping['mac'].replace(':', '') + instance_filter_name = self._instance_filter_name(instance, nic_id) + self._define_filter(self._filter_container(instance_filter_name, + [base_filter])) + + def _ensure_static_filters(self): + """Static filters are filters that have no need to be IP aware. + + There is no configuration or tuneability of these filters, so they + can be set up once and forgotten about. 
+ + """ + + if self.static_filters_configured: + return + + self._define_filter(self._filter_container('nova-base', + ['no-mac-spoofing', + 'no-ip-spoofing', + 'no-arp-spoofing', + 'allow-dhcp-server'])) + self._define_filter(self._filter_container('nova-vpn', + ['allow-dhcp-server'])) + self._define_filter(self.nova_base_ipv4_filter) + self._define_filter(self.nova_base_ipv6_filter) + self._define_filter(self.nova_dhcp_filter) + self._define_filter(self.nova_ra_filter) + if FLAGS.allow_project_net_traffic: + self._define_filter(self.nova_project_filter) + if FLAGS.use_ipv6: + self._define_filter(self.nova_project_filter_v6) + + self.static_filters_configured = True + + def _filter_container(self, name, filters): + xml = '''<filter name='%s' chain='root'>%s</filter>''' % ( + name, + ''.join(["<filterref filter='%s'/>" % (f,) for f in filters])) + return xml + + def nova_base_ipv4_filter(self): + retval = "<filter name='nova-base-ipv4' chain='ipv4'>" + for protocol in ['tcp', 'udp', 'icmp']: + for direction, action, priority in [('out', 'accept', 399), + ('in', 'drop', 400)]: + retval += """<rule action='%s' direction='%s' priority='%d'> + <%s /> + </rule>""" % (action, direction, + priority, protocol) + retval += '</filter>' + return retval + + def nova_base_ipv6_filter(self): + retval = "<filter name='nova-base-ipv6' chain='ipv6'>" + for protocol in ['tcp-ipv6', 'udp-ipv6', 'icmpv6']: + for direction, action, priority in [('out', 'accept', 399), + ('in', 'drop', 400)]: + retval += """<rule action='%s' direction='%s' priority='%d'> + <%s /> + </rule>""" % (action, direction, + priority, protocol) + retval += '</filter>' + return retval + + def nova_project_filter(self): + retval = "<filter name='nova-project' chain='ipv4'>" + for protocol in ['tcp', 'udp', 'icmp']: + retval += """<rule action='accept' direction='in' priority='200'> + <%s srcipaddr='$PROJNET' srcipmask='$PROJMASK' /> + </rule>""" % protocol + retval += '</filter>' + return retval + + def 
nova_project_filter_v6(self): + retval = "<filter name='nova-project-v6' chain='ipv6'>" + for protocol in ['tcp-ipv6', 'udp-ipv6', 'icmpv6']: + retval += """<rule action='accept' direction='inout' + priority='200'> + <%s srcipaddr='$PROJNETV6' + srcipmask='$PROJMASKV6' /> + </rule>""" % (protocol) + retval += '</filter>' + return retval + + def _define_filter(self, xml): + if callable(xml): + xml = xml() + # execute in a native thread and block current greenthread until done + tpool.execute(self._conn.nwfilterDefineXML, xml) + + def unfilter_instance(self, instance): + # Nothing to do + pass + + def prepare_instance_filter(self, instance, network_info=None): + """Creates an NWFilter for the given instance. + + In the process, it makes sure the filters for the provider blocks, + security groups, and base filter are all in place. + + """ + if not network_info: + network_info = netutils.get_network_info(instance) + + self.refresh_provider_fw_rules() + + ctxt = context.get_admin_context() + + instance_secgroup_filter_name = \ + '%s-secgroup' % (self._instance_filter_name(instance)) + + instance_secgroup_filter_children = ['nova-base-ipv4', + 'nova-base-ipv6', + 'nova-allow-dhcp-server'] + + if FLAGS.use_ipv6: + networks = [network for (network, _m) in network_info if + network['gateway_v6']] + + if networks: + instance_secgroup_filter_children.\ + append('nova-allow-ra-server') + + for security_group in \ + db.security_group_get_by_instance(ctxt, instance['id']): + + self.refresh_security_group_rules(security_group['id']) + + instance_secgroup_filter_children.append('nova-secgroup-%s' % + security_group['id']) + + self._define_filter( + self._filter_container(instance_secgroup_filter_name, + instance_secgroup_filter_children)) + + network_filters = self.\ + _create_network_filters(instance, network_info, + instance_secgroup_filter_name) + + for (name, children) in network_filters: + self._define_filters(name, children) + + def _create_network_filters(self, instance, 
network_info, + instance_secgroup_filter_name): + if instance['image_id'] == str(FLAGS.vpn_image_id): + base_filter = 'nova-vpn' + else: + base_filter = 'nova-base' + + result = [] + for (_n, mapping) in network_info: + nic_id = mapping['mac'].replace(':', '') + instance_filter_name = self._instance_filter_name(instance, nic_id) + instance_filter_children = [base_filter, 'nova-provider-rules', + instance_secgroup_filter_name] + + if FLAGS.allow_project_net_traffic: + instance_filter_children.append('nova-project') + if FLAGS.use_ipv6: + instance_filter_children.append('nova-project-v6') + + result.append((instance_filter_name, instance_filter_children)) + + return result + + def _define_filters(self, filter_name, filter_children): + self._define_filter(self._filter_container(filter_name, + filter_children)) + + def refresh_security_group_rules(self, + security_group_id, + network_info=None): + return self._define_filter( + self.security_group_to_nwfilter_xml(security_group_id)) + + def refresh_provider_fw_rules(self): + """Update rules for all instances. + + This is part of the FirewallDriver API and is called when the + provider firewall rules change in the database. In the + `prepare_instance_filter` we add a reference to the + 'nova-provider-rules' filter for each instance's firewall, and + by changing that filter we update them all. 
+ + """ + xml = self.provider_fw_to_nwfilter_xml() + return self._define_filter(xml) + + def security_group_to_nwfilter_xml(self, security_group_id): + security_group = db.security_group_get(context.get_admin_context(), + security_group_id) + rule_xml = "" + v6protocol = {'tcp': 'tcp-ipv6', 'udp': 'udp-ipv6', 'icmp': 'icmpv6'} + for rule in security_group.rules: + rule_xml += "<rule action='accept' direction='in' priority='300'>" + if rule.cidr: + version = netutils.get_ip_version(rule.cidr) + if(FLAGS.use_ipv6 and version == 6): + net, prefixlen = netutils.get_net_and_prefixlen(rule.cidr) + rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \ + (v6protocol[rule.protocol], net, prefixlen) + else: + net, mask = netutils.get_net_and_mask(rule.cidr) + rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \ + (rule.protocol, net, mask) + if rule.protocol in ['tcp', 'udp']: + rule_xml += "dstportstart='%s' dstportend='%s' " % \ + (rule.from_port, rule.to_port) + elif rule.protocol == 'icmp': + LOG.info('rule.protocol: %r, rule.from_port: %r, ' + 'rule.to_port: %r', rule.protocol, + rule.from_port, rule.to_port) + if rule.from_port != -1: + rule_xml += "type='%s' " % rule.from_port + if rule.to_port != -1: + rule_xml += "code='%s' " % rule.to_port + + rule_xml += '/>\n' + rule_xml += "</rule>\n" + xml = "<filter name='nova-secgroup-%s' " % security_group_id + if(FLAGS.use_ipv6): + xml += "chain='root'>%s</filter>" % rule_xml + else: + xml += "chain='ipv4'>%s</filter>" % rule_xml + return xml + + def provider_fw_to_nwfilter_xml(self): + """Compose a filter of drop rules from specified cidrs.""" + rule_xml = "" + v6protocol = {'tcp': 'tcp-ipv6', 'udp': 'udp-ipv6', 'icmp': 'icmpv6'} + rules = db.provider_fw_rule_get_all(context.get_admin_context()) + for rule in rules: + rule_xml += "<rule action='block' direction='in' priority='150'>" + version = netutils.get_ip_version(rule.cidr) + if(FLAGS.use_ipv6 and version == 6): + net, prefixlen = 
netutils.get_net_and_prefixlen(rule.cidr) + rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \ + (v6protocol[rule.protocol], net, prefixlen) + else: + net, mask = netutils.get_net_and_mask(rule.cidr) + rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \ + (rule.protocol, net, mask) + if rule.protocol in ['tcp', 'udp']: + rule_xml += "dstportstart='%s' dstportend='%s' " % \ + (rule.from_port, rule.to_port) + elif rule.protocol == 'icmp': + LOG.info('rule.protocol: %r, rule.from_port: %r, ' + 'rule.to_port: %r', rule.protocol, + rule.from_port, rule.to_port) + if rule.from_port != -1: + rule_xml += "type='%s' " % rule.from_port + if rule.to_port != -1: + rule_xml += "code='%s' " % rule.to_port + + rule_xml += '/>\n' + rule_xml += "</rule>\n" + xml = "<filter name='nova-provider-rules' " + if(FLAGS.use_ipv6): + xml += "chain='root'>%s</filter>" % rule_xml + else: + xml += "chain='ipv4'>%s</filter>" % rule_xml + return xml + + def _instance_filter_name(self, instance, nic_id=None): + if not nic_id: + return 'nova-instance-%s' % (instance['name']) + return 'nova-instance-%s-%s' % (instance['name'], nic_id) + + def instance_filter_exists(self, instance): + """Check nova-instance-instance-xxx exists""" + network_info = netutils.get_network_info(instance) + for (network, mapping) in network_info: + nic_id = mapping['mac'].replace(':', '') + instance_filter_name = self._instance_filter_name(instance, nic_id) + try: + self._conn.nwfilterLookupByName(instance_filter_name) + except libvirt.libvirtError: + name = instance.name + LOG.debug(_('The nwfilter(%(instance_filter_name)s) for' + '%(name)s is not found.') % locals()) + return False + return True + + +class IptablesFirewallDriver(FirewallDriver): + def __init__(self, execute=None, **kwargs): + from nova.network import linux_net + self.iptables = linux_net.iptables_manager + self.instances = {} + self.nwfilter = NWFilterFirewall(kwargs['get_connection']) + self.basicly_filtered = False + + 
self.iptables.ipv4['filter'].add_chain('sg-fallback') + self.iptables.ipv4['filter'].add_rule('sg-fallback', '-j DROP') + self.iptables.ipv6['filter'].add_chain('sg-fallback') + self.iptables.ipv6['filter'].add_rule('sg-fallback', '-j DROP') + + def setup_basic_filtering(self, instance, network_info=None): + """Set up provider rules and basic NWFilter.""" + if not network_info: + network_info = netutils.get_network_info(instance) + self.nwfilter.setup_basic_filtering(instance, network_info) + if not self.basicly_filtered: + LOG.debug(_("iptables firewall: Setup Basic Filtering")) + self.refresh_provider_fw_rules() + self.basicly_filtered = True + + def apply_instance_filter(self, instance): + """No-op. Everything is done in prepare_instance_filter""" + pass + + def unfilter_instance(self, instance): + if self.instances.pop(instance['id'], None): + self.remove_filters_for_instance(instance) + self.iptables.apply() + else: + LOG.info(_('Attempted to unfilter instance %s which is not ' + 'filtered'), instance['id']) + + def prepare_instance_filter(self, instance, network_info=None): + if not network_info: + network_info = netutils.get_network_info(instance) + self.instances[instance['id']] = instance + self.add_filters_for_instance(instance, network_info) + self.iptables.apply() + + def _create_filter(self, ips, chain_name): + return ['-d %s -j $%s' % (ip, chain_name) for ip in ips] + + def _filters_for_instance(self, chain_name, network_info): + ips_v4 = [ip['ip'] for (_n, mapping) in network_info + for ip in mapping['ips']] + ipv4_rules = self._create_filter(ips_v4, chain_name) + + ipv6_rules = [] + if FLAGS.use_ipv6: + ips_v6 = [ip['ip'] for (_n, mapping) in network_info + for ip in mapping['ip6s']] + ipv6_rules = self._create_filter(ips_v6, chain_name) + + return ipv4_rules, ipv6_rules + + def _add_filters(self, chain_name, ipv4_rules, ipv6_rules): + for rule in ipv4_rules: + self.iptables.ipv4['filter'].add_rule(chain_name, rule) + + if FLAGS.use_ipv6: + for rule 
in ipv6_rules: + self.iptables.ipv6['filter'].add_rule(chain_name, rule) + + def add_filters_for_instance(self, instance, network_info=None): + chain_name = self._instance_chain_name(instance) + if FLAGS.use_ipv6: + self.iptables.ipv6['filter'].add_chain(chain_name) + self.iptables.ipv4['filter'].add_chain(chain_name) + ipv4_rules, ipv6_rules = self._filters_for_instance(chain_name, + network_info) + self._add_filters('local', ipv4_rules, ipv6_rules) + ipv4_rules, ipv6_rules = self.instance_rules(instance, network_info) + self._add_filters(chain_name, ipv4_rules, ipv6_rules) + + def remove_filters_for_instance(self, instance): + chain_name = self._instance_chain_name(instance) + + self.iptables.ipv4['filter'].remove_chain(chain_name) + if FLAGS.use_ipv6: + self.iptables.ipv6['filter'].remove_chain(chain_name) + + def instance_rules(self, instance, network_info=None): + if not network_info: + network_info = netutils.get_network_info(instance) + ctxt = context.get_admin_context() + + ipv4_rules = [] + ipv6_rules = [] + + # Always drop invalid packets + ipv4_rules += ['-m state --state ' 'INVALID -j DROP'] + ipv6_rules += ['-m state --state ' 'INVALID -j DROP'] + + # Pass through provider-wide drops + ipv4_rules += ['-j $provider'] + ipv6_rules += ['-j $provider'] + + # Allow established connections + ipv4_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT'] + ipv6_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT'] + + dhcp_servers = [network['gateway'] for (network, _m) in network_info] + + for dhcp_server in dhcp_servers: + ipv4_rules.append('-s %s -p udp --sport 67 --dport 68 ' + '-j ACCEPT' % (dhcp_server,)) + + #Allow project network traffic + if FLAGS.allow_project_net_traffic: + cidrs = [network['cidr'] for (network, _m) in network_info] + for cidr in cidrs: + ipv4_rules.append('-s %s -j ACCEPT' % (cidr,)) + + # We wrap these in FLAGS.use_ipv6 because they might cause + # a DB lookup. 
The other ones are just list operations, so + # they're not worth the clutter. + if FLAGS.use_ipv6: + # Allow RA responses + gateways_v6 = [network['gateway_v6'] for (network, _m) in + network_info] + for gateway_v6 in gateways_v6: + ipv6_rules.append( + '-s %s/128 -p icmpv6 -j ACCEPT' % (gateway_v6,)) + + #Allow project network traffic + if FLAGS.allow_project_net_traffic: + cidrv6s = [network['cidr_v6'] for (network, _m) + in network_info] + + for cidrv6 in cidrv6s: + ipv6_rules.append('-s %s -j ACCEPT' % (cidrv6,)) + + security_groups = db.security_group_get_by_instance(ctxt, + instance['id']) + + # then, security group chains and rules + for security_group in security_groups: + rules = db.security_group_rule_get_by_security_group(ctxt, + security_group['id']) + + for rule in rules: + LOG.debug(_("Adding security group rule: %r"), rule) + + if not rule.cidr: + # Eventually, a mechanism to grant access for security + # groups will turn up here. It'll use ipsets. + continue + + version = netutils.get_ip_version(rule.cidr) + if version == 4: + fw_rules = ipv4_rules + else: + fw_rules = ipv6_rules + + protocol = rule.protocol + if version == 6 and rule.protocol == 'icmp': + protocol = 'icmpv6' + + args = ['-p', protocol, '-s', rule.cidr] + + if rule.protocol in ['udp', 'tcp']: + if rule.from_port == rule.to_port: + args += ['--dport', '%s' % (rule.from_port,)] + else: + args += ['-m', 'multiport', + '--dports', '%s:%s' % (rule.from_port, + rule.to_port)] + elif rule.protocol == 'icmp': + icmp_type = rule.from_port + icmp_code = rule.to_port + + if icmp_type == -1: + icmp_type_arg = None + else: + icmp_type_arg = '%s' % icmp_type + if not icmp_code == -1: + icmp_type_arg += '/%s' % icmp_code + + if icmp_type_arg: + if version == 4: + args += ['-m', 'icmp', '--icmp-type', + icmp_type_arg] + elif version == 6: + args += ['-m', 'icmp6', '--icmpv6-type', + icmp_type_arg] + + args += ['-j ACCEPT'] + fw_rules += [' '.join(args)] + + ipv4_rules += ['-j $sg-fallback'] + 
ipv6_rules += ['-j $sg-fallback'] + + return ipv4_rules, ipv6_rules + + def instance_filter_exists(self, instance): + """Check nova-instance-instance-xxx exists""" + return self.nwfilter.instance_filter_exists(instance) + + def refresh_security_group_members(self, security_group): + pass + + def refresh_security_group_rules(self, security_group, network_info=None): + self.do_refresh_security_group_rules(security_group, network_info) + self.iptables.apply() + + @utils.synchronized('iptables', external=True) + def do_refresh_security_group_rules(self, + security_group, + network_info=None): + for instance in self.instances.values(): + self.remove_filters_for_instance(instance) + if not network_info: + network_info = netutils.get_network_info(instance) + self.add_filters_for_instance(instance, network_info) + + def refresh_provider_fw_rules(self): + """See class:FirewallDriver: docs.""" + self._do_refresh_provider_fw_rules() + self.iptables.apply() + + @utils.synchronized('iptables', external=True) + def _do_refresh_provider_fw_rules(self): + """Internal, synchronized version of refresh_provider_fw_rules.""" + self._purge_provider_fw_rules() + self._build_provider_fw_rules() + + def _purge_provider_fw_rules(self): + """Remove all rules from the provider chains.""" + self.iptables.ipv4['filter'].empty_chain('provider') + if FLAGS.use_ipv6: + self.iptables.ipv6['filter'].empty_chain('provider') + + def _build_provider_fw_rules(self): + """Create all rules for the provider IP DROPs.""" + self.iptables.ipv4['filter'].add_chain('provider') + if FLAGS.use_ipv6: + self.iptables.ipv6['filter'].add_chain('provider') + ipv4_rules, ipv6_rules = self._provider_rules() + for rule in ipv4_rules: + self.iptables.ipv4['filter'].add_rule('provider', rule) + + if FLAGS.use_ipv6: + for rule in ipv6_rules: + self.iptables.ipv6['filter'].add_rule('provider', rule) + + def _provider_rules(self): + """Generate a list of rules from provider for IP4 & IP6.""" + ctxt = 
context.get_admin_context() + ipv4_rules = [] + ipv6_rules = [] + rules = db.provider_fw_rule_get_all(ctxt) + for rule in rules: + LOG.debug(_('Adding provider rule: %r'), rule) + version = netutils.get_ip_version(rule.cidr) + if version == 4: + fw_rules = ipv4_rules + else: + fw_rules = ipv6_rules + + protocol = rule.protocol + if version == 6 and protocol == 'icmp': + protocol = 'icmpv6' + + args = ['-p', protocol, '-s', rule.cidr] + + if protocol in ['udp', 'tcp']: + if rule.from_port == rule.to_port: + args += ['--dport', '%s' % (rule.from_port,)] + else: + args += ['-m', 'multiport', + '--dports', '%s:%s' % (rule.from_port, + rule.to_port)] + elif protocol == 'icmp': + icmp_type = rule.from_port + icmp_code = rule.to_port + + if icmp_type == -1: + icmp_type_arg = None + else: + icmp_type_arg = '%s' % icmp_type + if not icmp_code == -1: + icmp_type_arg += '/%s' % icmp_code + + if icmp_type_arg: + if version == 4: + args += ['-m', 'icmp', '--icmp-type', + icmp_type_arg] + elif version == 6: + args += ['-m', 'icmp6', '--icmpv6-type', + icmp_type_arg] + args += ['-j DROP'] + fw_rules += [' '.join(args)] + return ipv4_rules, ipv6_rules + + def _security_group_chain_name(self, security_group_id): + return 'nova-sg-%s' % (security_group_id,) + + def _instance_chain_name(self, instance): + return 'inst-%s' % (instance['id'],) diff --git a/nova/virt/libvirt/netutils.py b/nova/virt/libvirt/netutils.py new file mode 100644 index 000000000..4d596078a --- /dev/null +++ b/nova/virt/libvirt/netutils.py @@ -0,0 +1,97 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# Copyright (c) 2010 Citrix Systems, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +"""Network-related utilities for supporting libvirt connection code.""" + + +import IPy + +from nova import context +from nova import db +from nova import flags +from nova import ipv6 +from nova import utils + + +FLAGS = flags.FLAGS + + +def get_net_and_mask(cidr): + net = IPy.IP(cidr) + return str(net.net()), str(net.netmask()) + + +def get_net_and_prefixlen(cidr): + net = IPy.IP(cidr) + return str(net.net()), str(net.prefixlen()) + + +def get_ip_version(cidr): + net = IPy.IP(cidr) + return int(net.version()) + + +def get_network_info(instance): + # TODO(adiantum) If we will keep this function + # we should cache network_info + admin_context = context.get_admin_context() + + ip_addresses = db.fixed_ip_get_all_by_instance(admin_context, + instance['id']) + networks = db.network_get_all_by_instance(admin_context, + instance['id']) + flavor = db.instance_type_get_by_id(admin_context, + instance['instance_type_id']) + network_info = [] + + for network in networks: + network_ips = [ip for ip in ip_addresses + if ip['network_id'] == network['id']] + + def ip_dict(ip): + return { + 'ip': ip['address'], + 'netmask': network['netmask'], + 'enabled': '1'} + + def ip6_dict(): + prefix = network['cidr_v6'] + mac = instance['mac_address'] + project_id = instance['project_id'] + return { + 'ip': ipv6.to_global(prefix, mac, project_id), + 'netmask': network['netmask_v6'], + 'enabled': '1'} + + mapping = { + 'label': network['label'], + 'gateway': network['gateway'], + 'broadcast': network['broadcast'], + 'mac': instance['mac_address'], + 'rxtx_cap': 
flavor['rxtx_cap'], + 'dns': [network['dns']], + 'ips': [ip_dict(ip) for ip in network_ips]} + + if FLAGS.use_ipv6: + mapping['ip6s'] = [ip6_dict()] + mapping['gateway6'] = network['gateway_v6'] + + network_info.append((network, mapping)) + return network_info diff --git a/nova/virt/vmwareapi/fake.py b/nova/virt/vmwareapi/fake.py index 4bb467fa9..7370684bd 100644 --- a/nova/virt/vmwareapi/fake.py +++ b/nova/virt/vmwareapi/fake.py @@ -387,12 +387,11 @@ def _add_file(file_path): def _remove_file(file_path):
"""Removes a file reference from the db."""
if _db_content.get("files") is None:
- raise exception.NotFound(_("No files have been added yet"))
+ raise exception.NoFilesFound()
# Check if the remove is for a single file object or for a folder
if file_path.find(".vmdk") != -1:
if file_path not in _db_content.get("files"):
- raise exception.NotFound(_("File- '%s' is not there in the "
- "datastore") % file_path)
+ raise exception.FileNotFound(file_path=file_path)
_db_content.get("files").remove(file_path)
else:
# Removes the files in the folder and the folder too from the db
@@ -579,7 +578,7 @@ class FakeVim(object): """Searches the datastore for a file."""
ds_path = kwargs.get("datastorePath")
if _db_content.get("files", None) is None:
- raise exception.NotFound(_("No files have been added yet"))
+ raise exception.NoFilesFound()
for file in _db_content.get("files"):
if file.find(ds_path) != -1:
task_mdo = create_task(method, "success")
@@ -591,7 +590,7 @@ class FakeVim(object): """Creates a directory in the datastore."""
ds_path = kwargs.get("name")
if _db_content.get("files", None) is None:
- raise exception.NotFound(_("No files have been added yet"))
+ raise exception.NoFilesFound()
_db_content["files"].append(ds_path)
def _set_power_state(self, method, vm_ref, pwr_state="poweredOn"):
diff --git a/nova/virt/vmwareapi/vim.py b/nova/virt/vmwareapi/vim.py index 159e16a80..0cbdba363 100644 --- a/nova/virt/vmwareapi/vim.py +++ b/nova/virt/vmwareapi/vim.py @@ -43,6 +43,7 @@ flags.DEFINE_string('vmwareapi_wsdl_loc', if suds:
+
class VIMMessagePlugin(suds.plugin.MessagePlugin):
def addAttributeForValue(self, node):
diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py index cf6c88bbd..c3e79a92f 100644 --- a/nova/virt/vmwareapi/vmops.py +++ b/nova/virt/vmwareapi/vmops.py @@ -100,8 +100,7 @@ class VMWareVMOps(object): """
vm_ref = self._get_vm_ref_from_the_name(instance.name)
if vm_ref:
- raise exception.Duplicate(_("Attempted to create a VM with a name"
- " %s, but that already exists on the host") % instance.name)
+ raise exception.InstanceExists(name=instance.name)
client_factory = self._session._get_vim().client.factory
service_content = self._session._get_vim().get_service_content()
@@ -116,8 +115,7 @@ class VMWareVMOps(object): network_utils.get_network_with_the_name(self._session,
net_name)
if network_ref is None:
- raise exception.NotFound(_("Network with the name '%s' doesn't"
- " exist on the ESX host") % net_name)
+ raise exception.NetworkNotFoundForBridge(bridge=net_name)
_check_if_network_bridge_exists()
@@ -337,8 +335,7 @@ class VMWareVMOps(object): """
vm_ref = self._get_vm_ref_from_the_name(instance.name)
if vm_ref is None:
- raise exception.NotFound(_("instance - %s not present") %
- instance.name)
+ raise exception.InstanceNotFound(instance_id=instance.id)
client_factory = self._session._get_vim().client.factory
service_content = self._session._get_vim().get_service_content()
@@ -388,8 +385,7 @@ class VMWareVMOps(object): "VirtualMachine",
"datastore")
if not ds_ref_ret:
- raise exception.NotFound(_("Failed to get the datastore "
- "reference(s) which the VM uses"))
+ raise exception.DatastoreNotFound()
ds_ref = ds_ref_ret.ManagedObjectReference[0]
ds_browser = vim_util.get_dynamic_property(
self._session._get_vim(),
@@ -480,8 +476,7 @@ class VMWareVMOps(object): """Reboot a VM instance."""
vm_ref = self._get_vm_ref_from_the_name(instance.name)
if vm_ref is None:
- raise exception.NotFound(_("instance - %s not present") %
- instance.name)
+ raise exception.InstanceNotFound(instance_id=instance.id)
lst_properties = ["summary.guest.toolsStatus", "runtime.powerState",
"summary.guest.toolsRunningStatus"]
props = self._session._call_method(vim_util, "get_object_properties",
@@ -501,8 +496,8 @@ class VMWareVMOps(object): # Raise an exception if the VM is not powered On.
if pwr_state not in ["poweredOn"]:
- raise exception.Invalid(_("instance - %s not poweredOn. So can't "
- "be rebooted.") % instance.name)
+ reason = _("instance is not powered on")
+ raise exception.InstanceRebootFailure(reason=reason)
# If latest vmware tools are installed in the VM, and that the tools
# are running, then only do a guest reboot. Otherwise do a hard reset.
@@ -605,8 +600,7 @@ class VMWareVMOps(object): """Suspend the specified instance."""
vm_ref = self._get_vm_ref_from_the_name(instance.name)
if vm_ref is None:
- raise exception.NotFound(_("instance - %s not present") %
- instance.name)
+ raise exception.InstanceNotFound(instance_id=instance.id)
pwr_state = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
@@ -620,8 +614,9 @@ class VMWareVMOps(object): LOG.debug(_("Suspended the VM %s ") % instance.name)
# Raise Exception if VM is poweredOff
elif pwr_state == "poweredOff":
- raise exception.Invalid(_("instance - %s is poweredOff and hence "
- " can't be suspended.") % instance.name)
+ reason = _("instance is powered off and can not be suspended.")
+ raise exception.InstanceSuspendFailure(reason=reason)
+
LOG.debug(_("VM %s was already in suspended state. So returning "
"without doing anything") % instance.name)
@@ -629,8 +624,7 @@ class VMWareVMOps(object): """Resume the specified instance."""
vm_ref = self._get_vm_ref_from_the_name(instance.name)
if vm_ref is None:
- raise exception.NotFound(_("instance - %s not present") %
- instance.name)
+ raise exception.InstanceNotFound(instance_id=instance.id)
pwr_state = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
@@ -643,15 +637,14 @@ class VMWareVMOps(object): self._wait_with_callback(instance.id, suspend_task, callback)
LOG.debug(_("Resumed the VM %s ") % instance.name)
else:
- raise exception.Invalid(_("instance - %s not in Suspended state "
- "and hence can't be Resumed.") % instance.name)
+ reason = _("instance is not in a suspended state")
+ raise exception.InstanceResumeFailure(reason=reason)
def get_info(self, instance_name):
"""Return data about the VM instance."""
vm_ref = self._get_vm_ref_from_the_name(instance_name)
if vm_ref is None:
- raise exception.NotFound(_("instance - %s not present") %
- instance_name)
+ raise exception.InstanceNotFound(instance_id=instance_name)
lst_properties = ["summary.config.numCpu",
"summary.config.memorySizeMB",
@@ -687,8 +680,7 @@ class VMWareVMOps(object): """Return snapshot of console."""
vm_ref = self._get_vm_ref_from_the_name(instance.name)
if vm_ref is None:
- raise exception.NotFound(_("instance - %s not present") %
- instance.name)
+ raise exception.InstanceNotFound(instance_id=instance.id)
param_list = {"id": str(vm_ref)}
base_url = "%s://%s/screen?%s" % (self._session._scheme,
self._session._host_ip,
@@ -716,8 +708,7 @@ class VMWareVMOps(object): """
vm_ref = self._get_vm_ref_from_the_name(instance.name)
if vm_ref is None:
- raise exception.NotFound(_("instance - %s not present") %
- instance.name)
+ raise exception.InstanceNotFound(instance_id=instance.id)
network = db.network_get_by_instance(context.get_admin_context(),
instance['id'])
mac_addr = instance.mac_address
diff --git a/nova/virt/vmwareapi_conn.py b/nova/virt/vmwareapi_conn.py index 20c1b2b45..1c6d2572d 100644 --- a/nova/virt/vmwareapi_conn.py +++ b/nova/virt/vmwareapi_conn.py @@ -42,6 +42,7 @@ from nova import exception from nova import flags
from nova import log as logging
from nova import utils
+from nova.virt import driver
from nova.virt.vmwareapi import error_util
from nova.virt.vmwareapi import vim
from nova.virt.vmwareapi import vim_util
@@ -104,11 +105,12 @@ def get_connection(_): api_retry_count)
-class VMWareESXConnection(object):
+class VMWareESXConnection(driver.ComputeDriver):
"""The ESX host connection object."""
def __init__(self, host_ip, host_username, host_password,
api_retry_count, scheme="https"):
+ super(VMWareESXConnection, self).__init__()
session = VMWareAPISession(host_ip, host_username, host_password,
api_retry_count, scheme=scheme)
self._vmops = VMWareVMOps(session)
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py index 4434dbf0b..e36ef3288 100644 --- a/nova/virt/xenapi/fake.py +++ b/nova/virt/xenapi/fake.py @@ -294,7 +294,7 @@ class Failure(Exception): def __str__(self): try: return str(self.details) - except Exception, exc: + except Exception: return "XenAPI Fake Failure: %s" % str(self.details) def _details_map(self): diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index d07d60800..9f6cd608c 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -28,10 +28,7 @@ import urllib import uuid from xml.dom import minidom -from eventlet import event import glance.client -from nova import context -from nova import db from nova import exception from nova import flags from nova import log as logging @@ -49,6 +46,10 @@ LOG = logging.getLogger("nova.virt.xenapi.vm_utils") FLAGS = flags.FLAGS flags.DEFINE_string('default_os_type', 'linux', 'Default OS type') +flags.DEFINE_integer('block_device_creation_timeout', 10, + 'time to wait for a block device to be created') +flags.DEFINE_integer('max_kernel_ramdisk_size', 16 * 1024 * 1024, + 'maximum size in bytes of kernel or ramdisk images') XENAPI_POWER_STATE = { 'Halted': power_state.SHUTDOWN, @@ -101,8 +102,8 @@ class VMHelper(HelperBase): 3. 
Using hardware virtualization """ - instance_type = instance_types.\ - get_instance_type(instance.instance_type) + inst_type_id = instance.instance_type_id + instance_type = instance_types.get_instance_type(inst_type_id) mem = str(long(instance_type['memory_mb']) * 1024 * 1024) vcpus = str(instance_type['vcpus']) rec = { @@ -169,8 +170,8 @@ class VMHelper(HelperBase): @classmethod def ensure_free_mem(cls, session, instance): - instance_type = instance_types.get_instance_type( - instance.instance_type) + inst_type_id = instance.instance_type_id + instance_type = instance_types.get_instance_type(inst_type_id) mem = long(instance_type['memory_mb']) * 1024 * 1024 #get free memory from host host = session.get_xenapi_host() @@ -304,7 +305,6 @@ class VMHelper(HelperBase): % locals()) vm_vdi_ref, vm_vdi_rec = cls.get_vdi_for_vm_safely(session, vm_ref) - vm_vdi_uuid = vm_vdi_rec["uuid"] sr_ref = vm_vdi_rec["SR"] original_parent_uuid = get_vhd_parent_uuid(session, vm_vdi_ref) @@ -446,6 +446,12 @@ class VMHelper(HelperBase): if image_type == ImageType.DISK: # Make room for MBR. 
vdi_size += MBR_SIZE_BYTES + elif image_type == ImageType.KERNEL_RAMDISK and \ + vdi_size > FLAGS.max_kernel_ramdisk_size: + max_size = FLAGS.max_kernel_ramdisk_size + raise exception.Error( + _("Kernel/Ramdisk image is too large: %(vdi_size)d bytes, " + "max %(max_size)d bytes") % locals()) name_label = get_name_label_for_image(image) vdi_ref = cls.create_vdi(session, sr_ref, name_label, vdi_size, False) @@ -508,9 +514,7 @@ class VMHelper(HelperBase): try: return glance_disk_format2nova_type[disk_format] except KeyError: - raise exception.NotFound( - _("Unrecognized disk_format '%(disk_format)s'") - % locals()) + raise exception.InvalidDiskFormat(disk_format=disk_format) def determine_from_instance(): if instance.kernel_id: @@ -645,8 +649,7 @@ class VMHelper(HelperBase): if n == 0: return None elif n > 1: - raise exception.Duplicate(_('duplicate name found: %s') % - name_label) + raise exception.InstanceExists(name=name_label) else: return vm_refs[0] @@ -753,14 +756,14 @@ class VMHelper(HelperBase): session.call_xenapi('SR.scan', sr_ref) -def get_rrd(host, uuid): +def get_rrd(host, vm_uuid): """Return the VM RRD XML as a string""" try: xml = urllib.urlopen("http://%s:%s@%s/vm_rrd?uuid=%s" % ( FLAGS.xenapi_connection_username, FLAGS.xenapi_connection_password, host, - uuid)) + vm_uuid)) return xml.read() except IOError: return None @@ -855,7 +858,7 @@ def safe_find_sr(session): """ sr_ref = find_sr(session) if sr_ref is None: - raise exception.NotFound(_('Cannot find SR to read/write VDI')) + raise exception.StorageRepositoryNotFound() return sr_ref @@ -896,6 +899,16 @@ def remap_vbd_dev(dev): return remapped_dev +def _wait_for_device(dev): + """Wait for device node to appear""" + for i in xrange(0, FLAGS.block_device_creation_timeout): + if os.path.exists('/dev/%s' % dev): + return + time.sleep(1) + + raise StorageError(_('Timeout waiting for device %s to be created') % dev) + + def with_vdi_attached_here(session, vdi_ref, read_only, f): this_vm_ref = 
get_this_vm_ref(session) vbd_rec = {} @@ -924,6 +937,11 @@ def with_vdi_attached_here(session, vdi_ref, read_only, f): if dev != orig_dev: LOG.debug(_('VBD %(vbd_ref)s plugged into wrong dev, ' 'remapping to %(dev)s') % locals()) + if dev != 'autodetect': + # NOTE(johannes): Unit tests will end up with a device called + # 'autodetect' which obviously won't exist. It's not ideal, + # but the alternatives were much messier + _wait_for_device(dev) return f(dev) finally: LOG.debug(_('Destroying VBD for VDI %s ... '), vdi_ref) @@ -1003,7 +1021,6 @@ def _stream_disk(dev, image_type, virtual_size, image_file): def _write_partition(virtual_size, dev): dest = '/dev/%s' % dev - mbr_last = MBR_SIZE_SECTORS - 1 primary_first = MBR_SIZE_SECTORS primary_last = MBR_SIZE_SECTORS + (virtual_size / SECTOR_SIZE) - 1 @@ -1130,7 +1147,7 @@ def _prepare_injectables(inst, networks_info): 'dns': dns, 'address_v6': ip_v6 and ip_v6['ip'] or '', 'netmask_v6': ip_v6 and ip_v6['netmask'] or '', - 'gateway_v6': ip_v6 and ip_v6['gateway'] or '', + 'gateway_v6': ip_v6 and info['gateway6'] or '', 'use_ipv6': FLAGS.use_ipv6} interfaces_info.append(interface_info) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index c96c35a6e..be6ef48ea 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -25,15 +25,15 @@ import M2Crypto import os import pickle import subprocess -import tempfile import uuid -from nova import db from nova import context -from nova import log as logging +from nova import db from nova import exception -from nova import utils from nova import flags +from nova import ipv6 +from nova import log as logging +from nova import utils from nova.auth.manager import AuthManager from nova.compute import power_state @@ -127,8 +127,7 @@ class VMOps(object): instance_name = instance.name vm_ref = VMHelper.lookup(self._session, instance_name) if vm_ref is not None: - raise exception.Duplicate(_('Attempted to create' - ' non-unique name %s') % instance_name) + 
raise exception.InstanceExists(name=instance_name) #ensure enough free memory is available if not VMHelper.ensure_free_mem(self._session, instance): @@ -176,7 +175,7 @@ class VMOps(object): vdi_ref, network_info) self.create_vifs(vm_ref, network_info) - self.inject_network_info(instance, vm_ref, network_info) + self.inject_network_info(instance, network_info, vm_ref) return vm_ref def _spawn(self, instance, vm_ref): @@ -203,6 +202,13 @@ class VMOps(object): for path, contents in instance.injected_files: LOG.debug(_("Injecting file path: '%s'") % path) self.inject_file(instance, path, contents) + + def _set_admin_password(): + admin_password = instance.admin_pass + if admin_password: + LOG.debug(_("Setting admin password")) + self.set_admin_password(instance, admin_password) + # NOTE(armando): Do we really need to do this in virt? # NOTE(tr3buchet): not sure but wherever we do it, we need to call # reset_network afterwards @@ -211,20 +217,15 @@ class VMOps(object): def _wait_for_boot(): try: state = self.get_info(instance_name)['state'] - db.instance_set_state(context.get_admin_context(), - instance['id'], state) if state == power_state.RUNNING: LOG.debug(_('Instance %s: booted'), instance_name) timer.stop() _inject_files() + _set_admin_password() return True except Exception, exc: LOG.warn(exc) - LOG.exception(_('instance %s: failed to boot'), - instance_name) - db.instance_set_state(context.get_admin_context(), - instance['id'], - power_state.SHUTDOWN) + LOG.exception(_('Instance %s: failed to boot'), instance_name) timer.stop() return False @@ -260,8 +261,8 @@ class VMOps(object): instance_name = instance_or_vm.name vm_ref = VMHelper.lookup(self._session, instance_name) if vm_ref is None: - raise exception.NotFound( - _('Instance not present %s') % instance_name) + raise exception.NotFound(_("No opaque_ref could be determined " + "for '%s'.") % instance_or_vm) return vm_ref def _acquire_bootlock(self, vm): @@ -387,7 +388,6 @@ class VMOps(object): def 
link_disks(self, instance, base_copy_uuid, cow_uuid): """Links the base copy VHD to the COW via the XAPI plugin.""" - vm_ref = VMHelper.lookup(self._session, instance.name) new_base_copy_uuid = str(uuid.uuid4()) new_cow_uuid = str(uuid.uuid4()) params = {'instance_id': instance.id, @@ -437,11 +437,12 @@ class VMOps(object): """ # Need to uniquely identify this request. - transaction_id = str(uuid.uuid4()) + key_init_transaction_id = str(uuid.uuid4()) # The simple Diffie-Hellman class is used to manage key exchange. dh = SimpleDH() - args = {'id': transaction_id, 'pub': str(dh.get_public())} - resp = self._make_agent_call('key_init', instance, '', args) + key_init_args = {'id': key_init_transaction_id, + 'pub': str(dh.get_public())} + resp = self._make_agent_call('key_init', instance, '', key_init_args) if resp is None: # No response from the agent return @@ -455,8 +456,9 @@ class VMOps(object): dh.compute_shared(agent_pub) enc_pass = dh.encrypt(new_pass) # Send the encrypted password - args['enc_pass'] = enc_pass - resp = self._make_agent_call('password', instance, '', args) + password_transaction_id = str(uuid.uuid4()) + password_args = {'id': password_transaction_id, 'enc_pass': enc_pass} + resp = self._make_agent_call('password', instance, '', password_args) if resp is None: # No response from the agent return @@ -464,6 +466,9 @@ class VMOps(object): # Successful return code from password is '0' if resp_dict['returncode'] != '0': raise RuntimeError(resp_dict['message']) + db.instance_update(context.get_admin_context(), + instance['id'], + dict(admin_pass=new_pass)) return resp_dict['message'] def inject_file(self, instance, path, contents): @@ -579,9 +584,8 @@ class VMOps(object): if not (instance.kernel_id and instance.ramdisk_id): # 2. 
We only have kernel xor ramdisk - raise exception.NotFound( - _("Instance %(instance_id)s has a kernel or ramdisk but not " - "both" % locals())) + raise exception.InstanceUnacceptable(instance_id=instance_id, + reason=_("instance has a kernel or ramdisk but not both")) # 3. We have both kernel and ramdisk (kernel, ramdisk) = VMHelper.lookup_kernel_ramdisk(self._session, @@ -722,8 +726,7 @@ class VMOps(object): "%s-rescue" % instance.name) if not rescue_vm_ref: - raise exception.NotFound(_( - "Instance is not in Rescue Mode: %s" % instance.name)) + raise exception.InstanceNotInRescueMode(instance_id=instance.id) original_vm_ref = VMHelper.lookup(self._session, instance.name) instance._rescue = False @@ -760,7 +763,6 @@ class VMOps(object): instance))) for vm in rescue_vms: - rescue_name = vm["name"] rescue_vm_ref = vm["vm_ref"] self._destroy_rescue_instance(rescue_vm_ref) @@ -798,15 +800,17 @@ class VMOps(object): def _get_network_info(self, instance): """Creates network info list for instance.""" admin_context = context.get_admin_context() - IPs = db.fixed_ip_get_all_by_instance(admin_context, + ips = db.fixed_ip_get_all_by_instance(admin_context, instance['id']) networks = db.network_get_all_by_instance(admin_context, instance['id']) - flavor = db.instance_type_get_by_name(admin_context, - instance['instance_type']) + + inst_type = db.instance_type_get_by_id(admin_context, + instance['instance_type_id']) + network_info = [] for network in networks: - network_IPs = [ip for ip in IPs if ip.network_id == network.id] + network_ips = [ip for ip in ips if ip.network_id == network.id] def ip_dict(ip): return { @@ -814,12 +818,12 @@ class VMOps(object): "netmask": network["netmask"], "enabled": "1"} - def ip6_dict(ip6): + def ip6_dict(): return { - "ip": utils.to_global_ipv6(network['cidr_v6'], - instance['mac_address']), + "ip": ipv6.to_global(network['cidr_v6'], + instance['mac_address'], + instance['project_id']), "netmask": network['netmask_v6'], - "gateway": 
network['gateway_v6'], "enabled": "1"} info = { @@ -827,23 +831,41 @@ class VMOps(object): 'gateway': network['gateway'], 'broadcast': network['broadcast'], 'mac': instance.mac_address, - 'rxtx_cap': flavor['rxtx_cap'], + 'rxtx_cap': inst_type['rxtx_cap'], 'dns': [network['dns']], - 'ips': [ip_dict(ip) for ip in network_IPs]} + 'ips': [ip_dict(ip) for ip in network_ips]} if network['cidr_v6']: - info['ip6s'] = [ip6_dict(ip) for ip in network_IPs] + info['ip6s'] = [ip6_dict()] + if network['gateway_v6']: + info['gateway6'] = network['gateway_v6'] network_info.append((network, info)) return network_info - def inject_network_info(self, instance, vm_ref, network_info): + #TODO{tr3buchet) remove this shim with nova-multi-nic + def inject_network_info(self, instance, network_info=None, vm_ref=None): + """ + shim in place which makes inject_network_info work without being + passed network_info. + shim goes away after nova-multi-nic + """ + if not network_info: + network_info = self._get_network_info(instance) + self._inject_network_info(instance, network_info, vm_ref) + + def _inject_network_info(self, instance, network_info, vm_ref=None): """ Generate the network info and make calls to place it into the xenstore and the xenstore param list. 
+ vm_ref can be passed in because it will sometimes be different than + what VMHelper.lookup(session, instance.name) will find (ex: rescue) """ logging.debug(_("injecting network info to xs for vm: |%s|"), vm_ref) - # this function raises if vm_ref is not a vm_opaque_ref - self._session.get_xenapi().VM.get_record(vm_ref) + if vm_ref: + # this function raises if vm_ref is not a vm_opaque_ref + self._session.get_xenapi().VM.get_record(vm_ref) + else: + vm_ref = VMHelper.lookup(self._session, instance.name) for (network, info) in network_info: location = 'vm-data/networking/%s' % info['mac'].replace(':', '') @@ -875,8 +897,10 @@ class VMOps(object): VMHelper.create_vif(self._session, vm_ref, network_ref, mac_address, device, rxtx_cap) - def reset_network(self, instance, vm_ref): + def reset_network(self, instance, vm_ref=None): """Creates uuid arg to pass to make_agent_call and calls it.""" + if not vm_ref: + vm_ref = VMHelper.lookup(self._session, instance.name) args = {'id': str(uuid.uuid4())} # TODO(tr3buchet): fix function call after refactor #resp = self._make_agent_call('resetnetwork', instance, '', args) @@ -902,7 +926,7 @@ class VMOps(object): try: ret = self._make_xenstore_call('read_record', vm, path, {'ignore_missing_path': 'True'}) - except self.XenAPI.Failure, e: + except self.XenAPI.Failure: return None ret = json.loads(ret) if ret == "None": @@ -1150,23 +1174,22 @@ class SimpleDH(object): return mpi def _run_ssl(self, text, which): - base_cmd = ('cat %(tmpfile)s | openssl enc -aes-128-cbc ' - '-a -pass pass:%(shared)s -nosalt %(dec_flag)s') + base_cmd = ('openssl enc -aes-128-cbc -a -pass pass:%(shared)s ' + '-nosalt %(dec_flag)s') if which.lower()[0] == 'd': dec_flag = ' -d' else: dec_flag = '' - fd, tmpfile = tempfile.mkstemp() - os.close(fd) - file(tmpfile, 'w').write(text) shared = self._shared cmd = base_cmd % locals() proc = _runproc(cmd) + proc.stdin.write(text + '\n') + proc.stdin.close() proc.wait() err = proc.stderr.read() if err: raise 
RuntimeError(_('OpenSSL error: %s') % err) - return proc.stdout.read() + return proc.stdout.read().strip('\n') def encrypt(self, text): return self._run_ssl(text, 'enc') diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py index 72284ac02..7821a4f7e 100644 --- a/nova/virt/xenapi/volume_utils.py +++ b/nova/virt/xenapi/volume_utils.py @@ -204,14 +204,17 @@ def _get_volume_id(path_or_id): if isinstance(path_or_id, int): return path_or_id # n must contain at least the volume_id - # /vol- is for remote volumes - # -vol- is for local volumes + # :volume- is for remote volumes + # -volume- is for local volumes # see compute/manager->setup_compute_volume - volume_id = path_or_id[path_or_id.find('/vol-') + 1:] + volume_id = path_or_id[path_or_id.find(':volume-') + 1:] if volume_id == path_or_id: - volume_id = path_or_id[path_or_id.find('-vol-') + 1:] - volume_id = volume_id.replace('--', '-') - return volume_id + volume_id = path_or_id[path_or_id.find('-volume--') + 1:] + volume_id = volume_id.replace('volume--', '') + else: + volume_id = volume_id.replace('volume-', '') + volume_id = volume_id[0:volume_id.find('-')] + return int(volume_id) def _get_target_host(iscsi_string): @@ -244,25 +247,23 @@ def _get_target(volume_id): Gets iscsi name and portal from volume name and host. For this method to work the following are needed: 1) volume_ref['host'] must resolve to something rather than loopback - 2) ietd must bind only to the address as resolved above - If any of the two conditions are not met, fall back on Flags. 
""" - volume_ref = db.volume_get_by_ec2_id(context.get_admin_context(), - volume_id) + volume_ref = db.volume_get(context.get_admin_context(), + volume_id) result = (None, None) try: - (r, _e) = utils.execute("sudo iscsiadm -m discovery -t " - "sendtargets -p %s" % - volume_ref['host']) + (r, _e) = utils.execute('sudo', 'iscsiadm', + '-m', 'discovery', + '-t', 'sendtargets', + '-p', volume_ref['host']) except exception.ProcessExecutionError, exc: LOG.exception(exc) else: - targets = r.splitlines() - if len(_e) == 0 and len(targets) == 1: - for target in targets: - if volume_id in target: - (location, _sep, iscsi_name) = target.partition(" ") - break - iscsi_portal = location.split(",")[0] - result = (iscsi_name, iscsi_portal) + volume_name = "volume-%08x" % volume_id + for target in r.splitlines(): + if FLAGS.iscsi_ip_prefix in target and volume_name in target: + (location, _sep, iscsi_name) = target.partition(" ") + break + iscsi_portal = location.split(",")[0] + result = (iscsi_name, iscsi_portal) return result diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py index 757ecf5ad..afcb8cf47 100644 --- a/nova/virt/xenapi/volumeops.py +++ b/nova/virt/xenapi/volumeops.py @@ -45,8 +45,7 @@ class VolumeOps(object): # Before we start, check that the VM exists vm_ref = VMHelper.lookup(self._session, instance_name) if vm_ref is None: - raise exception.NotFound(_('Instance %s not found') - % instance_name) + raise exception.InstanceNotFound(instance_id=instance_name) # NOTE: No Resource Pool concept so far LOG.debug(_("Attach_volume: %(instance_name)s, %(device_path)s," " %(mountpoint)s") % locals()) @@ -98,8 +97,7 @@ class VolumeOps(object): # Before we start, check that the VM exists vm_ref = VMHelper.lookup(self._session, instance_name) if vm_ref is None: - raise exception.NotFound(_('Instance %s not found') - % instance_name) + raise exception.InstanceNotFound(instance_id=instance_name) # Detach VBD from VM LOG.debug(_("Detach_volume: 
%(instance_name)s, %(mountpoint)s") % locals()) diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 99fd35c61..6d828e109 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -57,19 +57,24 @@ reactor thread if the VM.get_by_name_label or VM.get_record calls block. - suffix "_rec" for record objects """ +import json +import random import sys import urlparse import xmlrpclib from eventlet import event from eventlet import tpool +from eventlet import timeout from nova import context from nova import db +from nova import exception from nova import utils from nova import flags from nova import log as logging from nova.virt import driver +from nova.virt.xenapi import vm_utils from nova.virt.xenapi.vmops import VMOps from nova.virt.xenapi.volumeops import VolumeOps @@ -140,6 +145,9 @@ flags.DEFINE_bool('xenapi_remap_vbd_dev', False, flags.DEFINE_string('xenapi_remap_vbd_dev_prefix', 'sd', 'Specify prefix to remap VBD dev to ' '(ex. /dev/xvdb -> /dev/sdb)') +flags.DEFINE_integer('xenapi_login_timeout', + 10, + 'Timeout in seconds for XenAPI login.') def get_connection(_): @@ -161,9 +169,16 @@ class XenAPIConnection(driver.ComputeDriver): def __init__(self, url, user, pw): super(XenAPIConnection, self).__init__() - session = XenAPISession(url, user, pw) - self._vmops = VMOps(session) - self._volumeops = VolumeOps(session) + self._session = XenAPISession(url, user, pw) + self._vmops = VMOps(self._session) + self._volumeops = VolumeOps(self._session) + self._host_state = None + + @property + def HostState(self): + if not self._host_state: + self._host_state = HostState(self._session) + return self._host_state def init_host(self, host): #FIXME(armando): implement this @@ -311,6 +326,16 @@ class XenAPIConnection(driver.ComputeDriver): """This method is supported only by libvirt.""" raise NotImplementedError('This method is supported only by libvirt.') + def update_host_status(self): + """Update the status info of the host, and return those 
values + to the calling program.""" + return self.HostState.update_status() + + def get_host_stats(self, refresh=False): + """Return the current state of the host. If 'refresh' is + True, run the update first.""" + return self.HostState.get_host_stats(refresh=refresh) + class XenAPISession(object): """The session to invoke XenAPI SDK calls""" @@ -318,8 +343,10 @@ class XenAPISession(object): def __init__(self, url, user, pw): self.XenAPI = self.get_imported_xenapi() self._session = self._create_session(url) - self._session.login_with_password(user, pw) - self.loop = None + exception = self.XenAPI.Failure(_("Unable to log in to XenAPI " + "(is the Dom0 disk full?)")) + with timeout.Timeout(FLAGS.xenapi_login_timeout, exception): + self._session.login_with_password(user, pw) def get_imported_xenapi(self): """Stubout point. This can be replaced with a mock xenapi module.""" @@ -356,57 +383,52 @@ class XenAPISession(object): def wait_for_task(self, task, id=None): """Return the result of the given task. The task is polled - until it completes. Not re-entrant.""" + until it completes.""" done = event.Event() - self.loop = utils.LoopingCall(self._poll_task, id, task, done) - self.loop.start(FLAGS.xenapi_task_poll_interval, now=True) - rv = done.wait() - self.loop.stop() - return rv - - def _stop_loop(self): - """Stop polling for task to finish.""" - #NOTE(sandy-walsh) Had to break this call out to support unit tests. - if self.loop: - self.loop.stop() + loop = utils.LoopingCall(f=None) + + def _poll_task(): + """Poll the given XenAPI task, and return the result if the + action was completed successfully or not. 
+ """ + try: + name = self._session.xenapi.task.get_name_label(task) + status = self._session.xenapi.task.get_status(task) + if id: + action = dict( + instance_id=int(id), + action=name[0:255], # Ensure action is never > 255 + error=None) + if status == "pending": + return + elif status == "success": + result = self._session.xenapi.task.get_result(task) + LOG.info(_("Task [%(name)s] %(task)s status:" + " success %(result)s") % locals()) + done.send(_parse_xmlrpc_value(result)) + else: + error_info = self._session.xenapi.task.get_error_info(task) + action["error"] = str(error_info) + LOG.warn(_("Task [%(name)s] %(task)s status:" + " %(status)s %(error_info)s") % locals()) + done.send_exception(self.XenAPI.Failure(error_info)) + + if id: + db.instance_action_create(context.get_admin_context(), + action) + except self.XenAPI.Failure, exc: + LOG.warn(exc) + done.send_exception(*sys.exc_info()) + loop.stop() + + loop.f = _poll_task + loop.start(FLAGS.xenapi_task_poll_interval, now=True) + return done.wait() def _create_session(self, url): """Stubout point. This can be replaced with a mock session.""" return self.XenAPI.Session(url) - def _poll_task(self, id, task, done): - """Poll the given XenAPI task, and fire the given action if we - get a result. 
- """ - try: - name = self._session.xenapi.task.get_name_label(task) - status = self._session.xenapi.task.get_status(task) - if id: - action = dict( - instance_id=int(id), - action=name[0:255], # Ensure action is never > 255 - error=None) - if status == "pending": - return - elif status == "success": - result = self._session.xenapi.task.get_result(task) - LOG.info(_("Task [%(name)s] %(task)s status:" - " success %(result)s") % locals()) - done.send(_parse_xmlrpc_value(result)) - else: - error_info = self._session.xenapi.task.get_error_info(task) - action["error"] = str(error_info) - LOG.warn(_("Task [%(name)s] %(task)s status:" - " %(status)s %(error_info)s") % locals()) - done.send_exception(self.XenAPI.Failure(error_info)) - - if id: - db.instance_action_create(context.get_admin_context(), action) - except self.XenAPI.Failure, exc: - LOG.warn(exc) - done.send_exception(*sys.exc_info()) - self._stop_loop() - def _unwrap_plugin_exceptions(self, func, *args, **kwargs): """Parse exception details""" try: @@ -429,6 +451,65 @@ class XenAPISession(object): raise +class HostState(object): + """Manages information about the XenServer host this compute + node is running on. + """ + def __init__(self, session): + super(HostState, self).__init__() + self._session = session + self._stats = {} + self.update_status() + + def get_host_stats(self, refresh=False): + """Return the current state of the host. If 'refresh' is + True, run the update first. + """ + if refresh: + self.update_status() + return self._stats + + def update_status(self): + """Since under Xenserver, a compute node runs on a given host, + we can get host status information using xenapi. 
+ """ + LOG.debug(_("Updating host stats")) + # Make it something unlikely to match any actual instance ID + task_id = random.randint(-80000, -70000) + task = self._session.async_call_plugin("xenhost", "host_data", {}) + task_result = self._session.wait_for_task(task, task_id) + if not task_result: + task_result = json.dumps("") + try: + data = json.loads(task_result) + except ValueError as e: + # Invalid JSON object + LOG.error(_("Unable to get updated status: %s") % e) + return + # Get the SR usage + try: + sr_ref = vm_utils.safe_find_sr(self._session) + except exception.NotFound as e: + # No SR configured + LOG.error(_("Unable to get SR for this host: %s") % e) + return + sr_rec = self._session.get_xenapi().SR.get_record(sr_ref) + total = int(sr_rec["virtual_allocation"]) + used = int(sr_rec["physical_utilisation"]) + data["disk_total"] = total + data["disk_used"] = used + data["disk_available"] = total - used + host_memory = data.get('host_memory', None) + if host_memory: + data["host_memory_total"] = host_memory.get('total', 0) + data["host_memory_overhead"] = host_memory.get('overhead', 0) + data["host_memory_free"] = host_memory.get('free', 0) + data["host_memory_free_computed"] = \ + host_memory.get('free-computed', 0) + del data['host_memory'] + self._stats = data + + def _parse_xmlrpc_value(val): """Parse the given value as if it were an XML-RPC value. This is sometimes used as the format for the task.result field.""" diff --git a/nova/volume/api.py b/nova/volume/api.py index 4b4bb9dc5..09befb647 100644 --- a/nova/volume/api.py +++ b/nova/volume/api.py @@ -103,3 +103,10 @@ class API(base.Base): # TODO(vish): abstract status checking? 
if volume['status'] == "available": raise exception.ApiError(_("Volume is already detached")) + + def remove_from_compute(self, context, volume_id, host): + """Remove volume from specified compute host.""" + rpc.call(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "remove_volume", + "args": {'volume_id': volume_id}}) diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 850893914..55307ad9b 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -112,6 +112,12 @@ class VolumeDriver(object): # If the volume isn't present, then don't attempt to delete return True + # zero out old volumes to prevent data leaking between users + # TODO(ja): reclaiming space should be done lazy and low priority + self._execute('sudo', 'dd', 'if=/dev/zero', + 'of=%s' % self.local_path(volume), + 'count=%d' % (volume['size'] * 1024), + 'bs=1M') self._try_execute('sudo', 'lvremove', '-f', "%s/%s" % (FLAGS.volume_group, volume['name'])) @@ -557,7 +563,7 @@ class RBDDriver(VolumeDriver): """Returns the path of the rbd volume.""" # This is the same as the remote path # since qemu accesses it directly. - return self.discover_volume(volume) + return "rbd:%s/%s" % (FLAGS.rbd_pool, volume['name']) def ensure_export(self, context, volume): """Synchronously recreates an export for a logical volume.""" @@ -571,10 +577,8 @@ class RBDDriver(VolumeDriver): """Removes an export for a logical volume""" pass - def discover_volume(self, volume): + def discover_volume(self, context, volume): """Discover volume on a remote host""" - # NOTE(justinsb): This is messed up... discover_volume takes 3 args - # but then that would break local_path return "rbd:%s/%s" % (FLAGS.rbd_pool, volume['name']) def undiscover_volume(self, volume): diff --git a/nova/wsgi.py b/nova/wsgi.py index 94aafdf1c..ea9bb963d 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -17,9 +17,7 @@ # License for the specific language governing permissions and limitations # under the License. 
-""" -Utility methods for working with WSGI servers -""" +"""Utility methods for working with WSGI servers.""" import os import sys @@ -33,7 +31,6 @@ import routes.middleware import webob import webob.dec import webob.exc - from paste import deploy from nova import exception @@ -43,6 +40,7 @@ from nova import utils FLAGS = flags.FLAGS +LOG = logging.getLogger('nova.wsgi') class WritableLogger(object): @@ -61,13 +59,16 @@ class Server(object): def __init__(self, threads=1000): self.pool = eventlet.GreenPool(threads) + self.socket_info = {} - def start(self, application, port, host='0.0.0.0', backlog=128): + def start(self, application, port, host='0.0.0.0', key=None, backlog=128): """Run a WSGI server with the given application.""" arg0 = sys.argv[0] - logging.audit(_("Starting %(arg0)s on %(host)s:%(port)s") % locals()) + logging.audit(_('Starting %(arg0)s on %(host)s:%(port)s') % locals()) socket = eventlet.listen((host, port), backlog=backlog) self.pool.spawn_n(self._run, application, socket) + if key: + self.socket_info[key] = socket.getsockname() def wait(self): """Wait until all servers have completed running.""" @@ -86,30 +87,34 @@ class Server(object): class Request(webob.Request): def best_match_content_type(self): - """ - Determine the most acceptable content-type based on the - query extension then the Accept header - """ + """Determine the most acceptable content-type. - parts = self.path.rsplit(".", 1) + Based on the query extension then the Accept header. 
+ + """ + parts = self.path.rsplit('.', 1) if len(parts) > 1: format = parts[1] - if format in ["json", "xml"]: - return "application/{0}".format(parts[1]) + if format in ['json', 'xml']: + return 'application/{0}'.format(parts[1]) - ctypes = ["application/json", "application/xml"] + ctypes = ['application/json', 'application/xml'] bm = self.accept.best_match(ctypes) - return bm or "application/json" + return bm or 'application/json' def get_content_type(self): - try: - ct = self.headers["Content-Type"] - assert ct in ("application/xml", "application/json") - return ct - except Exception: - raise webob.exc.HTTPBadRequest("Invalid content type") + allowed_types = ("application/xml", "application/json") + if not "Content-Type" in self.headers: + msg = _("Missing Content-Type") + LOG.debug(msg) + raise webob.exc.HTTPBadRequest(msg) + type = self.content_type + if type in allowed_types: + return type + LOG.debug(_("Wrong Content-Type: %s") % type) + raise webob.exc.HTTPBadRequest("Invalid content type") class Application(object): @@ -117,7 +122,7 @@ class Application(object): @classmethod def factory(cls, global_config, **local_config): - """Used for paste app factories in paste.deploy config fles. + """Used for paste app factories in paste.deploy config files. Any local configuration (that is, values under the [app:APPNAME] section of the paste config) will be passed into the `__init__` method @@ -172,8 +177,9 @@ class Application(object): See the end of http://pythonpaste.org/webob/modules/dec.html for more info. + """ - raise NotImplementedError(_("You must implement __call__")) + raise NotImplementedError(_('You must implement __call__')) class Middleware(Application): @@ -183,11 +189,12 @@ class Middleware(Application): initialized that will be called next. By default the middleware will simply call its wrapped app, or you can override __call__ to customize its behavior. 
+ """ @classmethod def factory(cls, global_config, **local_config): - """Used for paste app factories in paste.deploy config fles. + """Used for paste app factories in paste.deploy config files. Any local configuration (that is, values under the [filter:APPNAME] section of the paste config) will be passed into the `__init__` method @@ -239,20 +246,24 @@ class Middleware(Application): class Debug(Middleware): - """Helper class that can be inserted into any WSGI application chain - to get information about the request and response.""" + """Helper class for debugging a WSGI application. + + Can be inserted into any WSGI application chain to get information + about the request and response. + + """ @webob.dec.wsgify(RequestClass=Request) def __call__(self, req): - print ("*" * 40) + " REQUEST ENVIRON" + print ('*' * 40) + ' REQUEST ENVIRON' for key, value in req.environ.items(): - print key, "=", value + print key, '=', value print resp = req.get_response(self.application) - print ("*" * 40) + " RESPONSE HEADERS" + print ('*' * 40) + ' RESPONSE HEADERS' for (key, value) in resp.headers.iteritems(): - print key, "=", value + print key, '=', value print resp.app_iter = self.print_generator(resp.app_iter) @@ -261,11 +272,8 @@ class Debug(Middleware): @staticmethod def print_generator(app_iter): - """ - Iterator that prints the contents of a wrapper string iterator - when iterated. - """ - print ("*" * 40) + " BODY" + """Iterator that prints the contents of a wrapper string.""" + print ('*' * 40) + ' BODY' for part in app_iter: sys.stdout.write(part) sys.stdout.flush() @@ -274,13 +282,10 @@ class Debug(Middleware): class Router(object): - """ - WSGI middleware that maps incoming requests to WSGI apps. - """ + """WSGI middleware that maps incoming requests to WSGI apps.""" def __init__(self, mapper): - """ - Create a router for the given routes.Mapper. + """Create a router for the given routes.Mapper. 
Each route in `mapper` must specify a 'controller', which is a WSGI app to call. You'll probably want to specify an 'action' as @@ -292,15 +297,16 @@ class Router(object): sc = ServerController() # Explicit mapping of one route to a controller+action - mapper.connect(None, "/svrlist", controller=sc, action="list") + mapper.connect(None, '/svrlist', controller=sc, action='list') # Actions are all implicitly defined - mapper.resource("server", "servers", controller=sc) + mapper.resource('server', 'servers', controller=sc) # Pointing to an arbitrary WSGI app. You can specify the # {path_info:.*} parameter so the target app can be handed just that # section of the URL. - mapper.connect(None, "/v1.0/{path_info:.*}", controller=BlogApp()) + mapper.connect(None, '/v1.0/{path_info:.*}', controller=BlogApp()) + """ self.map = mapper self._router = routes.middleware.RoutesMiddleware(self._dispatch, @@ -308,19 +314,22 @@ class Router(object): @webob.dec.wsgify(RequestClass=Request) def __call__(self, req): - """ - Route the incoming request to a controller based on self.map. + """Route the incoming request to a controller based on self.map. + If no match, return a 404. + """ return self._router @staticmethod @webob.dec.wsgify(RequestClass=Request) def _dispatch(req): - """ + """Dispatch the request to the appropriate controller. + Called by self._router after matching the incoming request to a route and putting the information into req.environ. Either returns 404 or the routed WSGI app's response. + """ match = req.environ['wsgiorg.routing_args'][1] if not match: @@ -330,22 +339,23 @@ class Router(object): class Controller(object): - """ + """WSGI app that dispatched to methods. + WSGI app that reads routing information supplied by RoutesMiddleware and calls the requested action method upon itself. All action methods must, in addition to their normal parameters, accept a 'req' argument which is the incoming wsgi.Request. 
They raise a webob.exc exception, or return a dict which will be serialized by requested content type. + """ @webob.dec.wsgify(RequestClass=Request) def __call__(self, req): - """ - Call the method specified in req.environ by RoutesMiddleware. - """ + """Call the method specified in req.environ by RoutesMiddleware.""" arg_dict = req.environ['wsgiorg.routing_args'][1] action = arg_dict['action'] method = getattr(self, action) + LOG.debug("%s %s" % (req.method, req.url)) del arg_dict['controller'] del arg_dict['action'] if 'format' in arg_dict: @@ -359,19 +369,23 @@ class Controller(object): body = self._serialize(result, content_type, default_xmlns) response = webob.Response() - response.headers["Content-Type"] = content_type + response.headers['Content-Type'] = content_type response.body = body + msg_dict = dict(url=req.url, status=response.status_int) + msg = _("%(url)s returned with HTTP %(status)d") % msg_dict + LOG.debug(msg) return response else: return result def _serialize(self, data, content_type, default_xmlns): - """ - Serialize the given dict to the provided content_type. + """Serialize the given dict to the provided content_type. + Uses self._serialization_metadata if it exists, which is a dict mapping MIME types to information needed to serialize to that type. + """ - _metadata = getattr(type(self), "_serialization_metadata", {}) + _metadata = getattr(type(self), '_serialization_metadata', {}) serializer = Serializer(_metadata, default_xmlns) try: @@ -380,12 +394,13 @@ class Controller(object): raise webob.exc.HTTPNotAcceptable() def _deserialize(self, data, content_type): - """ - Deserialize the request body to the specefied content type. + """Deserialize the request body to the specefied content type. + Uses self._serialization_metadata if it exists, which is a dict mapping MIME types to information needed to serialize to that type. 
+ """ - _metadata = getattr(type(self), "_serialization_metadata", {}) + _metadata = getattr(type(self), '_serialization_metadata', {}) serializer = Serializer(_metadata) return serializer.deserialize(data, content_type) @@ -395,55 +410,51 @@ class Controller(object): class Serializer(object): - """ - Serializes and deserializes dictionaries to certain MIME types. - """ + """Serializes and deserializes dictionaries to certain MIME types.""" def __init__(self, metadata=None, default_xmlns=None): - """ - Create a serializer based on the given WSGI environment. + """Create a serializer based on the given WSGI environment. + 'metadata' is an optional dict mapping MIME types to information needed to serialize a dictionary to that type. + """ self.metadata = metadata or {} self.default_xmlns = default_xmlns def _get_serialize_handler(self, content_type): handlers = { - "application/json": self._to_json, - "application/xml": self._to_xml, + 'application/json': self._to_json, + 'application/xml': self._to_xml, } try: return handlers[content_type] except Exception: - raise exception.InvalidContentType() + raise exception.InvalidContentType(content_type=content_type) def serialize(self, data, content_type): - """ - Serialize a dictionary into a string of the specified content type. - """ + """Serialize a dictionary into the specified content type.""" return self._get_serialize_handler(content_type)(data) def deserialize(self, datastring, content_type): - """ - Deserialize a string to a dictionary. + """Deserialize a string to a dictionary. The string must be in the format of a supported MIME type. 
+ """ return self.get_deserialize_handler(content_type)(datastring) def get_deserialize_handler(self, content_type): handlers = { - "application/json": self._from_json, - "application/xml": self._from_xml, + 'application/json': self._from_json, + 'application/xml': self._from_xml, } try: return handlers[content_type] except Exception: - raise exception.InvalidContentType(_("Invalid content type %s" - % content_type)) + raise exception.InvalidContentType(content_type=content_type) def _from_json(self, datastring): return utils.loads(datastring) @@ -455,11 +466,11 @@ class Serializer(object): return {node.nodeName: self._from_xml_node(node, plurals)} def _from_xml_node(self, node, listnames): - """ - Convert a minidom node to a simple Python type. + """Convert a minidom node to a simple Python type. listnames is a collection of names of XML nodes whose subnodes should be considered list items. + """ if len(node.childNodes) == 1 and node.childNodes[0].nodeType == 3: return node.childNodes[0].nodeValue @@ -502,6 +513,14 @@ class Serializer(object): result.setAttribute('xmlns', xmlns) if type(data) is list: + collections = metadata.get('list_collections', {}) + if nodename in collections: + metadata = collections[nodename] + for item in data: + node = doc.createElement(metadata['item_name']) + node.setAttribute(metadata['item_key'], str(item)) + result.appendChild(node) + return result singular = metadata.get('plurals', {}).get(nodename, None) if singular is None: if nodename.endswith('s'): @@ -512,6 +531,16 @@ class Serializer(object): node = self._to_xml_node(doc, metadata, singular, item) result.appendChild(node) elif type(data) is dict: + collections = metadata.get('dict_collections', {}) + if nodename in collections: + metadata = collections[nodename] + for k, v in data.items(): + node = doc.createElement(metadata['item_name']) + node.setAttribute(metadata['item_key'], str(k)) + text = doc.createTextNode(str(v)) + node.appendChild(text) + result.appendChild(node) + 
return result attrs = metadata.get('attributes', {}).get(nodename, {}) for k, v in data.items(): if k in attrs: @@ -548,7 +577,6 @@ def paste_config_file(basename): * /etc/nova, which may not be diffrerent from state_path on your distro """ - configfiles = [basename, os.path.join(FLAGS.state_path, 'etc', 'nova', basename), os.path.join(FLAGS.state_path, 'etc', basename), @@ -564,7 +592,7 @@ def load_paste_configuration(filename, appname): filename = os.path.abspath(filename) config = None try: - config = deploy.appconfig("config:%s" % filename, name=appname) + config = deploy.appconfig('config:%s' % filename, name=appname) except LookupError: pass return config @@ -575,7 +603,7 @@ def load_paste_app(filename, appname): filename = os.path.abspath(filename) app = None try: - app = deploy.loadapp("config:%s" % filename, name=appname) + app = deploy.loadapp('config:%s' % filename, name=appname) except LookupError: pass return app diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent b/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent index 5496a6bd5..9e761f264 100755 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent @@ -53,7 +53,6 @@ class TimeoutError(StandardError): pass -@jsonify def key_init(self, arg_dict): """Handles the Diffie-Hellman key exchange with the agent to establish the shared secret key used to encrypt/decrypt sensitive @@ -72,7 +71,6 @@ def key_init(self, arg_dict): return resp -@jsonify def password(self, arg_dict): """Writes a request to xenstore that tells the agent to set the root password for the given VM. The password should be @@ -80,7 +78,6 @@ def password(self, arg_dict): previous call to key_init. The encrypted password value should be passed as the value for the 'enc_pass' key in arg_dict. 
""" - pub = int(arg_dict["pub"]) enc_pass = arg_dict["enc_pass"] arg_dict["value"] = json.dumps({"name": "password", "value": enc_pass}) request_id = arg_dict["id"] diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance index 0a45f3873..4b45671ae 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance @@ -68,12 +68,12 @@ def _download_tarball(sr_path, staging_path, image_id, glance_host, area. """ conn = httplib.HTTPConnection(glance_host, glance_port) - conn.request('GET', '/images/%s' % image_id) + conn.request('GET', '/v1/images/%s' % image_id) resp = conn.getresponse() if resp.status == httplib.NOT_FOUND: raise Exception("Image '%s' not found in Glance" % image_id) elif resp.status != httplib.OK: - raise Exception("Unexpected response from Glance %i" % res.status) + raise Exception("Unexpected response from Glance %i" % resp.status) tar_cmd = "tar -zx --directory=%(staging_path)s" % locals() tar_proc = _make_subprocess(tar_cmd, stderr=True, stdin=True) diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost new file mode 100644 index 000000000..a8428e841 --- /dev/null +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost @@ -0,0 +1,183 @@ +#!/usr/bin/env python + +# Copyright 2011 OpenStack LLC. +# Copyright 2011 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# +# XenAPI plugin for reading/writing information to xenstore +# + +try: + import json +except ImportError: + import simplejson as json +import os +import random +import re +import subprocess +import tempfile +import time + +import XenAPIPlugin + +from pluginlib_nova import * +configure_logging("xenhost") + +host_data_pattern = re.compile(r"\s*(\S+) \([^\)]+\) *: ?(.*)") + + +def jsonify(fnc): + def wrapper(*args, **kwargs): + return json.dumps(fnc(*args, **kwargs)) + return wrapper + + +class TimeoutError(StandardError): + pass + + +def _run_command(cmd): + """Abstracts out the basics of issuing system commands. If the command + returns anything in stderr, a PluginError is raised with that information. + Otherwise, the output from stdout is returned. + """ + pipe = subprocess.PIPE + proc = subprocess.Popen([cmd], shell=True, stdin=pipe, stdout=pipe, + stderr=pipe, close_fds=True) + proc.wait() + err = proc.stderr.read() + if err: + raise pluginlib.PluginError(err) + return proc.stdout.read() + + +@jsonify +def host_data(self, arg_dict): + """Runs the commands on the xenstore host to return the current status + information. + """ + cmd = "xe host-list | grep uuid" + resp = _run_command(cmd) + host_uuid = resp.split(":")[-1].strip() + cmd = "xe host-param-list uuid=%s" % host_uuid + resp = _run_command(cmd) + parsed_data = parse_response(resp) + # We have the raw dict of values. Extract those that we need, + # and convert the data types as needed. 
+ ret_dict = cleanup(parsed_data) + return ret_dict + + +def parse_response(resp): + data = {} + for ln in resp.splitlines(): + if not ln: + continue + mtch = host_data_pattern.match(ln.strip()) + try: + k, v = mtch.groups() + data[k] = v + except AttributeError: + # Not a valid line; skip it + continue + return data + + +def cleanup(dct): + """Take the raw KV pairs returned and translate them into the + appropriate types, discarding any we don't need. + """ + def safe_int(val): + """Integer values will either be string versions of numbers, + or empty strings. Convert the latter to nulls. + """ + try: + return int(val) + except ValueError: + return None + + def strip_kv(ln): + return [val.strip() for val in ln.split(":", 1)] + + out = {} + +# sbs = dct.get("supported-bootloaders", "") +# out["host_supported-bootloaders"] = sbs.split("; ") +# out["host_suspend-image-sr-uuid"] = dct.get("suspend-image-sr-uuid", "") +# out["host_crash-dump-sr-uuid"] = dct.get("crash-dump-sr-uuid", "") +# out["host_local-cache-sr"] = dct.get("local-cache-sr", "") + out["host_memory"] = omm = {} + omm["total"] = safe_int(dct.get("memory-total", "")) + omm["overhead"] = safe_int(dct.get("memory-overhead", "")) + omm["free"] = safe_int(dct.get("memory-free", "")) + omm["free-computed"] = safe_int( + dct.get("memory-free-computed", "")) + +# out["host_API-version"] = avv = {} +# avv["vendor"] = dct.get("API-version-vendor", "") +# avv["major"] = safe_int(dct.get("API-version-major", "")) +# avv["minor"] = safe_int(dct.get("API-version-minor", "")) + + out["host_uuid"] = dct.get("uuid", None) + out["host_name-label"] = dct.get("name-label", "") + out["host_name-description"] = dct.get("name-description", "") +# out["host_host-metrics-live"] = dct.get( +# "host-metrics-live", "false") == "true" + out["host_hostname"] = dct.get("hostname", "") + out["host_ip_address"] = dct.get("address", "") + oc = dct.get("other-config", "") + out["host_other-config"] = ocd = {} + if oc: + for oc_fld in 
oc.split("; "): + ock, ocv = strip_kv(oc_fld) + ocd[ock] = ocv +# out["host_capabilities"] = dct.get("capabilities", "").split("; ") +# out["host_allowed-operations"] = dct.get( +# "allowed-operations", "").split("; ") +# lsrv = dct.get("license-server", "") +# out["host_license-server"] = ols = {} +# if lsrv: +# for lspart in lsrv.split("; "): +# lsk, lsv = lspart.split(": ") +# if lsk == "port": +# ols[lsk] = safe_int(lsv) +# else: +# ols[lsk] = lsv +# sv = dct.get("software-version", "") +# out["host_software-version"] = osv = {} +# if sv: +# for svln in sv.split("; "): +# svk, svv = strip_kv(svln) +# osv[svk] = svv + cpuinf = dct.get("cpu_info", "") + out["host_cpu_info"] = ocp = {} + if cpuinf: + for cpln in cpuinf.split("; "): + cpk, cpv = strip_kv(cpln) + if cpk in ("cpu_count", "family", "model", "stepping"): + ocp[cpk] = safe_int(cpv) + else: + ocp[cpk] = cpv +# out["host_edition"] = dct.get("edition", "") +# out["host_external-auth-service-name"] = dct.get( +# "external-auth-service-name", "") + return out + + +if __name__ == "__main__": + XenAPIPlugin.dispatch( + {"host_data": host_data}) diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py index a35ccd6ab..6c589ed29 100755 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py @@ -56,16 +56,17 @@ def read_record(self, arg_dict): and boolean True, attempting to read a non-existent path will return the string 'None' instead of raising an exception. """ - cmd = "xenstore-read /local/domain/%(dom_id)s/%(path)s" % arg_dict + cmd = ["xenstore-read", "/local/domain/%(dom_id)s/%(path)s" % arg_dict] try: - return _run_command(cmd).rstrip("\n") + ret, result = _run_command(cmd) + return result.strip() except pluginlib.PluginError, e: if arg_dict.get("ignore_missing_path", False): - cmd = "xenstore-exists /local/domain/%(dom_id)s/%(path)s; echo $?" 
- cmd = cmd % arg_dict - ret = _run_command(cmd).strip() + cmd = ["xenstore-exists", + "/local/domain/%(dom_id)s/%(path)s" % arg_dict] + ret, result = _run_command(cmd) # If the path exists, the cmd should return "0" - if ret != "0": + if ret != 0: # No such path, so ignore the error and return the # string 'None', since None can't be marshalled # over RPC. @@ -83,8 +84,9 @@ def write_record(self, arg_dict): you must specify a 'value' key, whose value must be a string. Typically, you can json-ify more complex values and store the json output. """ - cmd = "xenstore-write /local/domain/%(dom_id)s/%(path)s '%(value)s'" - cmd = cmd % arg_dict + cmd = ["xenstore-write", + "/local/domain/%(dom_id)s/%(path)s" % arg_dict, + arg_dict["value"]] _run_command(cmd) return arg_dict["value"] @@ -96,10 +98,10 @@ def list_records(self, arg_dict): path as the key and the stored value as the value. If the path doesn't exist, an empty dict is returned. """ - cmd = "xenstore-ls /local/domain/%(dom_id)s/%(path)s" % arg_dict - cmd = cmd.rstrip("/") + dirpath = "/local/domain/%(dom_id)s/%(path)s" % arg_dict + cmd = ["xenstore-ls", dirpath.rstrip("/")] try: - recs = _run_command(cmd) + ret, recs = _run_command(cmd) except pluginlib.PluginError, e: if "No such file or directory" in "%s" % e: # Path doesn't exist. @@ -128,8 +130,9 @@ def delete_record(self, arg_dict): """Just like it sounds: it removes the record for the specified VM and the specified path from xenstore. """ - cmd = "xenstore-rm /local/domain/%(dom_id)s/%(path)s" % arg_dict - return _run_command(cmd) + cmd = ["xenstore-rm", "/local/domain/%(dom_id)s/%(path)s" % arg_dict] + ret, result = _run_command(cmd) + return result def _paths_from_ls(recs): @@ -168,16 +171,16 @@ def _paths_from_ls(recs): def _run_command(cmd): """Abstracts out the basics of issuing system commands. If the command returns anything in stderr, a PluginError is raised with that information. - Otherwise, the output from stdout is returned. 
+ Otherwise, a tuple of (return code, stdout data) is returned. """ pipe = subprocess.PIPE - proc = subprocess.Popen([cmd], shell=True, stdin=pipe, stdout=pipe, - stderr=pipe, close_fds=True) - proc.wait() + proc = subprocess.Popen(cmd, stdin=pipe, stdout=pipe, stderr=pipe, + close_fds=True) + ret = proc.wait() err = proc.stderr.read() if err: raise pluginlib.PluginError(err) - return proc.stdout.read() + return (ret, proc.stdout.read()) if __name__ == "__main__": @@ -7,2124 +7,2842 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n" -"POT-Creation-Date: 2011-01-10 11:25-0800\n" +"POT-Creation-Date: 2011-02-21 10:03-0500\n" "PO-Revision-Date: 2011-01-12 19:50+0000\n" "Last-Translator: Xuacu Saturio <xuacusk8@gmail.com>\n" "Language-Team: Asturian <ast@li.org>\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Launchpad-Export-Date: 2011-02-05 05:36+0000\n" -"X-Generator: Launchpad (build 12177)\n" +"X-Launchpad-Export-Date: 2011-03-19 06:18+0000\n" +"X-Generator: Launchpad (build 12559)\n" -#: nova/crypto.py:46 +#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55 +#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110 +#: ../nova/scheduler/simple.py:122 +msgid "No hosts found" +msgstr "" + +#: ../nova/exception.py:33 +msgid "Unexpected error while running command." +msgstr "" + +#: ../nova/exception.py:36 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" + +#: ../nova/exception.py:107 +msgid "DB exception wrapped" +msgstr "" + +#. 
exc_type, exc_value, exc_traceback = sys.exc_info() +#: ../nova/exception.py:120 +msgid "Uncaught exception" +msgstr "" + +#: ../nova/volume/api.py:45 +#, python-format +msgid "Quota exceeeded for %(pid)s, tried to create %(size)sG volume" +msgstr "" + +#: ../nova/volume/api.py:47 +#, python-format +msgid "Volume quota exceeded. You cannot create a volume of size %sG" +msgstr "" + +#: ../nova/volume/api.py:71 ../nova/volume/api.py:96 +msgid "Volume status must be available" +msgstr "" + +#: ../nova/volume/api.py:98 +msgid "Volume is already attached" +msgstr "" + +#: ../nova/volume/api.py:104 +msgid "Volume is already detached" +msgstr "" + +#: ../nova/api/openstack/servers.py:72 +msgid "Failed to read private ip" +msgstr "" + +#: ../nova/api/openstack/servers.py:79 +msgid "Failed to read public ip(s)" +msgstr "" + +#: ../nova/api/openstack/servers.py:152 +#, python-format +msgid "%(param)s property not found for image %(_image_id)s" +msgstr "" + +#: ../nova/api/openstack/servers.py:168 +msgid "No keypairs defined" +msgstr "" + +#: ../nova/api/openstack/servers.py:238 +#, python-format +msgid "Compute.api::lock %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:253 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:267 +#, python-format +msgid "Compute.api::get_lock %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:281 +#, python-format +msgid "Compute.api::reset_network %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:292 +#, python-format +msgid "Compute.api::pause %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:303 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:314 +#, python-format +msgid "compute.api::suspend %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:325 +#, python-format +msgid "compute.api::resume %s" +msgstr "" + +#: ../nova/twistd.py:157 +msgid "Wrong number of arguments." 
+msgstr "" + +#: ../nova/twistd.py:209 +#, python-format +msgid "pidfile %s does not exist. Daemon not running?\n" +msgstr "" + +#: ../nova/twistd.py:221 +msgid "No such process" +msgstr "" + +#: ../nova/twistd.py:230 ../nova/service.py:224 +#, python-format +msgid "Serving %s" +msgstr "" + +#: ../nova/twistd.py:262 ../nova/service.py:225 +msgid "Full set of FLAGS:" +msgstr "" + +#: ../nova/twistd.py:266 +#, python-format +msgid "Starting %s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101 +#: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741 +#: ../nova/api/ec2/__init__.py:317 +#, python-format +msgid "Instance %s not found" +msgstr "" + +#. NOTE: No Resource Pool concept so far +#: ../nova/virt/xenapi/volumeops.py:51 +#, python-format +msgid "Attach_volume: %(instance_name)s, %(device_path)s, %(mountpoint)s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:69 +#, python-format +msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:80 +#, python-format +msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:91 +#, python-format +msgid "Unable to attach volume to instance %s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:93 +#, python-format +msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" +msgstr "" + +#. 
Detach VBD from VM +#: ../nova/virt/xenapi/volumeops.py:104 +#, python-format +msgid "Detach_volume: %(instance_name)s, %(mountpoint)s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:112 +#, python-format +msgid "Unable to locate volume %s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:120 +#, python-format +msgid "Unable to detach volume %s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:127 +#, python-format +msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" +msgstr "" + +#: ../nova/compute/instance_types.py:41 +#, python-format +msgid "Unknown instance type: %s" +msgstr "" + +#: ../nova/crypto.py:46 msgid "Filename of root CA" msgstr "Nome del ficheru de l'autoridá de certificáu raíz" -#: nova/crypto.py:49 +#: ../nova/crypto.py:49 msgid "Filename of private key" msgstr "Nome del ficheru de clave privada" -#: nova/crypto.py:51 +#: ../nova/crypto.py:51 msgid "Filename of root Certificate Revokation List" msgstr "Nome del ficheru de llista de refugu de certificáu raíz" -#: nova/crypto.py:53 +#: ../nova/crypto.py:53 msgid "Where we keep our keys" msgstr "" -#: nova/crypto.py:55 +#: ../nova/crypto.py:55 msgid "Where we keep our root CA" msgstr "" -#: nova/crypto.py:57 +#: ../nova/crypto.py:57 msgid "Should we use a CA for each project?" msgstr "" -#: nova/crypto.py:61 +#: ../nova/crypto.py:61 #, python-format msgid "Subject for certificate for users, %s for project, user, timestamp" msgstr "" -#: nova/crypto.py:66 +#: ../nova/crypto.py:66 #, python-format msgid "Subject for certificate for projects, %s for project, timestamp" msgstr "" -#: nova/crypto.py:71 +#: ../nova/crypto.py:71 #, python-format msgid "Subject for certificate for vpns, %s for project, timestamp" msgstr "" -#: nova/crypto.py:258 +#: ../nova/crypto.py:258 #, python-format msgid "Flags path: %s" msgstr "" -#: nova/exception.py:33 -msgid "Unexpected error while running command." 
+#: ../nova/scheduler/manager.py:69 +#, python-format +msgid "Casting to %(topic)s %(host)s for %(method)s" +msgstr "" + +#: ../nova/compute/manager.py:78 +#, python-format +msgid "check_instance_lock: decorating: |%s|" msgstr "" -#: nova/exception.py:36 +#: ../nova/compute/manager.py:80 #, python-format msgid "" -"%s\n" -"Command: %s\n" -"Exit code: %s\n" -"Stdout: %r\n" -"Stderr: %r" +"check_instance_lock: arguments: |%(self)s| |%(context)s| |%(instance_id)s|" msgstr "" -#: nova/exception.py:86 -msgid "Uncaught exception" +#: ../nova/compute/manager.py:84 +#, python-format +msgid "check_instance_lock: locked: |%s|" msgstr "" -#: nova/fakerabbit.py:48 +#: ../nova/compute/manager.py:86 #, python-format -msgid "(%s) publish (key: %s) %s" +msgid "check_instance_lock: admin: |%s|" msgstr "" -#: nova/fakerabbit.py:53 +#: ../nova/compute/manager.py:91 #, python-format -msgid "Publishing to route %s" +msgid "check_instance_lock: executing: |%s|" msgstr "" -#: nova/fakerabbit.py:83 +#: ../nova/compute/manager.py:95 #, python-format -msgid "Declaring queue %s" +msgid "check_instance_lock: not executing |%s|" +msgstr "" + +#: ../nova/compute/manager.py:179 +msgid "Instance has already been created" msgstr "" -#: nova/fakerabbit.py:89 +#: ../nova/compute/manager.py:180 #, python-format -msgid "Declaring exchange %s" +msgid "instance %s: starting..." msgstr "" -#: nova/fakerabbit.py:95 +#. pylint: disable=W0702 +#: ../nova/compute/manager.py:219 #, python-format -msgid "Binding %s to %s with key %s" +msgid "instance %s: Failed to spawn" msgstr "" -#: nova/fakerabbit.py:120 +#: ../nova/compute/manager.py:233 ../nova/tests/test_cloud.py:286 #, python-format -msgid "Getting from %s: %s" +msgid "Terminating instance %s" msgstr "" -#: nova/rpc.py:92 +#: ../nova/compute/manager.py:255 #, python-format -msgid "AMQP server on %s:%d is unreachable. Trying again in %d seconds." 
+msgid "Deallocating address %s" msgstr "" -#: nova/rpc.py:99 +#: ../nova/compute/manager.py:268 #, python-format -msgid "Unable to connect to AMQP server after %d tries. Shutting down." +msgid "trying to destroy already destroyed instance: %s" msgstr "" -#: nova/rpc.py:118 -msgid "Reconnected to queue" +#: ../nova/compute/manager.py:282 +#, python-format +msgid "Rebooting instance %s" msgstr "" -#: nova/rpc.py:125 -msgid "Failed to fetch message from queue" +#: ../nova/compute/manager.py:287 +#, python-format +msgid "" +"trying to reboot a non-running instance: %(instance_id)s (state: %(state)s " +"expected: %(running)s)" msgstr "" -#: nova/rpc.py:155 +#: ../nova/compute/manager.py:311 #, python-format -msgid "Initing the Adapter Consumer for %s" +msgid "instance %s: snapshotting" msgstr "" -#: nova/rpc.py:170 +#: ../nova/compute/manager.py:316 #, python-format -msgid "received %s" +msgid "" +"trying to snapshot a non-running instance: %(instance_id)s (state: %(state)s " +"expected: %(running)s)" msgstr "" -#: nova/rpc.py:183 +#: ../nova/compute/manager.py:332 #, python-format -msgid "no method for message: %s" +msgid "" +"trying to reset the password on a non-running instance: %(instance_id)s " +"(state: %(instance_state)s expected: %(expected_state)s)" msgstr "" -#: nova/rpc.py:184 +#: ../nova/compute/manager.py:335 #, python-format -msgid "No method for message: %s" +msgid "instance %s: setting admin password" msgstr "" -#: nova/rpc.py:245 +#: ../nova/compute/manager.py:353 #, python-format -msgid "Returning exception %s to caller" +msgid "" +"trying to inject a file into a non-running instance: %(instance_id)s (state: " +"%(instance_state)s expected: %(expected_state)s)" msgstr "" -#: nova/rpc.py:286 +#: ../nova/compute/manager.py:362 #, python-format -msgid "unpacked context: %s" +msgid "instance %(nm)s: injecting file to %(plain_path)s" msgstr "" -#: nova/rpc.py:305 -msgid "Making asynchronous call..." 
+#: ../nova/compute/manager.py:372 +#, python-format +msgid "instance %s: rescuing" msgstr "" -#: nova/rpc.py:308 +#: ../nova/compute/manager.py:387 #, python-format -msgid "MSG_ID is %s" +msgid "instance %s: unrescuing" msgstr "" -#: nova/rpc.py:356 +#: ../nova/compute/manager.py:406 #, python-format -msgid "response %s" +msgid "instance %s: pausing" msgstr "" -#: nova/rpc.py:365 +#: ../nova/compute/manager.py:423 #, python-format -msgid "topic is %s" +msgid "instance %s: unpausing" msgstr "" -#: nova/rpc.py:366 +#: ../nova/compute/manager.py:440 #, python-format -msgid "message %s" +msgid "instance %s: retrieving diagnostics" msgstr "" -#: nova/service.py:157 +#: ../nova/compute/manager.py:453 #, python-format -msgid "Starting %s node" +msgid "instance %s: suspending" msgstr "" -#: nova/service.py:169 -msgid "Service killed that has no database entry" +#: ../nova/compute/manager.py:472 +#, python-format +msgid "instance %s: resuming" msgstr "" -#: nova/service.py:190 -msgid "The service database object disappeared, Recreating it." +#: ../nova/compute/manager.py:491 +#, python-format +msgid "instance %s: locking" msgstr "" -#: nova/service.py:202 -msgid "Recovered model server connection!" +#: ../nova/compute/manager.py:503 +#, python-format +msgid "instance %s: unlocking" msgstr "" -#: nova/service.py:208 -msgid "model server went away" +#: ../nova/compute/manager.py:513 +#, python-format +msgid "instance %s: getting locked state" msgstr "" -#: nova/service.py:217 nova/db/sqlalchemy/__init__.py:43 +#: ../nova/compute/manager.py:526 #, python-format -msgid "Data store %s is unreachable. Trying again in %d seconds." 
+msgid "instance %s: reset network" msgstr "" -#: nova/service.py:232 nova/twistd.py:232 +#: ../nova/compute/manager.py:535 ../nova/api/ec2/cloud.py:515 #, python-format -msgid "Serving %s" +msgid "Get console output for instance %s" msgstr "" -#: nova/service.py:234 nova/twistd.py:264 -msgid "Full set of FLAGS:" +#: ../nova/compute/manager.py:543 +#, python-format +msgid "instance %s: getting ajax console" msgstr "" -#: nova/twistd.py:211 +#: ../nova/compute/manager.py:553 #, python-format -msgid "pidfile %s does not exist. Daemon not running?\n" +msgid "" +"instance %(instance_id)s: attaching volume %(volume_id)s to %(mountpoint)s" msgstr "" -#: nova/twistd.py:268 +#. pylint: disable=W0702 +#. NOTE(vish): The inline callback eats the exception info so we +#. log the traceback here and reraise the same +#. ecxception below. +#: ../nova/compute/manager.py:569 #, python-format -msgid "Starting %s" +msgid "instance %(instance_id)s: attach failed %(mountpoint)s, removing" msgstr "" -#: nova/utils.py:53 +#: ../nova/compute/manager.py:585 #, python-format -msgid "Inner Exception: %s" +msgid "" +"Detach volume %(volume_id)s from mountpoint %(mp)s on instance " +"%(instance_id)s" msgstr "" -#: nova/utils.py:54 +#: ../nova/compute/manager.py:588 #, python-format -msgid "Class %s cannot be found" +msgid "Detaching volume from unknown instance %s" msgstr "" -#: nova/utils.py:113 +#: ../nova/scheduler/simple.py:53 #, python-format -msgid "Fetching %s" +msgid "Host %s is not alive" +msgstr "" + +#: ../nova/scheduler/simple.py:65 +msgid "All hosts have too many cores" msgstr "" -#: nova/utils.py:125 +#: ../nova/scheduler/simple.py:87 #, python-format -msgid "Running cmd (subprocess): %s" +msgid "Host %s not available" msgstr "" -#: nova/utils.py:138 +#: ../nova/scheduler/simple.py:99 +msgid "All hosts have too many gigabytes" +msgstr "" + +#: ../nova/scheduler/simple.py:119 +msgid "All hosts have too many networks" +msgstr "" + +#: ../nova/volume/manager.py:85 #, python-format 
-msgid "Result was %s" +msgid "Re-exporting %s volumes" msgstr "" -#: nova/utils.py:171 +#: ../nova/volume/manager.py:90 #, python-format -msgid "debug in callback: %s" +msgid "volume %s: skipping export" msgstr "" -#: nova/utils.py:176 +#: ../nova/volume/manager.py:96 #, python-format -msgid "Running %s" +msgid "volume %s: creating" msgstr "" -#: nova/utils.py:207 +#: ../nova/volume/manager.py:108 #, python-format -msgid "Couldn't get IP, using 127.0.0.1 %s" +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" msgstr "" -#: nova/utils.py:289 +#: ../nova/volume/manager.py:112 #, python-format -msgid "Invalid backend: %s" +msgid "volume %s: creating export" msgstr "" -#: nova/utils.py:300 +#: ../nova/volume/manager.py:123 #, python-format -msgid "backend %s" +msgid "volume %s: created successfully" msgstr "" -#: nova/api/ec2/__init__.py:133 -msgid "Too many failed authentications." +#: ../nova/volume/manager.py:131 +msgid "Volume is still attached" msgstr "" -#: nova/api/ec2/__init__.py:142 +#: ../nova/volume/manager.py:133 +msgid "Volume is not local to this node" +msgstr "" + +#: ../nova/volume/manager.py:136 #, python-format -msgid "" -"Access key %s has had %d failed authentications and will be locked out for " -"%d minutes." 
+msgid "volume %s: removing export" msgstr "" -#: nova/api/ec2/__init__.py:179 nova/objectstore/handler.py:140 +#: ../nova/volume/manager.py:138 #, python-format -msgid "Authentication Failure: %s" +msgid "volume %s: deleting" msgstr "" -#: nova/api/ec2/__init__.py:190 +#: ../nova/volume/manager.py:147 #, python-format -msgid "Authenticated Request For %s:%s)" +msgid "volume %s: deleted successfully" msgstr "" -#: nova/api/ec2/__init__.py:227 +#: ../nova/virt/xenapi/fake.py:74 #, python-format -msgid "action: %s" +msgid "%(text)s: _db_content => %(content)s" +msgstr "" + +#: ../nova/virt/xenapi/fake.py:304 ../nova/virt/xenapi/fake.py:404 +#: ../nova/virt/xenapi/fake.py:422 ../nova/virt/xenapi/fake.py:478 +msgid "Raising NotImplemented" msgstr "" -#: nova/api/ec2/__init__.py:229 +#: ../nova/virt/xenapi/fake.py:306 #, python-format -msgid "arg: %s\t\tval: %s" +msgid "xenapi.fake does not have an implementation for %s" msgstr "" -#: nova/api/ec2/__init__.py:301 +#: ../nova/virt/xenapi/fake.py:341 #, python-format -msgid "Unauthorized request for controller=%s and action=%s" +msgid "Calling %(localname)s %(impl)s" msgstr "" -#: nova/api/ec2/__init__.py:339 +#: ../nova/virt/xenapi/fake.py:346 #, python-format -msgid "NotFound raised: %s" +msgid "Calling getter %s" msgstr "" -#: nova/api/ec2/__init__.py:342 +#: ../nova/virt/xenapi/fake.py:406 #, python-format -msgid "ApiError raised: %s" +msgid "" +"xenapi.fake does not have an implementation for %s or it has been called " +"with the wrong number of arguments" +msgstr "" + +#: ../nova/tests/test_cloud.py:256 +msgid "Can't test instances without a real virtual env." msgstr "" -#: nova/api/ec2/__init__.py:349 +#: ../nova/tests/test_cloud.py:268 #, python-format -msgid "Unexpected error raised: %s" +msgid "Need to watch instance %s until it's running..." msgstr "" -#: nova/api/ec2/__init__.py:354 -msgid "An unknown error has occurred. Please try your request again." 
+#: ../nova/virt/connection.py:73 +msgid "Failed to open connection to the hypervisor" msgstr "" -#: nova/api/ec2/admin.py:84 +#: ../nova/network/linux_net.py:187 #, python-format -msgid "Creating new user: %s" +msgid "Starting VLAN inteface %s" msgstr "" -#: nova/api/ec2/admin.py:92 +#: ../nova/network/linux_net.py:208 #, python-format -msgid "Deleting user: %s" +msgid "Starting Bridge interface for %s" msgstr "" -#: nova/api/ec2/admin.py:114 +#. pylint: disable=W0703 +#: ../nova/network/linux_net.py:314 #, python-format -msgid "Adding role %s to user %s for project %s" +msgid "Hupping dnsmasq threw %s" msgstr "" -#: nova/api/ec2/admin.py:117 nova/auth/manager.py:415 +#: ../nova/network/linux_net.py:316 #, python-format -msgid "Adding sitewide role %s to user %s" +msgid "Pid %d is stale, relaunching dnsmasq" msgstr "" -#: nova/api/ec2/admin.py:122 +#. pylint: disable=W0703 +#: ../nova/network/linux_net.py:358 #, python-format -msgid "Removing role %s from user %s for project %s" +msgid "killing radvd threw %s" msgstr "" -#: nova/api/ec2/admin.py:125 nova/auth/manager.py:441 +#: ../nova/network/linux_net.py:360 #, python-format -msgid "Removing sitewide role %s from user %s" +msgid "Pid %d is stale, relaunching radvd" msgstr "" -#: nova/api/ec2/admin.py:129 nova/api/ec2/admin.py:192 -msgid "operation must be add or remove" +#. 
pylint: disable=W0703 +#: ../nova/network/linux_net.py:449 +#, python-format +msgid "Killing dnsmasq threw %s" msgstr "" -#: nova/api/ec2/admin.py:142 +#: ../nova/utils.py:58 #, python-format -msgid "Getting x509 for user: %s on project: %s" +msgid "Inner Exception: %s" msgstr "" -#: nova/api/ec2/admin.py:159 +#: ../nova/utils.py:59 #, python-format -msgid "Create project %s managed by %s" +msgid "Class %s cannot be found" msgstr "" -#: nova/api/ec2/admin.py:170 +#: ../nova/utils.py:118 #, python-format -msgid "Delete project: %s" +msgid "Fetching %s" msgstr "" -#: nova/api/ec2/admin.py:184 nova/auth/manager.py:533 +#: ../nova/utils.py:130 #, python-format -msgid "Adding user %s to project %s" +msgid "Running cmd (subprocess): %s" msgstr "" -#: nova/api/ec2/admin.py:188 +#: ../nova/utils.py:143 ../nova/utils.py:183 #, python-format -msgid "Removing user %s from project %s" +msgid "Result was %s" msgstr "" -#: nova/api/ec2/apirequest.py:95 +#: ../nova/utils.py:159 #, python-format -msgid "Unsupported API request: controller = %s,action = %s" +msgid "Running cmd (SSH): %s" msgstr "" -#: nova/api/ec2/cloud.py:117 +#: ../nova/utils.py:217 #, python-format -msgid "Generating root CA: %s" +msgid "debug in callback: %s" msgstr "" -#: nova/api/ec2/cloud.py:277 +#: ../nova/utils.py:222 #, python-format -msgid "Create key pair %s" +msgid "Running %s" msgstr "" -#: nova/api/ec2/cloud.py:285 +#: ../nova/utils.py:262 #, python-format -msgid "Delete key pair %s" +msgid "Link Local address is not found.:%s" msgstr "" -#: nova/api/ec2/cloud.py:357 +#: ../nova/utils.py:265 #, python-format -msgid "%s is not a valid ipProtocol" +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" msgstr "" -#: nova/api/ec2/cloud.py:361 -msgid "Invalid port range" +#: ../nova/utils.py:363 +#, python-format +msgid "Invalid backend: %s" msgstr "" -#: nova/api/ec2/cloud.py:392 +#: ../nova/utils.py:374 #, python-format -msgid "Revoke security group ingress %s" +msgid "backend %s" msgstr "" -#: 
nova/api/ec2/cloud.py:401 nova/api/ec2/cloud.py:414 -msgid "No rule for the specified parameters." +#: ../nova/fakerabbit.py:49 +#, python-format +msgid "(%(nm)s) publish (key: %(routing_key)s) %(message)s" msgstr "" -#: nova/api/ec2/cloud.py:421 +#: ../nova/fakerabbit.py:54 #, python-format -msgid "Authorize security group ingress %s" +msgid "Publishing to route %s" msgstr "" -#: nova/api/ec2/cloud.py:432 +#: ../nova/fakerabbit.py:84 #, python-format -msgid "This rule already exists in group %s" +msgid "Declaring queue %s" msgstr "" -#: nova/api/ec2/cloud.py:460 +#: ../nova/fakerabbit.py:90 #, python-format -msgid "Create Security Group %s" +msgid "Declaring exchange %s" msgstr "" -#: nova/api/ec2/cloud.py:463 +#: ../nova/fakerabbit.py:96 #, python-format -msgid "group %s already exists" +msgid "Binding %(queue)s to %(exchange)s with key %(routing_key)s" msgstr "" -#: nova/api/ec2/cloud.py:475 +#: ../nova/fakerabbit.py:121 #, python-format -msgid "Delete security group %s" +msgid "Getting from %(queue)s: %(message)s" msgstr "" -#: nova/api/ec2/cloud.py:483 nova/compute/manager.py:452 +#: ../nova/virt/xenapi/vm_utils.py:135 ../nova/virt/hyperv.py:171 #, python-format -msgid "Get console output for instance %s" +msgid "Created VM %s..." msgstr "" -#: nova/api/ec2/cloud.py:543 +#: ../nova/virt/xenapi/vm_utils.py:138 #, python-format -msgid "Create volume of %s GB" +msgid "Created VM %(instance_name)s as %(vm_ref)s." msgstr "" -#: nova/api/ec2/cloud.py:567 +#: ../nova/virt/xenapi/vm_utils.py:168 #, python-format -msgid "Attach volume %s to instacne %s at %s" +msgid "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " msgstr "" -#: nova/api/ec2/cloud.py:579 +#: ../nova/virt/xenapi/vm_utils.py:171 #, python-format -msgid "Detach volume %s" +msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." 
msgstr "" -#: nova/api/ec2/cloud.py:686 -msgid "Allocate address" +#: ../nova/virt/xenapi/vm_utils.py:187 +#, python-format +msgid "VBD not found in instance %s" msgstr "" -#: nova/api/ec2/cloud.py:691 +#: ../nova/virt/xenapi/vm_utils.py:197 #, python-format -msgid "Release address %s" +msgid "Unable to unplug VBD %s" msgstr "" -#: nova/api/ec2/cloud.py:696 +#: ../nova/virt/xenapi/vm_utils.py:209 #, python-format -msgid "Associate address %s to instance %s" +msgid "Unable to destroy VBD %s" msgstr "" -#: nova/api/ec2/cloud.py:703 +#: ../nova/virt/xenapi/vm_utils.py:224 #, python-format -msgid "Disassociate address %s" +msgid "Creating VIF for VM %(vm_ref)s, network %(network_ref)s." msgstr "" -#: nova/api/ec2/cloud.py:730 -msgid "Going to start terminating instances" +#: ../nova/virt/xenapi/vm_utils.py:227 +#, python-format +msgid "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s." msgstr "" -#: nova/api/ec2/cloud.py:738 +#: ../nova/virt/xenapi/vm_utils.py:246 #, python-format -msgid "Reboot instance %r" +msgid "" +"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s) on " +"%(sr_ref)s." msgstr "" -#: nova/api/ec2/cloud.py:775 +#. TODO(sirp): Add quiesce and VSS locking support when Windows support +#. is added +#: ../nova/virt/xenapi/vm_utils.py:258 #, python-format -msgid "De-registering image %s" +msgid "Snapshotting VM %(vm_ref)s with label '%(label)s'..." msgstr "" -#: nova/api/ec2/cloud.py:783 +#: ../nova/virt/xenapi/vm_utils.py:272 #, python-format -msgid "Registered image %s with id %s" +msgid "Created snapshot %(template_vm_ref)s from VM %(vm_ref)s." 
msgstr "" -#: nova/api/ec2/cloud.py:789 nova/api/ec2/cloud.py:804 +#: ../nova/virt/xenapi/vm_utils.py:286 #, python-format -msgid "attribute not supported: %s" +msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s" msgstr "" -#: nova/api/ec2/cloud.py:794 +#: ../nova/virt/xenapi/vm_utils.py:327 #, python-format -msgid "invalid id: %s" +msgid "Size for image %(image)s:%(virtual_size)d" msgstr "" -#: nova/api/ec2/cloud.py:807 -msgid "user or group not specified" +#: ../nova/virt/xenapi/vm_utils.py:332 +#, python-format +msgid "Glance image %s" msgstr "" -#: nova/api/ec2/cloud.py:809 -msgid "only group \"all\" is supported" +#. we need to invoke a plugin for copying VDI's +#. content into proper path +#: ../nova/virt/xenapi/vm_utils.py:342 +#, python-format +msgid "Copying VDI %s to /boot/guest on dom0" msgstr "" -#: nova/api/ec2/cloud.py:811 -msgid "operation_type must be add or remove" +#: ../nova/virt/xenapi/vm_utils.py:352 +#, python-format +msgid "Kernel/Ramdisk VDI %s destroyed" msgstr "" -#: nova/api/ec2/cloud.py:812 +#: ../nova/virt/xenapi/vm_utils.py:361 #, python-format -msgid "Updating image %s publicity" +msgid "Asking xapi to fetch %(url)s as %(access)s" msgstr "" -#: nova/api/ec2/metadatarequesthandler.py:75 +#: ../nova/virt/xenapi/vm_utils.py:386 ../nova/virt/xenapi/vm_utils.py:402 #, python-format -msgid "Failed to get metadata for ip: %s" +msgid "Looking up vdi %s for PV kernel" msgstr "" -#: nova/api/openstack/__init__.py:70 +#: ../nova/virt/xenapi/vm_utils.py:397 #, python-format -msgid "Caught error: %s" +msgid "PV Kernel in VDI:%s" msgstr "" -#: nova/api/openstack/__init__.py:86 -msgid "Including admin operations in API." 
+#: ../nova/virt/xenapi/vm_utils.py:405 +#, python-format +msgid "Running pygrub against %s" msgstr "" -#: nova/api/openstack/servers.py:184 +#: ../nova/virt/xenapi/vm_utils.py:411 #, python-format -msgid "Compute.api::lock %s" +msgid "Found Xen kernel %s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:413 +msgid "No Xen kernel found. Booting HVM." msgstr "" -#: nova/api/openstack/servers.py:199 +#: ../nova/virt/xenapi/vm_utils.py:425 ../nova/virt/hyperv.py:431 #, python-format -msgid "Compute.api::unlock %s" +msgid "duplicate name found: %s" msgstr "" -#: nova/api/openstack/servers.py:213 +#: ../nova/virt/xenapi/vm_utils.py:442 #, python-format -msgid "Compute.api::get_lock %s" +msgid "VDI %s is still available" msgstr "" -#: nova/api/openstack/servers.py:224 +#: ../nova/virt/xenapi/vm_utils.py:463 #, python-format -msgid "Compute.api::pause %s" +msgid "(VM_UTILS) xenserver vm state -> |%s|" msgstr "" -#: nova/api/openstack/servers.py:235 +#: ../nova/virt/xenapi/vm_utils.py:465 #, python-format -msgid "Compute.api::unpause %s" +msgid "(VM_UTILS) xenapi power_state -> |%s|" msgstr "" -#: nova/api/openstack/servers.py:246 +#: ../nova/virt/xenapi/vm_utils.py:525 #, python-format -msgid "compute.api::suspend %s" +msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s" msgstr "" -#: nova/api/openstack/servers.py:257 +#: ../nova/virt/xenapi/vm_utils.py:542 #, python-format -msgid "compute.api::resume %s" +msgid "Re-scanning SR %s" msgstr "" -#: nova/auth/dbdriver.py:84 +#: ../nova/virt/xenapi/vm_utils.py:567 #, python-format -msgid "User %s already exists" +msgid "" +"VHD coalesce attempts exceeded (%(counter)d > %(max_attempts)d), giving up..." msgstr "" -#: nova/auth/dbdriver.py:106 nova/auth/ldapdriver.py:207 +#: ../nova/virt/xenapi/vm_utils.py:574 #, python-format -msgid "Project can't be created because manager %s doesn't exist" +msgid "" +"Parent %(parent_uuid)s doesn't match original parent " +"%(original_parent_uuid)s, waiting for coalesce..." 
msgstr "" -#: nova/auth/dbdriver.py:135 nova/auth/ldapdriver.py:204 +#: ../nova/virt/xenapi/vm_utils.py:590 #, python-format -msgid "Project can't be created because project %s already exists" +msgid "No VDIs found for VM %s" msgstr "" -#: nova/auth/dbdriver.py:157 nova/auth/ldapdriver.py:241 +#: ../nova/virt/xenapi/vm_utils.py:594 #, python-format -msgid "Project can't be modified because manager %s doesn't exist" +msgid "Unexpected number of VDIs (%(num_vdis)s) found for VM %(vm_ref)s" msgstr "" -#: nova/auth/dbdriver.py:245 +#: ../nova/virt/xenapi/vm_utils.py:653 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:188 #, python-format -msgid "User \"%s\" not found" +msgid "Creating VBD for VDI %s ... " msgstr "" -#: nova/auth/dbdriver.py:248 +#: ../nova/virt/xenapi/vm_utils.py:655 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:190 #, python-format -msgid "Project \"%s\" not found" +msgid "Creating VBD for VDI %s done." msgstr "" -#: nova/auth/fakeldap.py:33 -msgid "Attempted to instantiate singleton" +#: ../nova/virt/xenapi/vm_utils.py:657 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:192 +#, python-format +msgid "Plugging VBD %s ... " msgstr "" -#: nova/auth/ldapdriver.py:181 +#: ../nova/virt/xenapi/vm_utils.py:659 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:194 #, python-format -msgid "LDAP object for %s doesn't exist" +msgid "Plugging VBD %s done." 
msgstr "" -#: nova/auth/ldapdriver.py:218 +#: ../nova/virt/xenapi/vm_utils.py:661 #, python-format -msgid "Project can't be created because user %s doesn't exist" +msgid "VBD %(vbd)s plugged as %(orig_dev)s" msgstr "" -#: nova/auth/ldapdriver.py:478 +#: ../nova/virt/xenapi/vm_utils.py:664 #, python-format -msgid "User %s is already a member of the group %s" +msgid "VBD %(vbd)s plugged into wrong dev, remapping to %(dev)s" msgstr "" -#: nova/auth/ldapdriver.py:507 +#: ../nova/virt/xenapi/vm_utils.py:668 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:197 #, python-format -msgid "" -"Attempted to remove the last member of a group. Deleting the group at %s " -"instead." +msgid "Destroying VBD for VDI %s ... " msgstr "" -#: nova/auth/ldapdriver.py:528 +#: ../nova/virt/xenapi/vm_utils.py:671 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:200 #, python-format -msgid "Group at dn %s doesn't exist" +msgid "Destroying VBD for VDI %s done." msgstr "" -#: nova/auth/manager.py:259 +#: ../nova/virt/xenapi/vm_utils.py:683 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:211 +msgid "VBD.unplug successful first time." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:688 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:216 +msgid "VBD.unplug rejected: retrying..." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:692 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:220 +msgid "VBD.unplug successful eventually." 
+msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:695 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:223 #, python-format -msgid "Looking up user: %r" +msgid "Ignoring XenAPI.Failure in VBD.unplug: %s" msgstr "" -#: nova/auth/manager.py:263 +#: ../nova/virt/xenapi/vm_utils.py:704 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:66 #, python-format -msgid "Failed authorization for access key %s" +msgid "Ignoring XenAPI.Failure %s" msgstr "" -#: nova/auth/manager.py:264 +#: ../nova/virt/xenapi/vm_utils.py:735 #, python-format -msgid "No user found for access key %s" +msgid "" +"Writing partition table %(primary_first)d %(primary_last)d to %(dest)s..." msgstr "" -#: nova/auth/manager.py:270 +#: ../nova/virt/xenapi/vm_utils.py:747 #, python-format -msgid "Using project name = user name (%s)" +msgid "Writing partition table %s done." msgstr "" -#: nova/auth/manager.py:275 +#: ../nova/tests/test_rpc.py:89 #, python-format -msgid "failed authorization: no project named %s (user=%s)" +msgid "Nested received %(queue)s, %(value)s" msgstr "" -#: nova/auth/manager.py:277 +#: ../nova/tests/test_rpc.py:95 #, python-format -msgid "No project called %s could be found" +msgid "Nested return %s" msgstr "" -#: nova/auth/manager.py:281 +#: ../nova/tests/test_rpc.py:120 ../nova/tests/test_rpc.py:126 #, python-format -msgid "Failed authorization: user %s not admin and not member of project %s" +msgid "Received %s" msgstr "" -#: nova/auth/manager.py:283 +#: ../nova/db/sqlalchemy/api.py:44 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:133 #, python-format -msgid "User %s is not a member of project %s" +msgid "No service for id %s" msgstr "" -#: nova/auth/manager.py:292 nova/auth/manager.py:303 +#: ../nova/db/sqlalchemy/api.py:251 #, python-format -msgid "Invalid signature for user %s" +msgid "No service for %(host)s, %(binary)s" msgstr "" -#: nova/auth/manager.py:293 nova/auth/manager.py:304 
-msgid "Signature does not match" +#: ../nova/db/sqlalchemy/api.py:592 +msgid "No fixed ips defined" msgstr "" -#: nova/auth/manager.py:374 -msgid "Must specify project" +#: ../nova/db/sqlalchemy/api.py:608 +#, python-format +msgid "No floating ip for address %s" msgstr "" -#: nova/auth/manager.py:408 +#: ../nova/db/sqlalchemy/api.py:629 #, python-format -msgid "The %s role can not be found" +msgid "No address for instance %s" msgstr "" -#: nova/auth/manager.py:410 +#: ../nova/db/sqlalchemy/api.py:961 #, python-format -msgid "The %s role is global only" +msgid "no keypair for user %(user_id)s, name %(name)s" msgstr "" -#: nova/auth/manager.py:412 +#: ../nova/db/sqlalchemy/api.py:1076 ../nova/db/sqlalchemy/api.py:1156 #, python-format -msgid "Adding role %s to user %s in project %s" +msgid "No network for id %s" msgstr "" -#: nova/auth/manager.py:438 +#: ../nova/db/sqlalchemy/api.py:1086 +msgid "No networks defined" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1115 #, python-format -msgid "Removing role %s from user %s on project %s" +msgid "No network for bridge %s" msgstr "" -#: nova/auth/manager.py:505 +#: ../nova/db/sqlalchemy/api.py:1129 ../nova/db/sqlalchemy/api.py:1142 #, python-format -msgid "Created project %s with manager %s" +msgid "No network for instance %s" msgstr "" -#: nova/auth/manager.py:523 +#: ../nova/db/sqlalchemy/api.py:1277 #, python-format -msgid "modifying project %s" +msgid "Token %s does not exist" msgstr "" -#: nova/auth/manager.py:553 +#: ../nova/db/sqlalchemy/api.py:1302 #, python-format -msgid "Remove user %s from project %s" +msgid "No quota for project_id %s" msgstr "" -#: nova/auth/manager.py:581 +#: ../nova/db/sqlalchemy/api.py:1455 ../nova/db/sqlalchemy/api.py:1501 +#: ../nova/api/ec2/__init__.py:323 #, python-format -msgid "Deleting project %s" +msgid "Volume %s not found" msgstr "" -#: nova/auth/manager.py:637 +#: ../nova/db/sqlalchemy/api.py:1514 #, python-format -msgid "Created user %s (admin: %r)" +msgid "No export device 
found for volume %s" msgstr "" -#: nova/auth/manager.py:645 +#: ../nova/db/sqlalchemy/api.py:1527 #, python-format -msgid "Deleting user %s" +msgid "No target id found for volume %s" msgstr "" -#: nova/auth/manager.py:655 +#: ../nova/db/sqlalchemy/api.py:1572 #, python-format -msgid "Access Key change for user %s" +msgid "No security group with id %s" msgstr "" -#: nova/auth/manager.py:657 +#: ../nova/db/sqlalchemy/api.py:1589 #, python-format -msgid "Secret Key change for user %s" +msgid "No security group named %(group_name)s for project: %(project_id)s" msgstr "" -#: nova/auth/manager.py:659 +#: ../nova/db/sqlalchemy/api.py:1682 #, python-format -msgid "Admin status set to %r for user %s" +msgid "No secuity group rule with id %s" msgstr "" -#: nova/auth/manager.py:708 +#: ../nova/db/sqlalchemy/api.py:1756 #, python-format -msgid "No vpn data for project %s" +msgid "No user for id %s" msgstr "" -#: nova/cloudpipe/pipelib.py:45 -msgid "Template for script to run on cloudpipe instance boot" +#: ../nova/db/sqlalchemy/api.py:1772 +#, python-format +msgid "No user for access key %s" msgstr "" -#: nova/cloudpipe/pipelib.py:48 -msgid "Network to push into openvpn config" +#: ../nova/db/sqlalchemy/api.py:1834 +#, python-format +msgid "No project with id %s" msgstr "" -#: nova/cloudpipe/pipelib.py:51 -msgid "Netmask to push into openvpn config" +#: ../nova/db/sqlalchemy/api.py:1979 +#, python-format +msgid "No console pool with id %(pool_id)s" msgstr "" -#: nova/cloudpipe/pipelib.py:97 +#: ../nova/db/sqlalchemy/api.py:1996 #, python-format -msgid "Launching VPN for %s" +msgid "" +"No console pool of type %(console_type)s for compute host %(compute_host)s " +"on proxy host %(host)s" msgstr "" -#: nova/compute/api.py:67 +#: ../nova/db/sqlalchemy/api.py:2035 #, python-format -msgid "Instance %d was not found in get_network_topic" +msgid "No console for instance %(instance_id)s in pool %(pool_id)s" msgstr "" -#: nova/compute/api.py:73 +#: ../nova/db/sqlalchemy/api.py:2057 #, 
python-format -msgid "Instance %d has no host" +msgid "on instance %s" msgstr "" -#: nova/compute/api.py:92 +#: ../nova/db/sqlalchemy/api.py:2058 #, python-format -msgid "Quota exceeeded for %s, tried to run %s instances" +msgid "No console with id %(console_id)s %(idesc)s" msgstr "" -#: nova/compute/api.py:94 +#: ../nova/db/sqlalchemy/api.py:2078 ../nova/db/sqlalchemy/api.py:2097 #, python-format -msgid "" -"Instance quota exceeded. You can only run %s more instances of this type." +msgid "No zone with id %(zone_id)s" msgstr "" -#: nova/compute/api.py:109 -msgid "Creating a raw instance" +#: ../nova/virt/libvirt_conn.py:160 +#, python-format +msgid "Checking state of %s" msgstr "" -#: nova/compute/api.py:156 +#: ../nova/virt/libvirt_conn.py:165 #, python-format -msgid "Going to run %s instances..." +msgid "Current state of %(name)s was %(state)s." msgstr "" -#: nova/compute/api.py:180 +#: ../nova/virt/libvirt_conn.py:183 #, python-format -msgid "Casting to scheduler for %s/%s's instance %s" +msgid "Connecting to libvirt: %s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:196 +msgid "Connection to libvirt broke" msgstr "" -#: nova/compute/api.py:279 +#: ../nova/virt/libvirt_conn.py:258 #, python-format -msgid "Going to try and terminate %s" +msgid "instance %(instance_name)s: deleting instance files %(target)s" msgstr "" -#: nova/compute/api.py:283 +#: ../nova/virt/libvirt_conn.py:283 #, python-format -msgid "Instance %d was not found during terminate" +msgid "Invalid device path %s" msgstr "" -#: nova/compute/api.py:288 +#: ../nova/virt/libvirt_conn.py:313 #, python-format -msgid "Instance %d is already being terminated" +msgid "No disk at %s" msgstr "" -#: nova/compute/api.py:450 +#: ../nova/virt/libvirt_conn.py:320 +msgid "Instance snapshotting is not supported for libvirtat this time" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:336 #, python-format -msgid "Invalid device specified: %s. 
Example device: /dev/vdb" +msgid "instance %s: rebooted" msgstr "" -#: nova/compute/api.py:465 -msgid "Volume isn't attached to anything!" +#: ../nova/virt/libvirt_conn.py:339 +#, python-format +msgid "_wait_for_reboot failed: %s" msgstr "" -#: nova/compute/disk.py:71 +#: ../nova/virt/libvirt_conn.py:382 #, python-format -msgid "Input partition size not evenly divisible by sector size: %d / %d" +msgid "instance %s: rescued" msgstr "" -#: nova/compute/disk.py:75 +#: ../nova/virt/libvirt_conn.py:385 #, python-format -msgid "Bytes for local storage not evenly divisible by sector size: %d / %d" +msgid "_wait_for_rescue failed: %s" msgstr "" -#: nova/compute/disk.py:128 +#: ../nova/virt/libvirt_conn.py:411 #, python-format -msgid "Could not attach image to loopback: %s" +msgid "instance %s: is running" msgstr "" -#: nova/compute/disk.py:136 +#: ../nova/virt/libvirt_conn.py:422 #, python-format -msgid "Failed to load partition: %s" +msgid "instance %s: booted" msgstr "" -#: nova/compute/disk.py:158 +#: ../nova/virt/libvirt_conn.py:425 ../nova/virt/xenapi/vmops.py:186 #, python-format -msgid "Failed to mount filesystem: %s" +msgid "instance %s: failed to boot" msgstr "" -#: nova/compute/instance_types.py:41 +#: ../nova/virt/libvirt_conn.py:436 #, python-format -msgid "Unknown instance type: %s" +msgid "virsh said: %r" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:440 +msgid "cool, it's a device" msgstr "" -#: nova/compute/manager.py:69 +#: ../nova/virt/libvirt_conn.py:448 #, python-format -msgid "check_instance_lock: decorating: |%s|" +msgid "data: %(data)r, fpath: %(fpath)r" msgstr "" -#: nova/compute/manager.py:71 +#: ../nova/virt/libvirt_conn.py:456 #, python-format -msgid "check_instance_lock: arguments: |%s| |%s| |%s|" +msgid "Contents of file %(fpath)s: %(contents)r" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:489 +msgid "Unable to find an open port" msgstr "" -#: nova/compute/manager.py:75 +#: ../nova/virt/libvirt_conn.py:563 #, python-format -msgid 
"check_instance_lock: locked: |%s|" +msgid "instance %s: Creating image" msgstr "" -#: nova/compute/manager.py:77 +#: ../nova/virt/libvirt_conn.py:646 #, python-format -msgid "check_instance_lock: admin: |%s|" +msgid "instance %(inst_name)s: injecting key into image %(img_id)s" msgstr "" -#: nova/compute/manager.py:82 +#: ../nova/virt/libvirt_conn.py:649 #, python-format -msgid "check_instance_lock: executing: |%s|" +msgid "instance %(inst_name)s: injecting net into image %(img_id)s" msgstr "" -#: nova/compute/manager.py:86 +#. This could be a windows image, or a vmdk format disk +#: ../nova/virt/libvirt_conn.py:657 #, python-format -msgid "check_instance_lock: not executing |%s|" +msgid "" +"instance %(inst_name)s: ignoring error injecting data into image %(img_id)s " +"(%(e)s)" msgstr "" -#: nova/compute/manager.py:157 -msgid "Instance has already been created" +#. TODO(termie): cache? +#: ../nova/virt/libvirt_conn.py:665 +#, python-format +msgid "instance %s: starting toXML method" msgstr "" -#: nova/compute/manager.py:158 +#: ../nova/virt/libvirt_conn.py:732 #, python-format -msgid "instance %s: starting..." 
+msgid "instance %s: finished toXML method" msgstr "" -#: nova/compute/manager.py:197 +#: ../nova/virt/libvirt_conn.py:751 +msgid "diagnostics are not supported for libvirt" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:1225 #, python-format -msgid "instance %s: Failed to spawn" +msgid "Attempted to unfilter instance %s which is not filtered" msgstr "" -#: nova/compute/manager.py:211 nova/tests/test_cloud.py:228 +#: ../nova/api/ec2/metadatarequesthandler.py:76 #, python-format -msgid "Terminating instance %s" +msgid "Failed to get metadata for ip: %s" +msgstr "" + +#: ../nova/auth/fakeldap.py:33 +msgid "Attempted to instantiate singleton" msgstr "" -#: nova/compute/manager.py:217 +#: ../nova/network/api.py:39 #, python-format -msgid "Disassociating address %s" +msgid "Quota exceeeded for %s, tried to allocate address" msgstr "" -#: nova/compute/manager.py:230 +#: ../nova/network/api.py:42 +msgid "Address quota exceeded. You cannot allocate any more addresses" +msgstr "" + +#: ../nova/tests/test_volume.py:162 #, python-format -msgid "Deallocating address %s" +msgid "Target %s allocated" msgstr "" -#: nova/compute/manager.py:243 +#: ../nova/virt/images.py:70 #, python-format -msgid "trying to destroy already destroyed instance: %s" +msgid "Finished retreving %(url)s -- placed in %(path)s" msgstr "" -#: nova/compute/manager.py:257 +#: ../nova/scheduler/driver.py:66 +msgid "Must implement a fallback schedule" +msgstr "" + +#: ../nova/console/manager.py:70 +msgid "Adding console" +msgstr "" + +#: ../nova/console/manager.py:90 #, python-format -msgid "Rebooting instance %s" +msgid "Tried to remove non-existant console %(console_id)s." +msgstr "" + +#: ../nova/api/direct.py:149 +msgid "not available" msgstr "" -#: nova/compute/manager.py:260 +#: ../nova/api/ec2/cloud.py:62 #, python-format -msgid "trying to reboot a non-running instance: %s (state: %s excepted: %s)" +msgid "The key_pair %s already exists" msgstr "" -#: nova/compute/manager.py:286 +#. 
TODO(vish): Do this with M2Crypto instead +#: ../nova/api/ec2/cloud.py:118 #, python-format -msgid "instance %s: snapshotting" +msgid "Generating root CA: %s" msgstr "" -#: nova/compute/manager.py:289 +#: ../nova/api/ec2/cloud.py:303 #, python-format -msgid "" -"trying to snapshot a non-running instance: %s (state: %s excepted: %s)" +msgid "Create key pair %s" msgstr "" -#: nova/compute/manager.py:301 +#: ../nova/api/ec2/cloud.py:311 #, python-format -msgid "instance %s: rescuing" +msgid "Delete key pair %s" msgstr "" -#: nova/compute/manager.py:316 +#: ../nova/api/ec2/cloud.py:386 #, python-format -msgid "instance %s: unrescuing" +msgid "%s is not a valid ipProtocol" +msgstr "" + +#: ../nova/api/ec2/cloud.py:390 +msgid "Invalid port range" msgstr "" -#: nova/compute/manager.py:335 +#: ../nova/api/ec2/cloud.py:421 #, python-format -msgid "instance %s: pausing" +msgid "Revoke security group ingress %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:430 ../nova/api/ec2/cloud.py:459 +msgid "Not enough parameters to build a valid rule." +msgstr "" + +#: ../nova/api/ec2/cloud.py:443 +msgid "No rule for the specified parameters." 
msgstr "" -#: nova/compute/manager.py:352 +#: ../nova/api/ec2/cloud.py:450 #, python-format -msgid "instance %s: unpausing" +msgid "Authorize security group ingress %s" msgstr "" -#: nova/compute/manager.py:369 +#: ../nova/api/ec2/cloud.py:464 #, python-format -msgid "instance %s: retrieving diagnostics" +msgid "This rule already exists in group %s" msgstr "" -#: nova/compute/manager.py:382 +#: ../nova/api/ec2/cloud.py:492 #, python-format -msgid "instance %s: suspending" +msgid "Create Security Group %s" msgstr "" -#: nova/compute/manager.py:401 +#: ../nova/api/ec2/cloud.py:495 #, python-format -msgid "instance %s: resuming" +msgid "group %s already exists" msgstr "" -#: nova/compute/manager.py:420 +#: ../nova/api/ec2/cloud.py:507 #, python-format -msgid "instance %s: locking" +msgid "Delete security group %s" msgstr "" -#: nova/compute/manager.py:432 +#: ../nova/api/ec2/cloud.py:584 #, python-format -msgid "instance %s: unlocking" +msgid "Create volume of %s GB" msgstr "" -#: nova/compute/manager.py:442 +#: ../nova/api/ec2/cloud.py:612 #, python-format -msgid "instance %s: getting locked state" +msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" msgstr "" -#: nova/compute/manager.py:462 +#: ../nova/api/ec2/cloud.py:629 #, python-format -msgid "instance %s: attaching volume %s to %s" +msgid "Detach volume %s" msgstr "" -#: nova/compute/manager.py:478 +#: ../nova/api/ec2/cloud.py:761 +msgid "Allocate address" +msgstr "" + +#: ../nova/api/ec2/cloud.py:766 #, python-format -msgid "instance %s: attach failed %s, removing" +msgid "Release address %s" msgstr "" -#: nova/compute/manager.py:493 +#: ../nova/api/ec2/cloud.py:771 #, python-format -msgid "Detach volume %s from mountpoint %s on instance %s" +msgid "Associate address %(public_ip)s to instance %(instance_id)s" msgstr "" -#: nova/compute/manager.py:497 +#: ../nova/api/ec2/cloud.py:780 #, python-format -msgid "Detaching volume from unknown instance %s" +msgid "Disassociate address %s" 
msgstr "" -#: nova/compute/monitor.py:259 +#: ../nova/api/ec2/cloud.py:807 +msgid "Going to start terminating instances" +msgstr "" + +#: ../nova/api/ec2/cloud.py:815 #, python-format -msgid "updating %s..." +msgid "Reboot instance %r" msgstr "" -#: nova/compute/monitor.py:289 -msgid "unexpected error during update" +#: ../nova/api/ec2/cloud.py:867 +#, python-format +msgid "De-registering image %s" msgstr "" -#: nova/compute/monitor.py:355 +#: ../nova/api/ec2/cloud.py:875 #, python-format -msgid "Cannot get blockstats for \"%s\" on \"%s\"" +msgid "Registered image %(image_location)s with id %(image_id)s" msgstr "" -#: nova/compute/monitor.py:377 +#: ../nova/api/ec2/cloud.py:882 ../nova/api/ec2/cloud.py:900 #, python-format -msgid "Cannot get ifstats for \"%s\" on \"%s\"" +msgid "attribute not supported: %s" msgstr "" -#: nova/compute/monitor.py:412 -msgid "unexpected exception getting connection" +#: ../nova/api/ec2/cloud.py:890 +#, python-format +msgid "invalid id: %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:903 +msgid "user or group not specified" +msgstr "" + +#: ../nova/api/ec2/cloud.py:905 +msgid "only group \"all\" is supported" +msgstr "" + +#: ../nova/api/ec2/cloud.py:907 +msgid "operation_type must be add or remove" msgstr "" -#: nova/compute/monitor.py:427 +#: ../nova/api/ec2/cloud.py:908 #, python-format -msgid "Found instance: %s" +msgid "Updating image %s publicity" msgstr "" -#: nova/db/sqlalchemy/api.py:43 -msgid "Use of empty request context is deprecated" +#: ../bin/nova-api.py:52 +#, python-format +msgid "Using paste.deploy config at: %s" msgstr "" -#: nova/db/sqlalchemy/api.py:132 +#: ../bin/nova-api.py:57 #, python-format -msgid "No service for id %s" +msgid "No paste configuration for app: %s" msgstr "" -#: nova/db/sqlalchemy/api.py:229 +#: ../bin/nova-api.py:59 #, python-format -msgid "No service for %s, %s" +msgid "" +"App Config: %(api)s\n" +"%(config)r" msgstr "" -#: nova/db/sqlalchemy/api.py:574 +#: ../bin/nova-api.py:64 #, python-format 
-msgid "No floating ip for address %s" +msgid "Running %s API" msgstr "" -#: nova/db/sqlalchemy/api.py:668 +#: ../bin/nova-api.py:69 #, python-format -msgid "No instance for id %s" +msgid "No known API applications configured in %s." msgstr "" -#: nova/db/sqlalchemy/api.py:758 nova/virt/libvirt_conn.py:598 -#: nova/virt/xenapi/volumeops.py:48 nova/virt/xenapi/volumeops.py:103 +#: ../bin/nova-api.py:83 #, python-format -msgid "Instance %s not found" +msgid "Starting nova-api node (version %s)" msgstr "" -#: nova/db/sqlalchemy/api.py:891 +#: ../bin/nova-api.py:89 #, python-format -msgid "no keypair for user %s, name %s" +msgid "No paste configuration found for: %s" msgstr "" -#: nova/db/sqlalchemy/api.py:1006 nova/db/sqlalchemy/api.py:1064 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:84 #, python-format -msgid "No network for id %s" +msgid "Argument %(key)s value %(value)s is too short." msgstr "" -#: nova/db/sqlalchemy/api.py:1036 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:89 #, python-format -msgid "No network for bridge %s" +msgid "Argument %(key)s value %(value)s contains invalid characters." msgstr "" -#: nova/db/sqlalchemy/api.py:1050 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:94 #, python-format -msgid "No network for instance %s" +msgid "Argument %(key)s value %(value)s starts with a hyphen." msgstr "" -#: nova/db/sqlalchemy/api.py:1180 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:102 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:130 #, python-format -msgid "Token %s does not exist" +msgid "Argument %s is required." msgstr "" -#: nova/db/sqlalchemy/api.py:1205 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:117 #, python-format -msgid "No quota for project_id %s" +msgid "" +"Argument %(key)s may not take value %(value)s. Valid values are ['true', " +"'false']." 
msgstr "" -#: nova/db/sqlalchemy/api.py:1356 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:163 #, python-format -msgid "No volume for id %s" +msgid "" +"Created VDI %(vdi_ref)s (%(label)s, %(size)s, %(read_only)s) on %(sr_ref)s." msgstr "" -#: nova/db/sqlalchemy/api.py:1401 +#: ../nova/virt/xenapi/vmops.py:67 #, python-format -msgid "Volume %s not found" +msgid "Attempted to create non-unique name %s" msgstr "" -#: nova/db/sqlalchemy/api.py:1413 +#: ../nova/virt/xenapi/vmops.py:73 #, python-format -msgid "No export device found for volume %s" +msgid "instance %(name)s: not enough free memory" msgstr "" -#: nova/db/sqlalchemy/api.py:1426 +#: ../nova/virt/xenapi/vmops.py:148 #, python-format -msgid "No target id found for volume %s" +msgid "Starting VM %s..." msgstr "" -#: nova/db/sqlalchemy/api.py:1471 +#: ../nova/virt/xenapi/vmops.py:151 #, python-format -msgid "No security group with id %s" +msgid "Spawning VM %(instance_name)s created %(vm_ref)s." msgstr "" -#: nova/db/sqlalchemy/api.py:1488 +#: ../nova/virt/xenapi/vmops.py:162 #, python-format -msgid "No security group named %s for project: %s" +msgid "Invalid value for onset_files: '%s'" msgstr "" -#: nova/db/sqlalchemy/api.py:1576 +#: ../nova/virt/xenapi/vmops.py:167 #, python-format -msgid "No secuity group rule with id %s" +msgid "Injecting file path: '%s'" msgstr "" -#: nova/db/sqlalchemy/api.py:1650 +#: ../nova/virt/xenapi/vmops.py:180 #, python-format -msgid "No user for id %s" +msgid "Instance %s: booted" msgstr "" -#: nova/db/sqlalchemy/api.py:1666 +#: ../nova/virt/xenapi/vmops.py:232 #, python-format -msgid "No user for access key %s" +msgid "Instance not present %s" msgstr "" -#: nova/db/sqlalchemy/api.py:1728 +#. TODO(sirp): Add quiesce and VSS locking support when Windows support +#. 
is added +#: ../nova/virt/xenapi/vmops.py:261 #, python-format -msgid "No project with id %s" +msgid "Starting snapshot for VM %s" msgstr "" -#: nova/image/glance.py:78 +#: ../nova/virt/xenapi/vmops.py:269 #, python-format -msgid "Parallax returned HTTP error %d from request for /images" +msgid "Unable to Snapshot %(vm_ref)s: %(exc)s" msgstr "" -#: nova/image/glance.py:97 +#: ../nova/virt/xenapi/vmops.py:280 #, python-format -msgid "Parallax returned HTTP error %d from request for /images/detail" +msgid "Finished snapshot and upload for VM %s" msgstr "" -#: nova/image/s3.py:82 +#: ../nova/virt/xenapi/vmops.py:356 #, python-format -msgid "Image %s could not be found" +msgid "VM %(vm)s already halted, skipping shutdown..." +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:389 +msgid "Removing kernel/ramdisk files" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:399 +msgid "kernel/ramdisk files removed" msgstr "" -#: nova/network/api.py:39 +#: ../nova/virt/xenapi/vmops.py:561 #, python-format -msgid "Quota exceeeded for %s, tried to allocate address" +msgid "" +"TIMEOUT: The call to %(method)s timed out. VM id=%(instance_id)s; " +"args=%(strargs)s" msgstr "" -#: nova/network/api.py:42 -msgid "Address quota exceeded. You cannot allocate any more addresses" +#: ../nova/virt/xenapi/vmops.py:564 +#, python-format +msgid "" +"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. VM " +"id=%(instance_id)s; args=%(strargs)s" msgstr "" -#: nova/network/linux_net.py:176 +#: ../nova/virt/xenapi/vmops.py:569 #, python-format -msgid "Starting VLAN inteface %s" +msgid "" +"The call to %(method)s returned an error: %(e)s. 
VM id=%(instance_id)s; " +"args=%(strargs)s" msgstr "" -#: nova/network/linux_net.py:186 +#: ../nova/virt/xenapi/vmops.py:760 #, python-format -msgid "Starting Bridge interface for %s" +msgid "OpenSSL error: %s" msgstr "" -#: nova/network/linux_net.py:254 +#: ../nova/tests/test_compute.py:148 #, python-format -msgid "Hupping dnsmasq threw %s" +msgid "Running instances: %s" msgstr "" -#: nova/network/linux_net.py:256 +#: ../nova/tests/test_compute.py:154 #, python-format -msgid "Pid %d is stale, relaunching dnsmasq" +msgid "After terminating instances: %s" +msgstr "" + +#: ../nova/cloudpipe/pipelib.py:45 +msgid "Template for script to run on cloudpipe instance boot" +msgstr "" + +#: ../nova/cloudpipe/pipelib.py:48 +msgid "Network to push into openvpn config" msgstr "" -#: nova/network/linux_net.py:334 +#: ../nova/cloudpipe/pipelib.py:51 +msgid "Netmask to push into openvpn config" +msgstr "" + +#: ../nova/cloudpipe/pipelib.py:97 #, python-format -msgid "Killing dnsmasq threw %s" +msgid "Launching VPN for %s" msgstr "" -#: nova/network/manager.py:135 -msgid "setting network host" +#: ../nova/db/sqlalchemy/migration.py:35 +msgid "python-migrate is not installed. Exiting." msgstr "" -#: nova/network/manager.py:190 +#: ../nova/image/s3.py:99 #, python-format -msgid "Leasing IP %s" +msgid "Image %s could not be found" msgstr "" -#: nova/network/manager.py:194 +#: ../nova/api/ec2/__init__.py:121 +msgid "Too many failed authentications." +msgstr "" + +#: ../nova/api/ec2/__init__.py:131 #, python-format -msgid "IP %s leased that isn't associated" +msgid "" +"Access key %(access_key)s has had %(failures)d failed authentications and " +"will be locked out for %(lock_mins)d minutes." 
msgstr "" -#: nova/network/manager.py:197 +#: ../nova/api/ec2/__init__.py:169 ../nova/objectstore/handler.py:140 #, python-format -msgid "IP %s leased to bad mac %s vs %s" +msgid "Authentication Failure: %s" msgstr "" -#: nova/network/manager.py:205 +#: ../nova/api/ec2/__init__.py:182 #, python-format -msgid "IP %s leased that was already deallocated" +msgid "Authenticated Request For %(uname)s:%(pname)s)" msgstr "" -#: nova/network/manager.py:214 +#: ../nova/api/ec2/__init__.py:207 #, python-format -msgid "IP %s released that isn't associated" +msgid "action: %s" msgstr "" -#: nova/network/manager.py:217 +#: ../nova/api/ec2/__init__.py:209 #, python-format -msgid "IP %s released from bad mac %s vs %s" +msgid "arg: %(key)s\t\tval: %(value)s" msgstr "" -#: nova/network/manager.py:220 +#: ../nova/api/ec2/__init__.py:281 #, python-format -msgid "IP %s released that was not leased" +msgid "" +"Unauthorized request for controller=%(controller)s and action=%(action)s" msgstr "" -#: nova/network/manager.py:442 +#: ../nova/api/ec2/__init__.py:314 #, python-format -msgid "Dissassociated %s stale fixed ip(s)" +msgid "InstanceNotFound raised: %s" msgstr "" -#: nova/objectstore/handler.py:106 +#: ../nova/api/ec2/__init__.py:320 #, python-format -msgid "Unknown S3 value type %r" +msgid "VolumeNotFound raised: %s" msgstr "" -#: nova/objectstore/handler.py:137 -msgid "Authenticated request" +#: ../nova/api/ec2/__init__.py:326 +#, python-format +msgid "NotFound raised: %s" msgstr "" -#: nova/objectstore/handler.py:182 -msgid "List of buckets requested" +#: ../nova/api/ec2/__init__.py:329 +#, python-format +msgid "ApiError raised: %s" msgstr "" -#: nova/objectstore/handler.py:209 +#: ../nova/api/ec2/__init__.py:338 #, python-format -msgid "List keys for bucket %s" +msgid "Unexpected error raised: %s" +msgstr "" + +#: ../nova/api/ec2/__init__.py:343 +msgid "An unknown error has occurred. Please try your request again." 
msgstr "" -#: nova/objectstore/handler.py:217 +#: ../nova/auth/dbdriver.py:84 #, python-format -msgid "Unauthorized attempt to access bucket %s" +msgid "User %s already exists" msgstr "" -#: nova/objectstore/handler.py:235 +#: ../nova/auth/dbdriver.py:106 ../nova/auth/ldapdriver.py:232 #, python-format -msgid "Creating bucket %s" +msgid "Project can't be created because manager %s doesn't exist" msgstr "" -#: nova/objectstore/handler.py:245 +#: ../nova/auth/dbdriver.py:122 ../nova/auth/ldapdriver.py:243 #, python-format -msgid "Deleting bucket %s" +msgid "Project can't be created because user %s doesn't exist" msgstr "" -#: nova/objectstore/handler.py:249 +#: ../nova/auth/dbdriver.py:135 ../nova/auth/ldapdriver.py:229 #, python-format -msgid "Unauthorized attempt to delete bucket %s" +msgid "Project can't be created because project %s already exists" msgstr "" -#: nova/objectstore/handler.py:271 +#: ../nova/auth/dbdriver.py:157 ../nova/auth/ldapdriver.py:268 #, python-format -msgid "Getting object: %s / %s" +msgid "Project can't be modified because manager %s doesn't exist" msgstr "" -#: nova/objectstore/handler.py:274 +#: ../nova/auth/dbdriver.py:245 #, python-format -msgid "Unauthorized attempt to get object %s from bucket %s" +msgid "User \"%s\" not found" msgstr "" -#: nova/objectstore/handler.py:292 +#: ../nova/auth/dbdriver.py:248 #, python-format -msgid "Putting object: %s / %s" +msgid "Project \"%s\" not found" msgstr "" -#: nova/objectstore/handler.py:295 +#: ../nova/virt/xenapi_conn.py:129 +msgid "" +"Must specify xenapi_connection_url, xenapi_connection_username (optionally), " +"and xenapi_connection_password to use connection_type=xenapi" +msgstr "" + +#: ../nova/virt/xenapi_conn.py:311 #, python-format -msgid "Unauthorized attempt to upload object %s to bucket %s" +msgid "Task [%(name)s] %(task)s status: success %(result)s" msgstr "" -#: nova/objectstore/handler.py:314 +#: ../nova/virt/xenapi_conn.py:317 #, python-format -msgid "Deleting object: %s / 
%s" +msgid "Task [%(name)s] %(task)s status: %(status)s %(error_info)s" msgstr "" -#: nova/objectstore/handler.py:393 +#: ../nova/virt/xenapi_conn.py:331 ../nova/virt/xenapi_conn.py:344 #, python-format -msgid "Not authorized to upload image: invalid directory %s" +msgid "Got exception: %s" msgstr "" -#: nova/objectstore/handler.py:401 +#: ../nova/compute/monitor.py:259 #, python-format -msgid "Not authorized to upload image: unauthorized bucket %s" +msgid "updating %s..." msgstr "" -#: nova/objectstore/handler.py:406 +#: ../nova/compute/monitor.py:289 +msgid "unexpected error during update" +msgstr "" + +#: ../nova/compute/monitor.py:356 #, python-format -msgid "Starting image upload: %s" +msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\"" msgstr "" -#: nova/objectstore/handler.py:420 +#: ../nova/compute/monitor.py:379 #, python-format -msgid "Not authorized to update attributes of image %s" +msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\"" +msgstr "" + +#: ../nova/compute/monitor.py:414 +msgid "unexpected exception getting connection" +msgstr "" + +#: ../nova/compute/monitor.py:429 +#, python-format +msgid "Found instance: %s" msgstr "" -#: nova/objectstore/handler.py:428 +#: ../nova/volume/san.py:67 #, python-format -msgid "Toggling publicity flag of image %s %r" +msgid "Could not find iSCSI export for volume %s" msgstr "" -#: nova/objectstore/handler.py:433 +#: ../nova/api/ec2/apirequest.py:100 #, python-format -msgid "Updating user fields on image %s" +msgid "" +"Unsupported API request: controller = %(controller)s, action = %(action)s" msgstr "" -#: nova/objectstore/handler.py:447 +#: ../nova/api/openstack/__init__.py:55 #, python-format -msgid "Unauthorized attempt to delete image %s" +msgid "Caught error: %s" +msgstr "" + +#: ../nova/api/openstack/__init__.py:76 +msgid "Including admin operations in API." 
+msgstr "" + +#: ../nova/console/xvp.py:99 +msgid "Rebuilding xvp conf" msgstr "" -#: nova/objectstore/handler.py:452 +#: ../nova/console/xvp.py:116 #, python-format -msgid "Deleted image: %s" +msgid "Re-wrote %s" msgstr "" -#: nova/scheduler/chance.py:37 nova/scheduler/simple.py:73 -#: nova/scheduler/simple.py:106 nova/scheduler/simple.py:118 -msgid "No hosts found" +#: ../nova/console/xvp.py:121 +msgid "Stopping xvp" msgstr "" -#: nova/scheduler/driver.py:66 -msgid "Must implement a fallback schedule" +#: ../nova/console/xvp.py:134 +msgid "Starting xvp" msgstr "" -#: nova/scheduler/manager.py:69 +#: ../nova/console/xvp.py:141 #, python-format -msgid "Casting to %s %s for %s" +msgid "Error starting xvp: %s" msgstr "" -#: nova/scheduler/simple.py:63 -msgid "All hosts have too many cores" +#: ../nova/console/xvp.py:144 +msgid "Restarting xvp" msgstr "" -#: nova/scheduler/simple.py:95 -msgid "All hosts have too many gigabytes" +#: ../nova/console/xvp.py:146 +msgid "xvp not running..." msgstr "" -#: nova/scheduler/simple.py:115 -msgid "All hosts have too many networks" +#: ../bin/nova-manage.py:272 +msgid "" +"The above error may show that the database has not been created.\n" +"Please create a database using nova-manage sync db before running this " +"command." msgstr "" -#: nova/tests/test_cloud.py:198 -msgid "Can't test instances without a real virtual env." +#: ../bin/nova-manage.py:426 +msgid "" +"No more networks available. If this is a new installation, you need\n" +"to call something like this:\n" +"\n" +" nova-manage network create 10.0.0.0/8 10 64\n" +"\n" msgstr "" -#: nova/tests/test_cloud.py:210 -#, python-format -msgid "Need to watch instance %s until it's running..." +#: ../bin/nova-manage.py:431 +msgid "" +"The above error may show that the certificate db has not been created.\n" +"Please create a database by running a nova-api server on this host." 
msgstr "" -#: nova/tests/test_compute.py:104 -#, python-format -msgid "Running instances: %s" +#: ../bin/nova-manage.py:447 ../bin/nova-manage.py:536 +msgid "network" msgstr "" -#: nova/tests/test_compute.py:110 -#, python-format -msgid "After terminating instances: %s" +#: ../bin/nova-manage.py:448 +msgid "IP address" msgstr "" -#: nova/tests/test_rpc.py:89 +#: ../bin/nova-manage.py:449 +msgid "MAC address" +msgstr "" + +#: ../bin/nova-manage.py:450 +msgid "hostname" +msgstr "" + +#: ../bin/nova-manage.py:451 +msgid "host" +msgstr "" + +#: ../bin/nova-manage.py:537 +msgid "netmask" +msgstr "" + +#: ../bin/nova-manage.py:538 +msgid "start address" +msgstr "" + +#: ../nova/virt/disk.py:69 #, python-format -msgid "Nested received %s, %s" +msgid "Failed to load partition: %s" msgstr "" -#: nova/tests/test_rpc.py:94 +#: ../nova/virt/disk.py:91 #, python-format -msgid "Nested return %s" +msgid "Failed to mount filesystem: %s" msgstr "" -#: nova/tests/test_rpc.py:119 nova/tests/test_rpc.py:125 +#: ../nova/virt/disk.py:124 #, python-format -msgid "Received %s" +msgid "nbd device %s did not show up" msgstr "" -#: nova/tests/test_volume.py:162 +#: ../nova/virt/disk.py:128 #, python-format -msgid "Target %s allocated" +msgid "Could not attach image to loopback: %s" msgstr "" -#: nova/virt/connection.py:73 -msgid "Failed to open connection to the hypervisor" +#: ../nova/virt/disk.py:151 +msgid "No free nbd devices" msgstr "" -#: nova/virt/fake.py:210 +#: ../doc/ext/nova_todo.py:46 #, python-format -msgid "Instance %s Not Found" +msgid "%(filename)s, line %(line_info)d" msgstr "" -#: nova/virt/hyperv.py:118 +#. 
FIXME(chiradeep): implement this +#: ../nova/virt/hyperv.py:118 msgid "In init host" msgstr "" -#: nova/virt/hyperv.py:131 +#: ../nova/virt/hyperv.py:131 #, python-format msgid "Attempt to create duplicate vm %s" msgstr "" -#: nova/virt/hyperv.py:148 +#: ../nova/virt/hyperv.py:148 #, python-format msgid "Starting VM %s " msgstr "" -#: nova/virt/hyperv.py:150 +#: ../nova/virt/hyperv.py:150 #, python-format msgid "Started VM %s " msgstr "" -#: nova/virt/hyperv.py:152 +#: ../nova/virt/hyperv.py:152 #, python-format msgid "spawn vm failed: %s" msgstr "" -#: nova/virt/hyperv.py:169 +#: ../nova/virt/hyperv.py:169 #, python-format msgid "Failed to create VM %s" msgstr "" -#: nova/virt/hyperv.py:171 nova/virt/xenapi/vm_utils.py:125 -#, python-format -msgid "Created VM %s..." -msgstr "" - -#: nova/virt/hyperv.py:188 +#: ../nova/virt/hyperv.py:188 #, python-format msgid "Set memory for vm %s..." msgstr "" -#: nova/virt/hyperv.py:198 +#: ../nova/virt/hyperv.py:198 #, python-format msgid "Set vcpus for vm %s..." 
msgstr "" -#: nova/virt/hyperv.py:202 +#: ../nova/virt/hyperv.py:202 #, python-format -msgid "Creating disk for %s by attaching disk file %s" +msgid "Creating disk for %(vm_name)s by attaching disk file %(vhdfile)s" msgstr "" -#: nova/virt/hyperv.py:227 +#: ../nova/virt/hyperv.py:227 #, python-format msgid "Failed to add diskdrive to VM %s" msgstr "" -#: nova/virt/hyperv.py:230 +#: ../nova/virt/hyperv.py:230 #, python-format msgid "New disk drive path is %s" msgstr "" -#: nova/virt/hyperv.py:247 +#: ../nova/virt/hyperv.py:247 #, python-format msgid "Failed to add vhd file to VM %s" msgstr "" -#: nova/virt/hyperv.py:249 +#: ../nova/virt/hyperv.py:249 #, python-format msgid "Created disk for %s" msgstr "" -#: nova/virt/hyperv.py:253 +#: ../nova/virt/hyperv.py:253 #, python-format msgid "Creating nic for %s " msgstr "" -#: nova/virt/hyperv.py:272 +#: ../nova/virt/hyperv.py:272 msgid "Failed creating a port on the external vswitch" msgstr "" -#: nova/virt/hyperv.py:273 +#: ../nova/virt/hyperv.py:273 #, python-format msgid "Failed creating port for %s" msgstr "" -#: nova/virt/hyperv.py:275 +#: ../nova/virt/hyperv.py:276 #, python-format -msgid "Created switch port %s on switch %s" +msgid "Created switch port %(vm_name)s on switch %(ext_path)s" msgstr "" -#: nova/virt/hyperv.py:285 +#: ../nova/virt/hyperv.py:286 #, python-format msgid "Failed to add nic to VM %s" msgstr "" -#: nova/virt/hyperv.py:287 +#: ../nova/virt/hyperv.py:288 #, python-format msgid "Created nic for %s " msgstr "" -#: nova/virt/hyperv.py:320 +#: ../nova/virt/hyperv.py:321 #, python-format msgid "WMI job failed: %s" msgstr "" -#: nova/virt/hyperv.py:322 +#: ../nova/virt/hyperv.py:325 #, python-format -msgid "WMI job succeeded: %s, Elapsed=%s " +msgid "WMI job succeeded: %(desc)s, Elapsed=%(elap)s " msgstr "" -#: nova/virt/hyperv.py:358 +#: ../nova/virt/hyperv.py:361 #, python-format msgid "Got request to destroy vm %s" msgstr "" -#: nova/virt/hyperv.py:383 +#: ../nova/virt/hyperv.py:386 #, 
python-format msgid "Failed to destroy vm %s" msgstr "" -#: nova/virt/hyperv.py:389 +#: ../nova/virt/hyperv.py:393 #, python-format -msgid "Del: disk %s vm %s" +msgid "Del: disk %(vhdfile)s vm %(instance_name)s" msgstr "" -#: nova/virt/hyperv.py:405 +#: ../nova/virt/hyperv.py:415 #, python-format msgid "" -"Got Info for vm %s: state=%s, mem=%s, num_cpu=%s, " -"cpu_time=%s" +"Got Info for vm %(instance_id)s: state=%(state)s, mem=%(memusage)s, " +"num_cpu=%(numprocs)s, cpu_time=%(uptime)s" msgstr "" -#: nova/virt/hyperv.py:424 nova/virt/xenapi/vm_utils.py:301 +#: ../nova/virt/hyperv.py:451 #, python-format -msgid "duplicate name found: %s" +msgid "Successfully changed vm state of %(vm_name)s to %(req_state)s" msgstr "" -#: nova/virt/hyperv.py:444 +#: ../nova/virt/hyperv.py:454 #, python-format -msgid "Successfully changed vm state of %s to %s" +msgid "Failed to change vm state of %(vm_name)s to %(req_state)s" msgstr "" -#: nova/virt/hyperv.py:447 nova/virt/hyperv.py:449 +#: ../nova/compute/api.py:71 #, python-format -msgid "Failed to change vm state of %s to %s" +msgid "Instance %d was not found in get_network_topic" msgstr "" -#: nova/virt/images.py:70 +#: ../nova/compute/api.py:77 #, python-format -msgid "Finished retreving %s -- placed in %s" +msgid "Instance %d has no host" msgstr "" -#: nova/virt/libvirt_conn.py:144 +#: ../nova/compute/api.py:97 #, python-format -msgid "Connecting to libvirt: %s" +msgid "Quota exceeeded for %(pid)s, tried to run %(min_count)s instances" msgstr "" -#: nova/virt/libvirt_conn.py:157 -msgid "Connection to libvirt broke" +#: ../nova/compute/api.py:99 +#, python-format +msgid "" +"Instance quota exceeded. You can only run %s more instances of this type." msgstr "" -#: nova/virt/libvirt_conn.py:229 +#: ../nova/compute/api.py:112 +msgid "Creating a raw instance" +msgstr "" + +#: ../nova/compute/api.py:160 #, python-format -msgid "instance %s: deleting instance files %s" +msgid "Going to run %s instances..." 
msgstr "" -#: nova/virt/libvirt_conn.py:271 +#: ../nova/compute/api.py:187 #, python-format -msgid "No disk at %s" +msgid "Casting to scheduler for %(pid)s/%(uid)s's instance %(instance_id)s" msgstr "" -#: nova/virt/libvirt_conn.py:278 -msgid "Instance snapshotting is not supported for libvirtat this time" +#: ../nova/compute/api.py:292 +#, python-format +msgid "Going to try to terminate %s" msgstr "" -#: nova/virt/libvirt_conn.py:294 +#: ../nova/compute/api.py:296 #, python-format -msgid "instance %s: rebooted" +msgid "Instance %d was not found during terminate" msgstr "" -#: nova/virt/libvirt_conn.py:297 +#: ../nova/compute/api.py:301 #, python-format -msgid "_wait_for_reboot failed: %s" +msgid "Instance %d is already being terminated" msgstr "" -#: nova/virt/libvirt_conn.py:340 +#: ../nova/compute/api.py:481 #, python-format -msgid "instance %s: rescued" +msgid "Invalid device specified: %s. Example device: /dev/vdb" +msgstr "" + +#: ../nova/compute/api.py:496 +msgid "Volume isn't attached to anything!" msgstr "" -#: nova/virt/libvirt_conn.py:343 +#: ../nova/rpc.py:98 #, python-format -msgid "_wait_for_rescue failed: %s" +msgid "" +"AMQP server on %(fl_host)s:%(fl_port)d is unreachable. Trying again in " +"%(fl_intv)d seconds." msgstr "" -#: nova/virt/libvirt_conn.py:370 +#: ../nova/rpc.py:103 #, python-format -msgid "instance %s: is running" +msgid "Unable to connect to AMQP server after %d tries. Shutting down." +msgstr "" + +#: ../nova/rpc.py:122 +msgid "Reconnected to queue" msgstr "" -#: nova/virt/libvirt_conn.py:381 +#: ../nova/rpc.py:129 +msgid "Failed to fetch message from queue" +msgstr "" + +#: ../nova/rpc.py:159 #, python-format -msgid "instance %s: booted" +msgid "Initing the Adapter Consumer for %s" msgstr "" -#: nova/virt/libvirt_conn.py:384 nova/virt/xenapi/vmops.py:116 +#: ../nova/rpc.py:178 #, python-format -msgid "instance %s: failed to boot" +msgid "received %s" msgstr "" -#: nova/virt/libvirt_conn.py:395 +#. 
NOTE(vish): we may not want to ack here, but that means that bad +#. messages stay in the queue indefinitely, so for now +#. we just log the message and send an error string +#. back to the caller +#: ../nova/rpc.py:191 #, python-format -msgid "virsh said: %r" +msgid "no method for message: %s" msgstr "" -#: nova/virt/libvirt_conn.py:399 -msgid "cool, it's a device" +#: ../nova/rpc.py:192 +#, python-format +msgid "No method for message: %s" msgstr "" -#: nova/virt/libvirt_conn.py:407 +#: ../nova/rpc.py:253 #, python-format -msgid "data: %r, fpath: %r" +msgid "Returning exception %s to caller" msgstr "" -#: nova/virt/libvirt_conn.py:415 +#: ../nova/rpc.py:294 #, python-format -msgid "Contents of file %s: %r" +msgid "unpacked context: %s" +msgstr "" + +#: ../nova/rpc.py:313 +msgid "Making asynchronous call..." msgstr "" -#: nova/virt/libvirt_conn.py:449 +#: ../nova/rpc.py:316 #, python-format -msgid "instance %s: Creating image" +msgid "MSG_ID is %s" msgstr "" -#: nova/virt/libvirt_conn.py:505 +#: ../nova/rpc.py:354 +msgid "Making asynchronous cast..." +msgstr "" + +#: ../nova/rpc.py:364 #, python-format -msgid "instance %s: injecting key into image %s" +msgid "response %s" msgstr "" -#: nova/virt/libvirt_conn.py:508 +#: ../nova/rpc.py:373 #, python-format -msgid "instance %s: injecting net into image %s" +msgid "topic is %s" msgstr "" -#: nova/virt/libvirt_conn.py:516 +#: ../nova/rpc.py:374 #, python-format -msgid "instance %s: ignoring error injecting data into image %s (%s)" +msgid "message %s" msgstr "" -#: nova/virt/libvirt_conn.py:544 nova/virt/libvirt_conn.py:547 +#: ../nova/volume/driver.py:78 #, python-format -msgid "instance %s: starting toXML method" +msgid "Recovering from a failed execute. 
Try number %s" msgstr "" -#: nova/virt/libvirt_conn.py:589 +#: ../nova/volume/driver.py:87 #, python-format -msgid "instance %s: finished toXML method" +msgid "volume group %s doesn't exist" +msgstr "" + +#: ../nova/volume/driver.py:220 +#, python-format +msgid "FAKE AOE: %s" +msgstr "" + +#: ../nova/volume/driver.py:233 +msgid "Skipping ensure_export. No iscsi_target " +msgstr "" + +#: ../nova/volume/driver.py:279 ../nova/volume/driver.py:288 +msgid "Skipping remove_export. No iscsi_target " +msgstr "" + +#: ../nova/volume/driver.py:347 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: ../nova/volume/driver.py:359 +#, python-format +msgid "rbd has no pool %s" +msgstr "" + +#: ../nova/volume/driver.py:414 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: ../nova/volume/driver.py:416 +msgid "Sheepdog is not working" +msgstr "" + +#: ../nova/wsgi.py:68 +#, python-format +msgid "Starting %(arg0)s on %(host)s:%(port)s" +msgstr "" + +#: ../nova/wsgi.py:147 +msgid "You must implement __call__" +msgstr "" + +#: ../bin/nova-instancemonitor.py:55 +msgid "Starting instance monitor" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:58 +msgid "leasing ip" msgstr "" -#: nova/virt/xenapi_conn.py:113 +#: ../bin/nova-dhcpbridge.py:73 +msgid "Adopted old lease or got a change of mac/hostname" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:80 +msgid "releasing ip" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:123 +#, python-format msgid "" -"Must specify xenapi_connection_url, xenapi_connection_username (optionally), " -"and xenapi_connection_password to use connection_type=xenapi" +"Called %(action)s for mac %(mac)s with ip %(ip)s and hostname %(hostname)s " +"on interface %(interface)s" msgstr "" -#: nova/virt/xenapi_conn.py:263 +#: ../nova/virt/fake.py:239 #, python-format -msgid "Task [%s] %s status: success %s" +msgid "Instance %s Not Found" msgstr "" -#: nova/virt/xenapi_conn.py:271 +#: ../nova/network/manager.py:153 #, python-format -msgid "Task [%s] %s 
status: %s %s" +msgid "Dissassociated %s stale fixed ip(s)" msgstr "" -#: nova/virt/xenapi_conn.py:287 nova/virt/xenapi_conn.py:300 +#: ../nova/network/manager.py:157 +msgid "setting network host" +msgstr "" + +#: ../nova/network/manager.py:212 #, python-format -msgid "Got exception: %s" +msgid "Leasing IP %s" msgstr "" -#: nova/virt/xenapi/fake.py:72 +#: ../nova/network/manager.py:216 #, python-format -msgid "%s: _db_content => %s" +msgid "IP %s leased that isn't associated" msgstr "" -#: nova/virt/xenapi/fake.py:247 nova/virt/xenapi/fake.py:338 -#: nova/virt/xenapi/fake.py:356 nova/virt/xenapi/fake.py:404 -msgid "Raising NotImplemented" +#: ../nova/network/manager.py:220 +#, python-format +msgid "IP %(address)s leased to bad mac %(inst_addr)s vs %(mac)s" msgstr "" -#: nova/virt/xenapi/fake.py:249 +#: ../nova/network/manager.py:228 #, python-format -msgid "xenapi.fake does not have an implementation for %s" +msgid "IP %s leased that was already deallocated" msgstr "" -#: nova/virt/xenapi/fake.py:283 +#: ../nova/network/manager.py:233 #, python-format -msgid "Calling %s %s" +msgid "Releasing IP %s" msgstr "" -#: nova/virt/xenapi/fake.py:288 +#: ../nova/network/manager.py:237 #, python-format -msgid "Calling getter %s" +msgid "IP %s released that isn't associated" +msgstr "" + +#: ../nova/network/manager.py:241 +#, python-format +msgid "IP %(address)s released from bad mac %(inst_addr)s vs %(mac)s" msgstr "" -#: nova/virt/xenapi/fake.py:340 +#: ../nova/network/manager.py:244 #, python-format +msgid "IP %s released that was not leased" +msgstr "" + +#: ../nova/network/manager.py:519 msgid "" -"xenapi.fake does not have an implementation for %s or it has been called " -"with the wrong number of arguments" +"The sum between the number of networks and the vlan start cannot be greater " +"than 4094" msgstr "" -#: nova/virt/xenapi/network_utils.py:40 +#: ../nova/virt/xenapi/volume_utils.py:57 #, python-format -msgid "Found non-unique network for bridge %s" +msgid 
"Introducing %s..." msgstr "" -#: nova/virt/xenapi/network_utils.py:43 +#: ../nova/virt/xenapi/volume_utils.py:74 #, python-format -msgid "Found no network for bridge %s" +msgid "Introduced %(label)s as %(sr_ref)s." msgstr "" -#: nova/virt/xenapi/vm_utils.py:127 +#: ../nova/virt/xenapi/volume_utils.py:78 +msgid "Unable to create Storage Repository" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:90 #, python-format -msgid "Created VM %s as %s." +msgid "Unable to find SR from VBD %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:147 +#: ../nova/virt/xenapi/volume_utils.py:96 #, python-format -msgid "Creating VBD for VM %s, VDI %s ... " +msgid "Forgetting SR %s ... " msgstr "" -#: nova/virt/xenapi/vm_utils.py:149 +#: ../nova/virt/xenapi/volume_utils.py:101 #, python-format -msgid "Created VBD %s for VM %s, VDI %s." +msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:165 +#: ../nova/virt/xenapi/volume_utils.py:107 #, python-format -msgid "VBD not found in instance %s" +msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:175 +#: ../nova/virt/xenapi/volume_utils.py:111 #, python-format -msgid "Unable to unplug VBD %s" +msgid "Forgetting SR %s done." msgstr "" -#: nova/virt/xenapi/vm_utils.py:187 +#: ../nova/virt/xenapi/volume_utils.py:113 #, python-format -msgid "Unable to destroy VBD %s" +msgid "Ignoring exception %(exc)s when forgetting SR %(sr_ref)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:202 +#: ../nova/virt/xenapi/volume_utils.py:123 #, python-format -msgid "Creating VIF for VM %s, network %s." +msgid "Unable to introduce VDI on SR %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:205 +#: ../nova/virt/xenapi/volume_utils.py:128 #, python-format -msgid "Created VIF %s for VM %s, network %s." 
+msgid "Unable to get record of VDI %s on" msgstr "" -#: nova/virt/xenapi/vm_utils.py:216 +#: ../nova/virt/xenapi/volume_utils.py:146 #, python-format -msgid "Snapshotting VM %s with label '%s'..." +msgid "Unable to introduce VDI for SR %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:229 +#: ../nova/virt/xenapi/volume_utils.py:175 #, python-format -msgid "Created snapshot %s from VM %s." +msgid "Unable to obtain target information %(device_path)s, %(mountpoint)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:243 +#: ../nova/virt/xenapi/volume_utils.py:197 #, python-format -msgid "Asking xapi to upload %s as '%s'" +msgid "Mountpoint cannot be translated: %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:261 +#: ../nova/objectstore/image.py:262 #, python-format -msgid "Asking xapi to fetch %s as %s" +msgid "Failed to decrypt private key: %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:279 +#: ../nova/objectstore/image.py:269 #, python-format -msgid "Looking up vdi %s for PV kernel" +msgid "Failed to decrypt initialization vector: %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:290 +#: ../nova/objectstore/image.py:277 #, python-format -msgid "PV Kernel in VDI:%d" +msgid "Failed to decrypt image file %(image_file)s: %(err)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:318 +#: ../nova/objectstore/handler.py:106 #, python-format -msgid "VDI %s is still available" +msgid "Unknown S3 value type %r" +msgstr "" + +#: ../nova/objectstore/handler.py:137 +msgid "Authenticated request" msgstr "" -#: nova/virt/xenapi/vm_utils.py:331 +#: ../nova/objectstore/handler.py:182 +msgid "List of buckets requested" +msgstr "" + +#: ../nova/objectstore/handler.py:209 #, python-format -msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgid "List keys for bucket %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:333 +#: ../nova/objectstore/handler.py:217 #, python-format -msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgid "Unauthorized attempt to access bucket %s" msgstr "" -#: 
nova/virt/xenapi/vm_utils.py:390 +#: ../nova/objectstore/handler.py:235 #, python-format -msgid "VHD %s has parent %s" +msgid "Creating bucket %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:407 +#: ../nova/objectstore/handler.py:245 #, python-format -msgid "Re-scanning SR %s" +msgid "Deleting bucket %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:431 +#: ../nova/objectstore/handler.py:249 #, python-format -msgid "Parent %s doesn't match original parent %s, waiting for coalesce..." +msgid "Unauthorized attempt to delete bucket %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:448 +#: ../nova/objectstore/handler.py:273 #, python-format -msgid "No VDIs found for VM %s" +msgid "Getting object: %(bname)s / %(nm)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:452 +#: ../nova/objectstore/handler.py:276 #, python-format -msgid "Unexpected number of VDIs (%s) found for VM %s" +msgid "Unauthorized attempt to get object %(nm)s from bucket %(bname)s" msgstr "" -#: nova/virt/xenapi/vmops.py:62 +#: ../nova/objectstore/handler.py:296 #, python-format -msgid "Attempted to create non-unique name %s" +msgid "Putting object: %(bname)s / %(nm)s" msgstr "" -#: nova/virt/xenapi/vmops.py:99 +#: ../nova/objectstore/handler.py:299 #, python-format -msgid "Starting VM %s..." +msgid "Unauthorized attempt to upload object %(nm)s to bucket %(bname)s" msgstr "" -#: nova/virt/xenapi/vmops.py:101 +#: ../nova/objectstore/handler.py:318 #, python-format -msgid "Spawning VM %s created %s." 
+msgid "Deleting object: %(bname)s / %(nm)s" msgstr "" -#: nova/virt/xenapi/vmops.py:112 +#: ../nova/objectstore/handler.py:322 #, python-format -msgid "Instance %s: booted" +msgid "Unauthorized attempt to delete object %(nm)s from bucket %(bname)s" msgstr "" -#: nova/virt/xenapi/vmops.py:137 +#: ../nova/objectstore/handler.py:396 #, python-format -msgid "Instance not present %s" +msgid "Not authorized to upload image: invalid directory %s" msgstr "" -#: nova/virt/xenapi/vmops.py:166 +#: ../nova/objectstore/handler.py:404 #, python-format -msgid "Starting snapshot for VM %s" +msgid "Not authorized to upload image: unauthorized bucket %s" msgstr "" -#: nova/virt/xenapi/vmops.py:174 +#: ../nova/objectstore/handler.py:409 #, python-format -msgid "Unable to Snapshot %s: %s" +msgid "Starting image upload: %s" msgstr "" -#: nova/virt/xenapi/vmops.py:184 +#: ../nova/objectstore/handler.py:423 #, python-format -msgid "Finished snapshot and upload for VM %s" +msgid "Not authorized to update attributes of image %s" msgstr "" -#: nova/virt/xenapi/vmops.py:252 +#: ../nova/objectstore/handler.py:431 #, python-format -msgid "suspend: instance not present %s" +msgid "Toggling publicity flag of image %(image_id)s %(newstatus)r" msgstr "" -#: nova/virt/xenapi/vmops.py:262 +#. other attributes imply update +#: ../nova/objectstore/handler.py:436 #, python-format -msgid "resume: instance not present %s" +msgid "Updating user fields on image %s" msgstr "" -#: nova/virt/xenapi/vmops.py:271 +#: ../nova/objectstore/handler.py:450 #, python-format -msgid "Instance not found %s" +msgid "Unauthorized attempt to delete image %s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:57 +#: ../nova/objectstore/handler.py:455 #, python-format -msgid "Introducing %s..." +msgid "Deleted image: %s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:74 +#: ../nova/auth/manager.py:259 #, python-format -msgid "Introduced %s as %s." 
+msgid "Looking up user: %r" msgstr "" -#: nova/virt/xenapi/volume_utils.py:78 -msgid "Unable to create Storage Repository" +#: ../nova/auth/manager.py:263 +#, python-format +msgid "Failed authorization for access key %s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:90 +#: ../nova/auth/manager.py:264 #, python-format -msgid "Unable to find SR from VBD %s" +msgid "No user found for access key %s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:96 +#: ../nova/auth/manager.py:270 #, python-format -msgid "Forgetting SR %s ... " +msgid "Using project name = user name (%s)" msgstr "" -#: nova/virt/xenapi/volume_utils.py:101 +#: ../nova/auth/manager.py:277 #, python-format -msgid "Ignoring exception %s when getting PBDs for %s" +msgid "failed authorization: no project named %(pjid)s (user=%(uname)s)" msgstr "" -#: nova/virt/xenapi/volume_utils.py:107 +#: ../nova/auth/manager.py:279 #, python-format -msgid "Ignoring exception %s when unplugging PBD %s" +msgid "No project called %s could be found" msgstr "" -#: nova/virt/xenapi/volume_utils.py:111 +#: ../nova/auth/manager.py:287 #, python-format -msgid "Forgetting SR %s done." 
+msgid "" +"Failed authorization: user %(uname)s not admin and not member of project " +"%(pjname)s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:113 +#: ../nova/auth/manager.py:289 #, python-format -msgid "Ignoring exception %s when forgetting SR %s" +msgid "User %(uid)s is not a member of project %(pjid)s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:123 +#: ../nova/auth/manager.py:298 ../nova/auth/manager.py:309 #, python-format -msgid "Unable to introduce VDI on SR %s" +msgid "Invalid signature for user %s" +msgstr "" + +#: ../nova/auth/manager.py:299 ../nova/auth/manager.py:310 +msgid "Signature does not match" msgstr "" -#: nova/virt/xenapi/volume_utils.py:128 +#: ../nova/auth/manager.py:380 +msgid "Must specify project" +msgstr "" + +#: ../nova/auth/manager.py:414 #, python-format -msgid "Unable to get record of VDI %s on" +msgid "The %s role can not be found" msgstr "" -#: nova/virt/xenapi/volume_utils.py:146 +#: ../nova/auth/manager.py:416 #, python-format -msgid "Unable to introduce VDI for SR %s" +msgid "The %s role is global only" msgstr "" -#: nova/virt/xenapi/volume_utils.py:175 +#: ../nova/auth/manager.py:420 #, python-format -msgid "Unable to obtain target information %s, %s" +msgid "Adding role %(role)s to user %(uid)s in project %(pid)s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:197 +#: ../nova/auth/manager.py:423 #, python-format -msgid "Mountpoint cannot be translated: %s" +msgid "Adding sitewide role %(role)s to user %(uid)s" msgstr "" -#: nova/virt/xenapi/volumeops.py:51 +#: ../nova/auth/manager.py:448 #, python-format -msgid "Attach_volume: %s, %s, %s" +msgid "Removing role %(role)s from user %(uid)s on project %(pid)s" msgstr "" -#: nova/virt/xenapi/volumeops.py:69 +#: ../nova/auth/manager.py:451 #, python-format -msgid "Unable to create VDI on SR %s for instance %s" +msgid "Removing sitewide role %(role)s from user %(uid)s" msgstr "" -#: nova/virt/xenapi/volumeops.py:81 +#: ../nova/auth/manager.py:515 #, python-format -msgid 
"Unable to use SR %s for instance %s" +msgid "Created project %(name)s with manager %(manager_user)s" msgstr "" -#: nova/virt/xenapi/volumeops.py:93 +#: ../nova/auth/manager.py:533 #, python-format -msgid "Unable to attach volume to instance %s" +msgid "modifying project %s" msgstr "" -#: nova/virt/xenapi/volumeops.py:95 +#: ../nova/auth/manager.py:545 #, python-format -msgid "Mountpoint %s attached to instance %s" +msgid "Adding user %(uid)s to project %(pid)s" msgstr "" -#: nova/virt/xenapi/volumeops.py:106 +#: ../nova/auth/manager.py:566 #, python-format -msgid "Detach_volume: %s, %s" +msgid "Remove user %(uid)s from project %(pid)s" msgstr "" -#: nova/virt/xenapi/volumeops.py:113 +#: ../nova/auth/manager.py:592 #, python-format -msgid "Unable to locate volume %s" +msgid "Deleting project %s" msgstr "" -#: nova/virt/xenapi/volumeops.py:121 +#: ../nova/auth/manager.py:650 #, python-format -msgid "Unable to detach volume %s" +msgid "Created user %(rvname)s (admin: %(rvadmin)r)" msgstr "" -#: nova/virt/xenapi/volumeops.py:128 +#: ../nova/auth/manager.py:659 #, python-format -msgid "Mountpoint %s detached from instance %s" +msgid "Deleting user %s" msgstr "" -#: nova/volume/api.py:44 +#: ../nova/auth/manager.py:669 #, python-format -msgid "Quota exceeeded for %s, tried to create %sG volume" +msgid "Access Key change for user %s" msgstr "" -#: nova/volume/api.py:46 +#: ../nova/auth/manager.py:671 #, python-format -msgid "Volume quota exceeded. 
You cannot create a volume of size %s" +msgid "Secret Key change for user %s" msgstr "" -#: nova/volume/api.py:70 nova/volume/api.py:95 -msgid "Volume status must be available" +#: ../nova/auth/manager.py:673 +#, python-format +msgid "Admin status set to %(admin)r for user %(uid)s" msgstr "" -#: nova/volume/api.py:97 -msgid "Volume is already attached" +#: ../nova/auth/manager.py:722 +#, python-format +msgid "No vpn data for project %s" msgstr "" -#: nova/volume/api.py:103 -msgid "Volume is already detached" +#: ../nova/service.py:161 +#, python-format +msgid "Starting %(topic)s node (version %(vcs_string)s)" +msgstr "" + +#: ../nova/service.py:174 +msgid "Service killed that has no database entry" +msgstr "" + +#: ../nova/service.py:195 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: ../nova/service.py:207 +msgid "Recovered model server connection!" +msgstr "" + +#: ../nova/service.py:213 +msgid "model server went away" msgstr "" -#: nova/volume/driver.py:76 +#: ../nova/auth/ldapdriver.py:174 #, python-format -msgid "Recovering from a failed execute. 
Try number %s" +msgid "LDAP user %s already exists" msgstr "" -#: nova/volume/driver.py:85 +#: ../nova/auth/ldapdriver.py:205 #, python-format -msgid "volume group %s doesn't exist" +msgid "LDAP object for %s doesn't exist" msgstr "" -#: nova/volume/driver.py:210 +#: ../nova/auth/ldapdriver.py:348 #, python-format -msgid "FAKE AOE: %s" +msgid "User %s doesn't exist" msgstr "" -#: nova/volume/driver.py:315 +#: ../nova/auth/ldapdriver.py:472 #, python-format -msgid "FAKE ISCSI: %s" +msgid "Group can't be created because group %s already exists" msgstr "" -#: nova/volume/manager.py:85 +#: ../nova/auth/ldapdriver.py:478 #, python-format -msgid "Re-exporting %s volumes" +msgid "Group can't be created because user %s doesn't exist" msgstr "" -#: nova/volume/manager.py:93 +#: ../nova/auth/ldapdriver.py:495 #, python-format -msgid "volume %s: creating" +msgid "User %s can't be searched in group because the user doesn't exist" msgstr "" -#: nova/volume/manager.py:102 +#: ../nova/auth/ldapdriver.py:507 #, python-format -msgid "volume %s: creating lv of size %sG" +msgid "User %s can't be added to the group because the user doesn't exist" msgstr "" -#: nova/volume/manager.py:106 +#: ../nova/auth/ldapdriver.py:510 ../nova/auth/ldapdriver.py:521 #, python-format -msgid "volume %s: creating export" +msgid "The group at dn %s doesn't exist" msgstr "" -#: nova/volume/manager.py:113 +#: ../nova/auth/ldapdriver.py:513 #, python-format -msgid "volume %s: created successfully" +msgid "User %(uid)s is already a member of the group %(group_dn)s" msgstr "" -#: nova/volume/manager.py:121 -msgid "Volume is still attached" +#: ../nova/auth/ldapdriver.py:524 +#, python-format +msgid "" +"User %s can't be removed from the group because the user doesn't exist" msgstr "" -#: nova/volume/manager.py:123 -msgid "Volume is not local to this node" +#: ../nova/auth/ldapdriver.py:528 +#, python-format +msgid "User %s is not a member of the group" msgstr "" -#: nova/volume/manager.py:124 +#: 
../nova/auth/ldapdriver.py:542 #, python-format -msgid "volume %s: removing export" +msgid "" +"Attempted to remove the last member of a group. Deleting the group at %s " +"instead." msgstr "" -#: nova/volume/manager.py:126 +#: ../nova/auth/ldapdriver.py:549 #, python-format -msgid "volume %s: deleting" +msgid "User %s can't be removed from all because the user doesn't exist" msgstr "" -#: nova/volume/manager.py:129 +#: ../nova/auth/ldapdriver.py:564 #, python-format -msgid "volume %s: deleted successfully" +msgid "Group at dn %s doesn't exist" +msgstr "" + +#: ../nova/virt/xenapi/network_utils.py:40 +#, python-format +msgid "Found non-unique network for bridge %s" +msgstr "" + +#: ../nova/virt/xenapi/network_utils.py:43 +#, python-format +msgid "Found no network for bridge %s" +msgstr "" + +#: ../nova/api/ec2/admin.py:97 +#, python-format +msgid "Creating new user: %s" +msgstr "" + +#: ../nova/api/ec2/admin.py:105 +#, python-format +msgid "Deleting user: %s" +msgstr "" + +#: ../nova/api/ec2/admin.py:127 +#, python-format +msgid "Adding role %(role)s to user %(user)s for project %(project)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:131 +#, python-format +msgid "Adding sitewide role %(role)s to user %(user)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:137 +#, python-format +msgid "Removing role %(role)s from user %(user)s for project %(project)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:141 +#, python-format +msgid "Removing sitewide role %(role)s from user %(user)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:146 ../nova/api/ec2/admin.py:223 +msgid "operation must be add or remove" +msgstr "" + +#: ../nova/api/ec2/admin.py:159 +#, python-format +msgid "Getting x509 for user: %(name)s on project: %(project)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:177 +#, python-format +msgid "Create project %(name)s managed by %(manager_user)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:190 +#, python-format +msgid "Modify project: %(name)s managed by %(manager_user)s" +msgstr "" 
+ +#: ../nova/api/ec2/admin.py:200 +#, python-format +msgid "Delete project: %s" +msgstr "" + +#: ../nova/api/ec2/admin.py:214 +#, python-format +msgid "Adding user %(user)s to project %(project)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:218 +#, python-format +msgid "Removing user %(user)s from project %(project)s" msgstr "" @@ -7,2131 +7,2862 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n" -"POT-Creation-Date: 2011-01-10 11:25-0800\n" +"POT-Creation-Date: 2011-02-21 10:03-0500\n" "PO-Revision-Date: 2011-02-07 12:45+0000\n" "Last-Translator: David Pravec <Unknown>\n" "Language-Team: Czech <cs@li.org>\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Launchpad-Export-Date: 2011-02-08 05:28+0000\n" -"X-Generator: Launchpad (build 12177)\n" +"X-Launchpad-Export-Date: 2011-03-19 06:18+0000\n" +"X-Generator: Launchpad (build 12559)\n" -#: nova/crypto.py:46 +#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55 +#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110 +#: ../nova/scheduler/simple.py:122 +msgid "No hosts found" +msgstr "" + +#: ../nova/exception.py:33 +msgid "Unexpected error while running command." +msgstr "Při spouštění příkazu došlo k nečekané chybě" + +#: ../nova/exception.py:36 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" + +#: ../nova/exception.py:107 +msgid "DB exception wrapped" +msgstr "" + +#. exc_type, exc_value, exc_traceback = sys.exc_info() +#: ../nova/exception.py:120 +msgid "Uncaught exception" +msgstr "Neošetřená výjimka" + +#: ../nova/volume/api.py:45 +#, python-format +msgid "Quota exceeeded for %(pid)s, tried to create %(size)sG volume" +msgstr "" + +#: ../nova/volume/api.py:47 +#, python-format +msgid "Volume quota exceeded. 
You cannot create a volume of size %sG" +msgstr "" + +#: ../nova/volume/api.py:71 ../nova/volume/api.py:96 +msgid "Volume status must be available" +msgstr "" + +#: ../nova/volume/api.py:98 +msgid "Volume is already attached" +msgstr "" + +#: ../nova/volume/api.py:104 +msgid "Volume is already detached" +msgstr "" + +#: ../nova/api/openstack/servers.py:72 +msgid "Failed to read private ip" +msgstr "" + +#: ../nova/api/openstack/servers.py:79 +msgid "Failed to read public ip(s)" +msgstr "" + +#: ../nova/api/openstack/servers.py:152 +#, python-format +msgid "%(param)s property not found for image %(_image_id)s" +msgstr "" + +#: ../nova/api/openstack/servers.py:168 +msgid "No keypairs defined" +msgstr "" + +#: ../nova/api/openstack/servers.py:238 +#, python-format +msgid "Compute.api::lock %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:253 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:267 +#, python-format +msgid "Compute.api::get_lock %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:281 +#, python-format +msgid "Compute.api::reset_network %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:292 +#, python-format +msgid "Compute.api::pause %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:303 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:314 +#, python-format +msgid "compute.api::suspend %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:325 +#, python-format +msgid "compute.api::resume %s" +msgstr "" + +#: ../nova/twistd.py:157 +msgid "Wrong number of arguments." +msgstr "" + +#: ../nova/twistd.py:209 +#, python-format +msgid "pidfile %s does not exist. 
Daemon not running?\n" +msgstr "" + +#: ../nova/twistd.py:221 +msgid "No such process" +msgstr "" + +#: ../nova/twistd.py:230 ../nova/service.py:224 +#, python-format +msgid "Serving %s" +msgstr "" + +#: ../nova/twistd.py:262 ../nova/service.py:225 +msgid "Full set of FLAGS:" +msgstr "" + +#: ../nova/twistd.py:266 +#, python-format +msgid "Starting %s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101 +#: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741 +#: ../nova/api/ec2/__init__.py:317 +#, python-format +msgid "Instance %s not found" +msgstr "" + +#. NOTE: No Resource Pool concept so far +#: ../nova/virt/xenapi/volumeops.py:51 +#, python-format +msgid "Attach_volume: %(instance_name)s, %(device_path)s, %(mountpoint)s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:69 +#, python-format +msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:80 +#, python-format +msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:91 +#, python-format +msgid "Unable to attach volume to instance %s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:93 +#, python-format +msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" +msgstr "" + +#. 
Detach VBD from VM +#: ../nova/virt/xenapi/volumeops.py:104 +#, python-format +msgid "Detach_volume: %(instance_name)s, %(mountpoint)s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:112 +#, python-format +msgid "Unable to locate volume %s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:120 +#, python-format +msgid "Unable to detach volume %s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:127 +#, python-format +msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" +msgstr "" + +#: ../nova/compute/instance_types.py:41 +#, python-format +msgid "Unknown instance type: %s" +msgstr "" + +#: ../nova/crypto.py:46 msgid "Filename of root CA" msgstr "Jméno souboru kořenové CA" -#: nova/crypto.py:49 +#: ../nova/crypto.py:49 msgid "Filename of private key" msgstr "Jméno souboru s privátním klíčem" -#: nova/crypto.py:51 +#: ../nova/crypto.py:51 msgid "Filename of root Certificate Revokation List" msgstr "" -#: nova/crypto.py:53 +#: ../nova/crypto.py:53 msgid "Where we keep our keys" msgstr "Adresář, do kterého ukládáme naše klíče" -#: nova/crypto.py:55 +#: ../nova/crypto.py:55 msgid "Where we keep our root CA" msgstr "Adresář, do kterého ukládáme naši kořenovou CA" -#: nova/crypto.py:57 +#: ../nova/crypto.py:57 msgid "Should we use a CA for each project?" msgstr "Použijeme CA pro každý projekt?" -#: nova/crypto.py:61 +#: ../nova/crypto.py:61 #, python-format msgid "Subject for certificate for users, %s for project, user, timestamp" msgstr "" -#: nova/crypto.py:66 +#: ../nova/crypto.py:66 #, python-format msgid "Subject for certificate for projects, %s for project, timestamp" msgstr "" -#: nova/crypto.py:71 +#: ../nova/crypto.py:71 #, python-format msgid "Subject for certificate for vpns, %s for project, timestamp" msgstr "" -#: nova/crypto.py:258 +#: ../nova/crypto.py:258 #, python-format msgid "Flags path: %s" msgstr "" -#: nova/exception.py:33 -msgid "Unexpected error while running command." 
-msgstr "Při spouštění příkazu došlo k nečekané chybě" +#: ../nova/scheduler/manager.py:69 +#, python-format +msgid "Casting to %(topic)s %(host)s for %(method)s" +msgstr "" + +#: ../nova/compute/manager.py:78 +#, python-format +msgid "check_instance_lock: decorating: |%s|" +msgstr "" -#: nova/exception.py:36 +#: ../nova/compute/manager.py:80 #, python-format msgid "" -"%s\n" -"Command: %s\n" -"Exit code: %s\n" -"Stdout: %r\n" -"Stderr: %r" -msgstr "" -"%s\n" -"Příkaz: %s\n" -"Vrácená hodnota: %s\n" -"Stdout: %r\n" -"Stderr: %r" - -#: nova/exception.py:86 -msgid "Uncaught exception" -msgstr "Neošetřená výjimka" +"check_instance_lock: arguments: |%(self)s| |%(context)s| |%(instance_id)s|" +msgstr "" -#: nova/fakerabbit.py:48 +#: ../nova/compute/manager.py:84 #, python-format -msgid "(%s) publish (key: %s) %s" +msgid "check_instance_lock: locked: |%s|" msgstr "" -#: nova/fakerabbit.py:53 +#: ../nova/compute/manager.py:86 #, python-format -msgid "Publishing to route %s" +msgid "check_instance_lock: admin: |%s|" msgstr "" -#: nova/fakerabbit.py:83 +#: ../nova/compute/manager.py:91 #, python-format -msgid "Declaring queue %s" +msgid "check_instance_lock: executing: |%s|" msgstr "" -#: nova/fakerabbit.py:89 +#: ../nova/compute/manager.py:95 #, python-format -msgid "Declaring exchange %s" +msgid "check_instance_lock: not executing |%s|" +msgstr "" + +#: ../nova/compute/manager.py:179 +msgid "Instance has already been created" msgstr "" -#: nova/fakerabbit.py:95 +#: ../nova/compute/manager.py:180 #, python-format -msgid "Binding %s to %s with key %s" +msgid "instance %s: starting..." msgstr "" -#: nova/fakerabbit.py:120 +#. pylint: disable=W0702 +#: ../nova/compute/manager.py:219 #, python-format -msgid "Getting from %s: %s" +msgid "instance %s: Failed to spawn" msgstr "" -#: nova/rpc.py:92 +#: ../nova/compute/manager.py:233 ../nova/tests/test_cloud.py:286 #, python-format -msgid "AMQP server on %s:%d is unreachable. Trying again in %d seconds." 
-msgstr "AMQP server na %s:%d není dosažitelný. Zkusím znovu za %d sekund." +msgid "Terminating instance %s" +msgstr "" -#: nova/rpc.py:99 +#: ../nova/compute/manager.py:255 #, python-format -msgid "Unable to connect to AMQP server after %d tries. Shutting down." +msgid "Deallocating address %s" msgstr "" -"Nepodařilo se připojit k AMQP serveru ani po %d pokusech. Tento proces bude " -"ukončen." -#: nova/rpc.py:118 -msgid "Reconnected to queue" -msgstr "Znovu připojeno k AMQP frontě" +#: ../nova/compute/manager.py:268 +#, python-format +msgid "trying to destroy already destroyed instance: %s" +msgstr "" -#: nova/rpc.py:125 -msgid "Failed to fetch message from queue" -msgstr "Selhalo získání zprávy z AMQP fronty" +#: ../nova/compute/manager.py:282 +#, python-format +msgid "Rebooting instance %s" +msgstr "" -#: nova/rpc.py:155 +#: ../nova/compute/manager.py:287 #, python-format -msgid "Initing the Adapter Consumer for %s" +msgid "" +"trying to reboot a non-running instance: %(instance_id)s (state: %(state)s " +"expected: %(running)s)" msgstr "" -#: nova/rpc.py:170 +#: ../nova/compute/manager.py:311 #, python-format -msgid "received %s" -msgstr "získáno: %s" +msgid "instance %s: snapshotting" +msgstr "" -#: nova/rpc.py:183 +#: ../nova/compute/manager.py:316 #, python-format -msgid "no method for message: %s" -msgstr "Není metoda pro zpracování zprávy: %s" +msgid "" +"trying to snapshot a non-running instance: %(instance_id)s (state: %(state)s " +"expected: %(running)s)" +msgstr "" -#: nova/rpc.py:184 +#: ../nova/compute/manager.py:332 #, python-format -msgid "No method for message: %s" -msgstr "Není metoda pro zpracování zprávy: %s" +msgid "" +"trying to reset the password on a non-running instance: %(instance_id)s " +"(state: %(instance_state)s expected: %(expected_state)s)" +msgstr "" -#: nova/rpc.py:245 +#: ../nova/compute/manager.py:335 #, python-format -msgid "Returning exception %s to caller" -msgstr "Volajícímu je vrácena výjimka: %s" +msgid "instance %s: 
setting admin password" +msgstr "" -#: nova/rpc.py:286 +#: ../nova/compute/manager.py:353 #, python-format -msgid "unpacked context: %s" -msgstr "rozbalený obsah: %s" +msgid "" +"trying to inject a file into a non-running instance: %(instance_id)s (state: " +"%(instance_state)s expected: %(expected_state)s)" +msgstr "" -#: nova/rpc.py:305 -msgid "Making asynchronous call..." -msgstr "Volání asynchronní funkce..." +#: ../nova/compute/manager.py:362 +#, python-format +msgid "instance %(nm)s: injecting file to %(plain_path)s" +msgstr "" -#: nova/rpc.py:308 +#: ../nova/compute/manager.py:372 #, python-format -msgid "MSG_ID is %s" -msgstr "MSG_ID je %s" +msgid "instance %s: rescuing" +msgstr "" -#: nova/rpc.py:356 +#: ../nova/compute/manager.py:387 #, python-format -msgid "response %s" -msgstr "odpověď %s" +msgid "instance %s: unrescuing" +msgstr "" -#: nova/rpc.py:365 +#: ../nova/compute/manager.py:406 #, python-format -msgid "topic is %s" +msgid "instance %s: pausing" msgstr "" -#: nova/rpc.py:366 +#: ../nova/compute/manager.py:423 #, python-format -msgid "message %s" -msgstr "zpráva %s" +msgid "instance %s: unpausing" +msgstr "" -#: nova/service.py:157 +#: ../nova/compute/manager.py:440 #, python-format -msgid "Starting %s node" +msgid "instance %s: retrieving diagnostics" msgstr "" -#: nova/service.py:169 -msgid "Service killed that has no database entry" +#: ../nova/compute/manager.py:453 +#, python-format +msgid "instance %s: suspending" msgstr "" -#: nova/service.py:190 -msgid "The service database object disappeared, Recreating it." +#: ../nova/compute/manager.py:472 +#, python-format +msgid "instance %s: resuming" msgstr "" -#: nova/service.py:202 -msgid "Recovered model server connection!" 
+#: ../nova/compute/manager.py:491 +#, python-format +msgid "instance %s: locking" msgstr "" -#: nova/service.py:208 -msgid "model server went away" +#: ../nova/compute/manager.py:503 +#, python-format +msgid "instance %s: unlocking" msgstr "" -#: nova/service.py:217 nova/db/sqlalchemy/__init__.py:43 +#: ../nova/compute/manager.py:513 #, python-format -msgid "Data store %s is unreachable. Trying again in %d seconds." +msgid "instance %s: getting locked state" msgstr "" -#: nova/service.py:232 nova/twistd.py:232 +#: ../nova/compute/manager.py:526 #, python-format -msgid "Serving %s" +msgid "instance %s: reset network" msgstr "" -#: nova/service.py:234 nova/twistd.py:264 -msgid "Full set of FLAGS:" +#: ../nova/compute/manager.py:535 ../nova/api/ec2/cloud.py:515 +#, python-format +msgid "Get console output for instance %s" msgstr "" -#: nova/twistd.py:211 +#: ../nova/compute/manager.py:543 #, python-format -msgid "pidfile %s does not exist. Daemon not running?\n" +msgid "instance %s: getting ajax console" msgstr "" -#: nova/twistd.py:268 +#: ../nova/compute/manager.py:553 #, python-format -msgid "Starting %s" +msgid "" +"instance %(instance_id)s: attaching volume %(volume_id)s to %(mountpoint)s" msgstr "" -#: nova/utils.py:53 +#. pylint: disable=W0702 +#. NOTE(vish): The inline callback eats the exception info so we +#. log the traceback here and reraise the same +#. ecxception below. 
+#: ../nova/compute/manager.py:569 #, python-format -msgid "Inner Exception: %s" +msgid "instance %(instance_id)s: attach failed %(mountpoint)s, removing" msgstr "" -#: nova/utils.py:54 +#: ../nova/compute/manager.py:585 #, python-format -msgid "Class %s cannot be found" +msgid "" +"Detach volume %(volume_id)s from mountpoint %(mp)s on instance " +"%(instance_id)s" msgstr "" -#: nova/utils.py:113 +#: ../nova/compute/manager.py:588 #, python-format -msgid "Fetching %s" +msgid "Detaching volume from unknown instance %s" msgstr "" -#: nova/utils.py:125 +#: ../nova/scheduler/simple.py:53 #, python-format -msgid "Running cmd (subprocess): %s" +msgid "Host %s is not alive" msgstr "" -#: nova/utils.py:138 +#: ../nova/scheduler/simple.py:65 +msgid "All hosts have too many cores" +msgstr "" + +#: ../nova/scheduler/simple.py:87 #, python-format -msgid "Result was %s" +msgid "Host %s not available" +msgstr "" + +#: ../nova/scheduler/simple.py:99 +msgid "All hosts have too many gigabytes" +msgstr "" + +#: ../nova/scheduler/simple.py:119 +msgid "All hosts have too many networks" msgstr "" -#: nova/utils.py:171 +#: ../nova/volume/manager.py:85 #, python-format -msgid "debug in callback: %s" +msgid "Re-exporting %s volumes" msgstr "" -#: nova/utils.py:176 +#: ../nova/volume/manager.py:90 #, python-format -msgid "Running %s" +msgid "volume %s: skipping export" msgstr "" -#: nova/utils.py:207 +#: ../nova/volume/manager.py:96 #, python-format -msgid "Couldn't get IP, using 127.0.0.1 %s" +msgid "volume %s: creating" msgstr "" -#: nova/utils.py:289 +#: ../nova/volume/manager.py:108 #, python-format -msgid "Invalid backend: %s" +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" msgstr "" -#: nova/utils.py:300 +#: ../nova/volume/manager.py:112 #, python-format -msgid "backend %s" +msgid "volume %s: creating export" msgstr "" -#: nova/api/ec2/__init__.py:133 -msgid "Too many failed authentications." 
+#: ../nova/volume/manager.py:123 +#, python-format +msgid "volume %s: created successfully" msgstr "" -#: nova/api/ec2/__init__.py:142 +#: ../nova/volume/manager.py:131 +msgid "Volume is still attached" +msgstr "" + +#: ../nova/volume/manager.py:133 +msgid "Volume is not local to this node" +msgstr "" + +#: ../nova/volume/manager.py:136 #, python-format -msgid "" -"Access key %s has had %d failed authentications and will be locked out for " -"%d minutes." +msgid "volume %s: removing export" msgstr "" -#: nova/api/ec2/__init__.py:179 nova/objectstore/handler.py:140 +#: ../nova/volume/manager.py:138 #, python-format -msgid "Authentication Failure: %s" +msgid "volume %s: deleting" msgstr "" -#: nova/api/ec2/__init__.py:190 +#: ../nova/volume/manager.py:147 #, python-format -msgid "Authenticated Request For %s:%s)" +msgid "volume %s: deleted successfully" msgstr "" -#: nova/api/ec2/__init__.py:227 +#: ../nova/virt/xenapi/fake.py:74 #, python-format -msgid "action: %s" +msgid "%(text)s: _db_content => %(content)s" msgstr "" -#: nova/api/ec2/__init__.py:229 +#: ../nova/virt/xenapi/fake.py:304 ../nova/virt/xenapi/fake.py:404 +#: ../nova/virt/xenapi/fake.py:422 ../nova/virt/xenapi/fake.py:478 +msgid "Raising NotImplemented" +msgstr "" + +#: ../nova/virt/xenapi/fake.py:306 #, python-format -msgid "arg: %s\t\tval: %s" +msgid "xenapi.fake does not have an implementation for %s" msgstr "" -#: nova/api/ec2/__init__.py:301 +#: ../nova/virt/xenapi/fake.py:341 #, python-format -msgid "Unauthorized request for controller=%s and action=%s" +msgid "Calling %(localname)s %(impl)s" msgstr "" -#: nova/api/ec2/__init__.py:339 +#: ../nova/virt/xenapi/fake.py:346 #, python-format -msgid "NotFound raised: %s" +msgid "Calling getter %s" msgstr "" -#: nova/api/ec2/__init__.py:342 +#: ../nova/virt/xenapi/fake.py:406 #, python-format -msgid "ApiError raised: %s" +msgid "" +"xenapi.fake does not have an implementation for %s or it has been called " +"with the wrong number of arguments" +msgstr 
"" + +#: ../nova/tests/test_cloud.py:256 +msgid "Can't test instances without a real virtual env." msgstr "" -#: nova/api/ec2/__init__.py:349 +#: ../nova/tests/test_cloud.py:268 #, python-format -msgid "Unexpected error raised: %s" +msgid "Need to watch instance %s until it's running..." msgstr "" -#: nova/api/ec2/__init__.py:354 -msgid "An unknown error has occurred. Please try your request again." +#: ../nova/virt/connection.py:73 +msgid "Failed to open connection to the hypervisor" msgstr "" -#: nova/api/ec2/admin.py:84 +#: ../nova/network/linux_net.py:187 #, python-format -msgid "Creating new user: %s" +msgid "Starting VLAN inteface %s" msgstr "" -#: nova/api/ec2/admin.py:92 +#: ../nova/network/linux_net.py:208 #, python-format -msgid "Deleting user: %s" +msgid "Starting Bridge interface for %s" msgstr "" -#: nova/api/ec2/admin.py:114 +#. pylint: disable=W0703 +#: ../nova/network/linux_net.py:314 #, python-format -msgid "Adding role %s to user %s for project %s" +msgid "Hupping dnsmasq threw %s" msgstr "" -#: nova/api/ec2/admin.py:117 nova/auth/manager.py:415 +#: ../nova/network/linux_net.py:316 #, python-format -msgid "Adding sitewide role %s to user %s" +msgid "Pid %d is stale, relaunching dnsmasq" msgstr "" -#: nova/api/ec2/admin.py:122 +#. pylint: disable=W0703 +#: ../nova/network/linux_net.py:358 #, python-format -msgid "Removing role %s from user %s for project %s" +msgid "killing radvd threw %s" msgstr "" -#: nova/api/ec2/admin.py:125 nova/auth/manager.py:441 +#: ../nova/network/linux_net.py:360 #, python-format -msgid "Removing sitewide role %s from user %s" +msgid "Pid %d is stale, relaunching radvd" msgstr "" -#: nova/api/ec2/admin.py:129 nova/api/ec2/admin.py:192 -msgid "operation must be add or remove" +#. 
pylint: disable=W0703 +#: ../nova/network/linux_net.py:449 +#, python-format +msgid "Killing dnsmasq threw %s" msgstr "" -#: nova/api/ec2/admin.py:142 +#: ../nova/utils.py:58 #, python-format -msgid "Getting x509 for user: %s on project: %s" +msgid "Inner Exception: %s" msgstr "" -#: nova/api/ec2/admin.py:159 +#: ../nova/utils.py:59 #, python-format -msgid "Create project %s managed by %s" +msgid "Class %s cannot be found" msgstr "" -#: nova/api/ec2/admin.py:170 +#: ../nova/utils.py:118 #, python-format -msgid "Delete project: %s" +msgid "Fetching %s" msgstr "" -#: nova/api/ec2/admin.py:184 nova/auth/manager.py:533 +#: ../nova/utils.py:130 #, python-format -msgid "Adding user %s to project %s" +msgid "Running cmd (subprocess): %s" msgstr "" -#: nova/api/ec2/admin.py:188 +#: ../nova/utils.py:143 ../nova/utils.py:183 #, python-format -msgid "Removing user %s from project %s" +msgid "Result was %s" msgstr "" -#: nova/api/ec2/apirequest.py:95 +#: ../nova/utils.py:159 #, python-format -msgid "Unsupported API request: controller = %s,action = %s" +msgid "Running cmd (SSH): %s" msgstr "" -#: nova/api/ec2/cloud.py:117 +#: ../nova/utils.py:217 #, python-format -msgid "Generating root CA: %s" +msgid "debug in callback: %s" msgstr "" -#: nova/api/ec2/cloud.py:277 +#: ../nova/utils.py:222 #, python-format -msgid "Create key pair %s" +msgid "Running %s" msgstr "" -#: nova/api/ec2/cloud.py:285 +#: ../nova/utils.py:262 #, python-format -msgid "Delete key pair %s" +msgid "Link Local address is not found.:%s" msgstr "" -#: nova/api/ec2/cloud.py:357 +#: ../nova/utils.py:265 #, python-format -msgid "%s is not a valid ipProtocol" +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" msgstr "" -#: nova/api/ec2/cloud.py:361 -msgid "Invalid port range" +#: ../nova/utils.py:363 +#, python-format +msgid "Invalid backend: %s" msgstr "" -#: nova/api/ec2/cloud.py:392 +#: ../nova/utils.py:374 #, python-format -msgid "Revoke security group ingress %s" +msgid "backend %s" msgstr "" -#: 
nova/api/ec2/cloud.py:401 nova/api/ec2/cloud.py:414 -msgid "No rule for the specified parameters." +#: ../nova/fakerabbit.py:49 +#, python-format +msgid "(%(nm)s) publish (key: %(routing_key)s) %(message)s" msgstr "" -#: nova/api/ec2/cloud.py:421 +#: ../nova/fakerabbit.py:54 #, python-format -msgid "Authorize security group ingress %s" +msgid "Publishing to route %s" msgstr "" -#: nova/api/ec2/cloud.py:432 +#: ../nova/fakerabbit.py:84 #, python-format -msgid "This rule already exists in group %s" +msgid "Declaring queue %s" msgstr "" -#: nova/api/ec2/cloud.py:460 +#: ../nova/fakerabbit.py:90 #, python-format -msgid "Create Security Group %s" +msgid "Declaring exchange %s" msgstr "" -#: nova/api/ec2/cloud.py:463 +#: ../nova/fakerabbit.py:96 #, python-format -msgid "group %s already exists" +msgid "Binding %(queue)s to %(exchange)s with key %(routing_key)s" msgstr "" -#: nova/api/ec2/cloud.py:475 +#: ../nova/fakerabbit.py:121 #, python-format -msgid "Delete security group %s" +msgid "Getting from %(queue)s: %(message)s" msgstr "" -#: nova/api/ec2/cloud.py:483 nova/compute/manager.py:452 +#: ../nova/virt/xenapi/vm_utils.py:135 ../nova/virt/hyperv.py:171 #, python-format -msgid "Get console output for instance %s" +msgid "Created VM %s..." msgstr "" -#: nova/api/ec2/cloud.py:543 +#: ../nova/virt/xenapi/vm_utils.py:138 #, python-format -msgid "Create volume of %s GB" +msgid "Created VM %(instance_name)s as %(vm_ref)s." msgstr "" -#: nova/api/ec2/cloud.py:567 +#: ../nova/virt/xenapi/vm_utils.py:168 #, python-format -msgid "Attach volume %s to instacne %s at %s" +msgid "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " msgstr "" -#: nova/api/ec2/cloud.py:579 +#: ../nova/virt/xenapi/vm_utils.py:171 #, python-format -msgid "Detach volume %s" +msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." 
msgstr "" -#: nova/api/ec2/cloud.py:686 -msgid "Allocate address" +#: ../nova/virt/xenapi/vm_utils.py:187 +#, python-format +msgid "VBD not found in instance %s" msgstr "" -#: nova/api/ec2/cloud.py:691 +#: ../nova/virt/xenapi/vm_utils.py:197 #, python-format -msgid "Release address %s" +msgid "Unable to unplug VBD %s" msgstr "" -#: nova/api/ec2/cloud.py:696 +#: ../nova/virt/xenapi/vm_utils.py:209 #, python-format -msgid "Associate address %s to instance %s" +msgid "Unable to destroy VBD %s" msgstr "" -#: nova/api/ec2/cloud.py:703 +#: ../nova/virt/xenapi/vm_utils.py:224 #, python-format -msgid "Disassociate address %s" +msgid "Creating VIF for VM %(vm_ref)s, network %(network_ref)s." msgstr "" -#: nova/api/ec2/cloud.py:730 -msgid "Going to start terminating instances" +#: ../nova/virt/xenapi/vm_utils.py:227 +#, python-format +msgid "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s." msgstr "" -#: nova/api/ec2/cloud.py:738 +#: ../nova/virt/xenapi/vm_utils.py:246 #, python-format -msgid "Reboot instance %r" +msgid "" +"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s) on " +"%(sr_ref)s." msgstr "" -#: nova/api/ec2/cloud.py:775 +#. TODO(sirp): Add quiesce and VSS locking support when Windows support +#. is added +#: ../nova/virt/xenapi/vm_utils.py:258 #, python-format -msgid "De-registering image %s" +msgid "Snapshotting VM %(vm_ref)s with label '%(label)s'..." msgstr "" -#: nova/api/ec2/cloud.py:783 +#: ../nova/virt/xenapi/vm_utils.py:272 #, python-format -msgid "Registered image %s with id %s" +msgid "Created snapshot %(template_vm_ref)s from VM %(vm_ref)s." 
msgstr "" -#: nova/api/ec2/cloud.py:789 nova/api/ec2/cloud.py:804 +#: ../nova/virt/xenapi/vm_utils.py:286 #, python-format -msgid "attribute not supported: %s" +msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s" msgstr "" -#: nova/api/ec2/cloud.py:794 +#: ../nova/virt/xenapi/vm_utils.py:327 #, python-format -msgid "invalid id: %s" +msgid "Size for image %(image)s:%(virtual_size)d" msgstr "" -#: nova/api/ec2/cloud.py:807 -msgid "user or group not specified" +#: ../nova/virt/xenapi/vm_utils.py:332 +#, python-format +msgid "Glance image %s" msgstr "" -#: nova/api/ec2/cloud.py:809 -msgid "only group \"all\" is supported" +#. we need to invoke a plugin for copying VDI's +#. content into proper path +#: ../nova/virt/xenapi/vm_utils.py:342 +#, python-format +msgid "Copying VDI %s to /boot/guest on dom0" msgstr "" -#: nova/api/ec2/cloud.py:811 -msgid "operation_type must be add or remove" +#: ../nova/virt/xenapi/vm_utils.py:352 +#, python-format +msgid "Kernel/Ramdisk VDI %s destroyed" msgstr "" -#: nova/api/ec2/cloud.py:812 +#: ../nova/virt/xenapi/vm_utils.py:361 #, python-format -msgid "Updating image %s publicity" +msgid "Asking xapi to fetch %(url)s as %(access)s" msgstr "" -#: nova/api/ec2/metadatarequesthandler.py:75 +#: ../nova/virt/xenapi/vm_utils.py:386 ../nova/virt/xenapi/vm_utils.py:402 #, python-format -msgid "Failed to get metadata for ip: %s" +msgid "Looking up vdi %s for PV kernel" msgstr "" -#: nova/api/openstack/__init__.py:70 +#: ../nova/virt/xenapi/vm_utils.py:397 #, python-format -msgid "Caught error: %s" +msgid "PV Kernel in VDI:%s" msgstr "" -#: nova/api/openstack/__init__.py:86 -msgid "Including admin operations in API." 
+#: ../nova/virt/xenapi/vm_utils.py:405 +#, python-format +msgid "Running pygrub against %s" msgstr "" -#: nova/api/openstack/servers.py:184 +#: ../nova/virt/xenapi/vm_utils.py:411 #, python-format -msgid "Compute.api::lock %s" +msgid "Found Xen kernel %s" msgstr "" -#: nova/api/openstack/servers.py:199 +#: ../nova/virt/xenapi/vm_utils.py:413 +msgid "No Xen kernel found. Booting HVM." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:425 ../nova/virt/hyperv.py:431 #, python-format -msgid "Compute.api::unlock %s" +msgid "duplicate name found: %s" msgstr "" -#: nova/api/openstack/servers.py:213 +#: ../nova/virt/xenapi/vm_utils.py:442 #, python-format -msgid "Compute.api::get_lock %s" +msgid "VDI %s is still available" msgstr "" -#: nova/api/openstack/servers.py:224 +#: ../nova/virt/xenapi/vm_utils.py:463 #, python-format -msgid "Compute.api::pause %s" +msgid "(VM_UTILS) xenserver vm state -> |%s|" msgstr "" -#: nova/api/openstack/servers.py:235 +#: ../nova/virt/xenapi/vm_utils.py:465 #, python-format -msgid "Compute.api::unpause %s" +msgid "(VM_UTILS) xenapi power_state -> |%s|" msgstr "" -#: nova/api/openstack/servers.py:246 +#: ../nova/virt/xenapi/vm_utils.py:525 #, python-format -msgid "compute.api::suspend %s" +msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s" msgstr "" -#: nova/api/openstack/servers.py:257 +#: ../nova/virt/xenapi/vm_utils.py:542 #, python-format -msgid "compute.api::resume %s" +msgid "Re-scanning SR %s" msgstr "" -#: nova/auth/dbdriver.py:84 +#: ../nova/virt/xenapi/vm_utils.py:567 #, python-format -msgid "User %s already exists" +msgid "" +"VHD coalesce attempts exceeded (%(counter)d > %(max_attempts)d), giving up..." msgstr "" -#: nova/auth/dbdriver.py:106 nova/auth/ldapdriver.py:207 +#: ../nova/virt/xenapi/vm_utils.py:574 #, python-format -msgid "Project can't be created because manager %s doesn't exist" +msgid "" +"Parent %(parent_uuid)s doesn't match original parent " +"%(original_parent_uuid)s, waiting for coalesce..." 
msgstr "" -#: nova/auth/dbdriver.py:135 nova/auth/ldapdriver.py:204 +#: ../nova/virt/xenapi/vm_utils.py:590 #, python-format -msgid "Project can't be created because project %s already exists" +msgid "No VDIs found for VM %s" msgstr "" -#: nova/auth/dbdriver.py:157 nova/auth/ldapdriver.py:241 +#: ../nova/virt/xenapi/vm_utils.py:594 #, python-format -msgid "Project can't be modified because manager %s doesn't exist" +msgid "Unexpected number of VDIs (%(num_vdis)s) found for VM %(vm_ref)s" msgstr "" -#: nova/auth/dbdriver.py:245 +#: ../nova/virt/xenapi/vm_utils.py:653 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:188 #, python-format -msgid "User \"%s\" not found" +msgid "Creating VBD for VDI %s ... " msgstr "" -#: nova/auth/dbdriver.py:248 +#: ../nova/virt/xenapi/vm_utils.py:655 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:190 #, python-format -msgid "Project \"%s\" not found" +msgid "Creating VBD for VDI %s done." msgstr "" -#: nova/auth/fakeldap.py:33 -msgid "Attempted to instantiate singleton" +#: ../nova/virt/xenapi/vm_utils.py:657 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:192 +#, python-format +msgid "Plugging VBD %s ... " msgstr "" -#: nova/auth/ldapdriver.py:181 +#: ../nova/virt/xenapi/vm_utils.py:659 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:194 #, python-format -msgid "LDAP object for %s doesn't exist" +msgid "Plugging VBD %s done." 
msgstr "" -#: nova/auth/ldapdriver.py:218 +#: ../nova/virt/xenapi/vm_utils.py:661 #, python-format -msgid "Project can't be created because user %s doesn't exist" +msgid "VBD %(vbd)s plugged as %(orig_dev)s" msgstr "" -#: nova/auth/ldapdriver.py:478 +#: ../nova/virt/xenapi/vm_utils.py:664 #, python-format -msgid "User %s is already a member of the group %s" +msgid "VBD %(vbd)s plugged into wrong dev, remapping to %(dev)s" msgstr "" -#: nova/auth/ldapdriver.py:507 +#: ../nova/virt/xenapi/vm_utils.py:668 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:197 #, python-format -msgid "" -"Attempted to remove the last member of a group. Deleting the group at %s " -"instead." +msgid "Destroying VBD for VDI %s ... " msgstr "" -#: nova/auth/ldapdriver.py:528 +#: ../nova/virt/xenapi/vm_utils.py:671 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:200 #, python-format -msgid "Group at dn %s doesn't exist" +msgid "Destroying VBD for VDI %s done." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:683 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:211 +msgid "VBD.unplug successful first time." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:688 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:216 +msgid "VBD.unplug rejected: retrying..." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:692 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:220 +msgid "VBD.unplug successful eventually." 
msgstr "" -#: nova/auth/manager.py:259 +#: ../nova/virt/xenapi/vm_utils.py:695 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:223 #, python-format -msgid "Looking up user: %r" +msgid "Ignoring XenAPI.Failure in VBD.unplug: %s" msgstr "" -#: nova/auth/manager.py:263 +#: ../nova/virt/xenapi/vm_utils.py:704 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:66 #, python-format -msgid "Failed authorization for access key %s" +msgid "Ignoring XenAPI.Failure %s" msgstr "" -#: nova/auth/manager.py:264 +#: ../nova/virt/xenapi/vm_utils.py:735 #, python-format -msgid "No user found for access key %s" +msgid "" +"Writing partition table %(primary_first)d %(primary_last)d to %(dest)s..." msgstr "" -#: nova/auth/manager.py:270 +#: ../nova/virt/xenapi/vm_utils.py:747 #, python-format -msgid "Using project name = user name (%s)" +msgid "Writing partition table %s done." msgstr "" -#: nova/auth/manager.py:275 +#: ../nova/tests/test_rpc.py:89 #, python-format -msgid "failed authorization: no project named %s (user=%s)" +msgid "Nested received %(queue)s, %(value)s" msgstr "" -#: nova/auth/manager.py:277 +#: ../nova/tests/test_rpc.py:95 #, python-format -msgid "No project called %s could be found" +msgid "Nested return %s" msgstr "" -#: nova/auth/manager.py:281 +#: ../nova/tests/test_rpc.py:120 ../nova/tests/test_rpc.py:126 #, python-format -msgid "Failed authorization: user %s not admin and not member of project %s" +msgid "Received %s" msgstr "" -#: nova/auth/manager.py:283 +#: ../nova/db/sqlalchemy/api.py:44 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:133 #, python-format -msgid "User %s is not a member of project %s" +msgid "No service for id %s" msgstr "" -#: nova/auth/manager.py:292 nova/auth/manager.py:303 +#: ../nova/db/sqlalchemy/api.py:251 #, python-format -msgid "Invalid signature for user %s" +msgid "No service for %(host)s, %(binary)s" msgstr "" -#: nova/auth/manager.py:293 
nova/auth/manager.py:304 -msgid "Signature does not match" +#: ../nova/db/sqlalchemy/api.py:592 +msgid "No fixed ips defined" msgstr "" -#: nova/auth/manager.py:374 -msgid "Must specify project" +#: ../nova/db/sqlalchemy/api.py:608 +#, python-format +msgid "No floating ip for address %s" msgstr "" -#: nova/auth/manager.py:408 +#: ../nova/db/sqlalchemy/api.py:629 #, python-format -msgid "The %s role can not be found" +msgid "No address for instance %s" msgstr "" -#: nova/auth/manager.py:410 +#: ../nova/db/sqlalchemy/api.py:961 #, python-format -msgid "The %s role is global only" +msgid "no keypair for user %(user_id)s, name %(name)s" msgstr "" -#: nova/auth/manager.py:412 +#: ../nova/db/sqlalchemy/api.py:1076 ../nova/db/sqlalchemy/api.py:1156 #, python-format -msgid "Adding role %s to user %s in project %s" +msgid "No network for id %s" msgstr "" -#: nova/auth/manager.py:438 +#: ../nova/db/sqlalchemy/api.py:1086 +msgid "No networks defined" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1115 #, python-format -msgid "Removing role %s from user %s on project %s" +msgid "No network for bridge %s" msgstr "" -#: nova/auth/manager.py:505 +#: ../nova/db/sqlalchemy/api.py:1129 ../nova/db/sqlalchemy/api.py:1142 #, python-format -msgid "Created project %s with manager %s" +msgid "No network for instance %s" msgstr "" -#: nova/auth/manager.py:523 +#: ../nova/db/sqlalchemy/api.py:1277 #, python-format -msgid "modifying project %s" +msgid "Token %s does not exist" msgstr "" -#: nova/auth/manager.py:553 +#: ../nova/db/sqlalchemy/api.py:1302 #, python-format -msgid "Remove user %s from project %s" +msgid "No quota for project_id %s" msgstr "" -#: nova/auth/manager.py:581 +#: ../nova/db/sqlalchemy/api.py:1455 ../nova/db/sqlalchemy/api.py:1501 +#: ../nova/api/ec2/__init__.py:323 #, python-format -msgid "Deleting project %s" +msgid "Volume %s not found" msgstr "" -#: nova/auth/manager.py:637 +#: ../nova/db/sqlalchemy/api.py:1514 #, python-format -msgid "Created user %s (admin: %r)" 
+msgid "No export device found for volume %s" msgstr "" -#: nova/auth/manager.py:645 +#: ../nova/db/sqlalchemy/api.py:1527 #, python-format -msgid "Deleting user %s" +msgid "No target id found for volume %s" msgstr "" -#: nova/auth/manager.py:655 +#: ../nova/db/sqlalchemy/api.py:1572 #, python-format -msgid "Access Key change for user %s" +msgid "No security group with id %s" msgstr "" -#: nova/auth/manager.py:657 +#: ../nova/db/sqlalchemy/api.py:1589 #, python-format -msgid "Secret Key change for user %s" +msgid "No security group named %(group_name)s for project: %(project_id)s" msgstr "" -#: nova/auth/manager.py:659 +#: ../nova/db/sqlalchemy/api.py:1682 #, python-format -msgid "Admin status set to %r for user %s" +msgid "No secuity group rule with id %s" msgstr "" -#: nova/auth/manager.py:708 +#: ../nova/db/sqlalchemy/api.py:1756 #, python-format -msgid "No vpn data for project %s" +msgid "No user for id %s" msgstr "" -#: nova/cloudpipe/pipelib.py:45 -msgid "Template for script to run on cloudpipe instance boot" +#: ../nova/db/sqlalchemy/api.py:1772 +#, python-format +msgid "No user for access key %s" msgstr "" -#: nova/cloudpipe/pipelib.py:48 -msgid "Network to push into openvpn config" +#: ../nova/db/sqlalchemy/api.py:1834 +#, python-format +msgid "No project with id %s" msgstr "" -#: nova/cloudpipe/pipelib.py:51 -msgid "Netmask to push into openvpn config" +#: ../nova/db/sqlalchemy/api.py:1979 +#, python-format +msgid "No console pool with id %(pool_id)s" msgstr "" -#: nova/cloudpipe/pipelib.py:97 +#: ../nova/db/sqlalchemy/api.py:1996 #, python-format -msgid "Launching VPN for %s" +msgid "" +"No console pool of type %(console_type)s for compute host %(compute_host)s " +"on proxy host %(host)s" msgstr "" -#: nova/compute/api.py:67 +#: ../nova/db/sqlalchemy/api.py:2035 #, python-format -msgid "Instance %d was not found in get_network_topic" +msgid "No console for instance %(instance_id)s in pool %(pool_id)s" msgstr "" -#: nova/compute/api.py:73 +#: 
../nova/db/sqlalchemy/api.py:2057 #, python-format -msgid "Instance %d has no host" +msgid "on instance %s" msgstr "" -#: nova/compute/api.py:92 +#: ../nova/db/sqlalchemy/api.py:2058 #, python-format -msgid "Quota exceeeded for %s, tried to run %s instances" +msgid "No console with id %(console_id)s %(idesc)s" msgstr "" -#: nova/compute/api.py:94 +#: ../nova/db/sqlalchemy/api.py:2078 ../nova/db/sqlalchemy/api.py:2097 #, python-format -msgid "" -"Instance quota exceeded. You can only run %s more instances of this type." +msgid "No zone with id %(zone_id)s" msgstr "" -#: nova/compute/api.py:109 -msgid "Creating a raw instance" +#: ../nova/virt/libvirt_conn.py:160 +#, python-format +msgid "Checking state of %s" msgstr "" -#: nova/compute/api.py:156 +#: ../nova/virt/libvirt_conn.py:165 #, python-format -msgid "Going to run %s instances..." +msgid "Current state of %(name)s was %(state)s." msgstr "" -#: nova/compute/api.py:180 +#: ../nova/virt/libvirt_conn.py:183 #, python-format -msgid "Casting to scheduler for %s/%s's instance %s" +msgid "Connecting to libvirt: %s" msgstr "" -#: nova/compute/api.py:279 +#: ../nova/virt/libvirt_conn.py:196 +msgid "Connection to libvirt broke" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:258 #, python-format -msgid "Going to try and terminate %s" +msgid "instance %(instance_name)s: deleting instance files %(target)s" msgstr "" -#: nova/compute/api.py:283 +#: ../nova/virt/libvirt_conn.py:283 #, python-format -msgid "Instance %d was not found during terminate" +msgid "Invalid device path %s" msgstr "" -#: nova/compute/api.py:288 +#: ../nova/virt/libvirt_conn.py:313 #, python-format -msgid "Instance %d is already being terminated" +msgid "No disk at %s" msgstr "" -#: nova/compute/api.py:450 +#: ../nova/virt/libvirt_conn.py:320 +msgid "Instance snapshotting is not supported for libvirtat this time" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:336 #, python-format -msgid "Invalid device specified: %s. 
Example device: /dev/vdb" +msgid "instance %s: rebooted" msgstr "" -#: nova/compute/api.py:465 -msgid "Volume isn't attached to anything!" +#: ../nova/virt/libvirt_conn.py:339 +#, python-format +msgid "_wait_for_reboot failed: %s" msgstr "" -#: nova/compute/disk.py:71 +#: ../nova/virt/libvirt_conn.py:382 #, python-format -msgid "Input partition size not evenly divisible by sector size: %d / %d" +msgid "instance %s: rescued" msgstr "" -#: nova/compute/disk.py:75 +#: ../nova/virt/libvirt_conn.py:385 #, python-format -msgid "Bytes for local storage not evenly divisible by sector size: %d / %d" +msgid "_wait_for_rescue failed: %s" msgstr "" -#: nova/compute/disk.py:128 +#: ../nova/virt/libvirt_conn.py:411 #, python-format -msgid "Could not attach image to loopback: %s" +msgid "instance %s: is running" msgstr "" -#: nova/compute/disk.py:136 +#: ../nova/virt/libvirt_conn.py:422 #, python-format -msgid "Failed to load partition: %s" +msgid "instance %s: booted" msgstr "" -#: nova/compute/disk.py:158 +#: ../nova/virt/libvirt_conn.py:425 ../nova/virt/xenapi/vmops.py:186 #, python-format -msgid "Failed to mount filesystem: %s" +msgid "instance %s: failed to boot" msgstr "" -#: nova/compute/instance_types.py:41 +#: ../nova/virt/libvirt_conn.py:436 #, python-format -msgid "Unknown instance type: %s" +msgid "virsh said: %r" msgstr "" -#: nova/compute/manager.py:69 +#: ../nova/virt/libvirt_conn.py:440 +msgid "cool, it's a device" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:448 #, python-format -msgid "check_instance_lock: decorating: |%s|" +msgid "data: %(data)r, fpath: %(fpath)r" msgstr "" -#: nova/compute/manager.py:71 +#: ../nova/virt/libvirt_conn.py:456 #, python-format -msgid "check_instance_lock: arguments: |%s| |%s| |%s|" +msgid "Contents of file %(fpath)s: %(contents)r" msgstr "" -#: nova/compute/manager.py:75 +#: ../nova/virt/libvirt_conn.py:489 +msgid "Unable to find an open port" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:563 #, python-format -msgid 
"check_instance_lock: locked: |%s|" +msgid "instance %s: Creating image" msgstr "" -#: nova/compute/manager.py:77 +#: ../nova/virt/libvirt_conn.py:646 #, python-format -msgid "check_instance_lock: admin: |%s|" +msgid "instance %(inst_name)s: injecting key into image %(img_id)s" msgstr "" -#: nova/compute/manager.py:82 +#: ../nova/virt/libvirt_conn.py:649 #, python-format -msgid "check_instance_lock: executing: |%s|" +msgid "instance %(inst_name)s: injecting net into image %(img_id)s" msgstr "" -#: nova/compute/manager.py:86 +#. This could be a windows image, or a vmdk format disk +#: ../nova/virt/libvirt_conn.py:657 #, python-format -msgid "check_instance_lock: not executing |%s|" +msgid "" +"instance %(inst_name)s: ignoring error injecting data into image %(img_id)s " +"(%(e)s)" msgstr "" -#: nova/compute/manager.py:157 -msgid "Instance has already been created" +#. TODO(termie): cache? +#: ../nova/virt/libvirt_conn.py:665 +#, python-format +msgid "instance %s: starting toXML method" msgstr "" -#: nova/compute/manager.py:158 +#: ../nova/virt/libvirt_conn.py:732 #, python-format -msgid "instance %s: starting..." 
+msgid "instance %s: finished toXML method" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:751 +msgid "diagnostics are not supported for libvirt" msgstr "" -#: nova/compute/manager.py:197 +#: ../nova/virt/libvirt_conn.py:1225 #, python-format -msgid "instance %s: Failed to spawn" +msgid "Attempted to unfilter instance %s which is not filtered" msgstr "" -#: nova/compute/manager.py:211 nova/tests/test_cloud.py:228 +#: ../nova/api/ec2/metadatarequesthandler.py:76 #, python-format -msgid "Terminating instance %s" +msgid "Failed to get metadata for ip: %s" +msgstr "" + +#: ../nova/auth/fakeldap.py:33 +msgid "Attempted to instantiate singleton" msgstr "" -#: nova/compute/manager.py:217 +#: ../nova/network/api.py:39 #, python-format -msgid "Disassociating address %s" +msgid "Quota exceeeded for %s, tried to allocate address" +msgstr "" + +#: ../nova/network/api.py:42 +msgid "Address quota exceeded. You cannot allocate any more addresses" msgstr "" -#: nova/compute/manager.py:230 +#: ../nova/tests/test_volume.py:162 #, python-format -msgid "Deallocating address %s" +msgid "Target %s allocated" msgstr "" -#: nova/compute/manager.py:243 +#: ../nova/virt/images.py:70 #, python-format -msgid "trying to destroy already destroyed instance: %s" +msgid "Finished retreving %(url)s -- placed in %(path)s" msgstr "" -#: nova/compute/manager.py:257 +#: ../nova/scheduler/driver.py:66 +msgid "Must implement a fallback schedule" +msgstr "" + +#: ../nova/console/manager.py:70 +msgid "Adding console" +msgstr "" + +#: ../nova/console/manager.py:90 #, python-format -msgid "Rebooting instance %s" +msgid "Tried to remove non-existant console %(console_id)s." +msgstr "" + +#: ../nova/api/direct.py:149 +msgid "not available" msgstr "" -#: nova/compute/manager.py:260 +#: ../nova/api/ec2/cloud.py:62 #, python-format -msgid "trying to reboot a non-running instance: %s (state: %s excepted: %s)" +msgid "The key_pair %s already exists" msgstr "" -#: nova/compute/manager.py:286 +#. 
TODO(vish): Do this with M2Crypto instead +#: ../nova/api/ec2/cloud.py:118 #, python-format -msgid "instance %s: snapshotting" +msgid "Generating root CA: %s" msgstr "" -#: nova/compute/manager.py:289 +#: ../nova/api/ec2/cloud.py:303 #, python-format -msgid "" -"trying to snapshot a non-running instance: %s (state: %s excepted: %s)" +msgid "Create key pair %s" msgstr "" -#: nova/compute/manager.py:301 +#: ../nova/api/ec2/cloud.py:311 #, python-format -msgid "instance %s: rescuing" +msgid "Delete key pair %s" msgstr "" -#: nova/compute/manager.py:316 +#: ../nova/api/ec2/cloud.py:386 #, python-format -msgid "instance %s: unrescuing" +msgid "%s is not a valid ipProtocol" +msgstr "" + +#: ../nova/api/ec2/cloud.py:390 +msgid "Invalid port range" msgstr "" -#: nova/compute/manager.py:335 +#: ../nova/api/ec2/cloud.py:421 #, python-format -msgid "instance %s: pausing" +msgid "Revoke security group ingress %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:430 ../nova/api/ec2/cloud.py:459 +msgid "Not enough parameters to build a valid rule." msgstr "" -#: nova/compute/manager.py:352 +#: ../nova/api/ec2/cloud.py:443 +msgid "No rule for the specified parameters." 
+msgstr "" + +#: ../nova/api/ec2/cloud.py:450 #, python-format -msgid "instance %s: unpausing" +msgid "Authorize security group ingress %s" msgstr "" -#: nova/compute/manager.py:369 +#: ../nova/api/ec2/cloud.py:464 #, python-format -msgid "instance %s: retrieving diagnostics" +msgid "This rule already exists in group %s" msgstr "" -#: nova/compute/manager.py:382 +#: ../nova/api/ec2/cloud.py:492 #, python-format -msgid "instance %s: suspending" +msgid "Create Security Group %s" msgstr "" -#: nova/compute/manager.py:401 +#: ../nova/api/ec2/cloud.py:495 #, python-format -msgid "instance %s: resuming" +msgid "group %s already exists" msgstr "" -#: nova/compute/manager.py:420 +#: ../nova/api/ec2/cloud.py:507 #, python-format -msgid "instance %s: locking" +msgid "Delete security group %s" msgstr "" -#: nova/compute/manager.py:432 +#: ../nova/api/ec2/cloud.py:584 #, python-format -msgid "instance %s: unlocking" +msgid "Create volume of %s GB" msgstr "" -#: nova/compute/manager.py:442 +#: ../nova/api/ec2/cloud.py:612 #, python-format -msgid "instance %s: getting locked state" +msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" msgstr "" -#: nova/compute/manager.py:462 +#: ../nova/api/ec2/cloud.py:629 #, python-format -msgid "instance %s: attaching volume %s to %s" +msgid "Detach volume %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:761 +msgid "Allocate address" msgstr "" -#: nova/compute/manager.py:478 +#: ../nova/api/ec2/cloud.py:766 #, python-format -msgid "instance %s: attach failed %s, removing" +msgid "Release address %s" msgstr "" -#: nova/compute/manager.py:493 +#: ../nova/api/ec2/cloud.py:771 #, python-format -msgid "Detach volume %s from mountpoint %s on instance %s" +msgid "Associate address %(public_ip)s to instance %(instance_id)s" msgstr "" -#: nova/compute/manager.py:497 +#: ../nova/api/ec2/cloud.py:780 #, python-format -msgid "Detaching volume from unknown instance %s" +msgid "Disassociate address %s" +msgstr "" + +#: 
../nova/api/ec2/cloud.py:807 +msgid "Going to start terminating instances" msgstr "" -#: nova/compute/monitor.py:259 +#: ../nova/api/ec2/cloud.py:815 #, python-format -msgid "updating %s..." +msgid "Reboot instance %r" msgstr "" -#: nova/compute/monitor.py:289 -msgid "unexpected error during update" +#: ../nova/api/ec2/cloud.py:867 +#, python-format +msgid "De-registering image %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:875 +#, python-format +msgid "Registered image %(image_location)s with id %(image_id)s" msgstr "" -#: nova/compute/monitor.py:355 +#: ../nova/api/ec2/cloud.py:882 ../nova/api/ec2/cloud.py:900 #, python-format -msgid "Cannot get blockstats for \"%s\" on \"%s\"" +msgid "attribute not supported: %s" msgstr "" -#: nova/compute/monitor.py:377 +#: ../nova/api/ec2/cloud.py:890 #, python-format -msgid "Cannot get ifstats for \"%s\" on \"%s\"" +msgid "invalid id: %s" msgstr "" -#: nova/compute/monitor.py:412 -msgid "unexpected exception getting connection" +#: ../nova/api/ec2/cloud.py:903 +msgid "user or group not specified" +msgstr "" + +#: ../nova/api/ec2/cloud.py:905 +msgid "only group \"all\" is supported" +msgstr "" + +#: ../nova/api/ec2/cloud.py:907 +msgid "operation_type must be add or remove" msgstr "" -#: nova/compute/monitor.py:427 +#: ../nova/api/ec2/cloud.py:908 #, python-format -msgid "Found instance: %s" +msgid "Updating image %s publicity" msgstr "" -#: nova/db/sqlalchemy/api.py:43 -msgid "Use of empty request context is deprecated" +#: ../bin/nova-api.py:52 +#, python-format +msgid "Using paste.deploy config at: %s" msgstr "" -#: nova/db/sqlalchemy/api.py:132 +#: ../bin/nova-api.py:57 #, python-format -msgid "No service for id %s" +msgid "No paste configuration for app: %s" msgstr "" -#: nova/db/sqlalchemy/api.py:229 +#: ../bin/nova-api.py:59 #, python-format -msgid "No service for %s, %s" +msgid "" +"App Config: %(api)s\n" +"%(config)r" msgstr "" -#: nova/db/sqlalchemy/api.py:574 +#: ../bin/nova-api.py:64 #, python-format -msgid "No 
floating ip for address %s" +msgid "Running %s API" msgstr "" -#: nova/db/sqlalchemy/api.py:668 +#: ../bin/nova-api.py:69 #, python-format -msgid "No instance for id %s" +msgid "No known API applications configured in %s." msgstr "" -#: nova/db/sqlalchemy/api.py:758 nova/virt/libvirt_conn.py:598 -#: nova/virt/xenapi/volumeops.py:48 nova/virt/xenapi/volumeops.py:103 +#: ../bin/nova-api.py:83 #, python-format -msgid "Instance %s not found" +msgid "Starting nova-api node (version %s)" msgstr "" -#: nova/db/sqlalchemy/api.py:891 +#: ../bin/nova-api.py:89 #, python-format -msgid "no keypair for user %s, name %s" +msgid "No paste configuration found for: %s" msgstr "" -#: nova/db/sqlalchemy/api.py:1006 nova/db/sqlalchemy/api.py:1064 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:84 #, python-format -msgid "No network for id %s" +msgid "Argument %(key)s value %(value)s is too short." msgstr "" -#: nova/db/sqlalchemy/api.py:1036 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:89 #, python-format -msgid "No network for bridge %s" +msgid "Argument %(key)s value %(value)s contains invalid characters." msgstr "" -#: nova/db/sqlalchemy/api.py:1050 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:94 #, python-format -msgid "No network for instance %s" +msgid "Argument %(key)s value %(value)s starts with a hyphen." msgstr "" -#: nova/db/sqlalchemy/api.py:1180 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:102 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:130 #, python-format -msgid "Token %s does not exist" +msgid "Argument %s is required." msgstr "" -#: nova/db/sqlalchemy/api.py:1205 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:117 #, python-format -msgid "No quota for project_id %s" +msgid "" +"Argument %(key)s may not take value %(value)s. Valid values are ['true', " +"'false']." 
msgstr "" -#: nova/db/sqlalchemy/api.py:1356 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:163 #, python-format -msgid "No volume for id %s" +msgid "" +"Created VDI %(vdi_ref)s (%(label)s, %(size)s, %(read_only)s) on %(sr_ref)s." msgstr "" -#: nova/db/sqlalchemy/api.py:1401 +#: ../nova/virt/xenapi/vmops.py:67 #, python-format -msgid "Volume %s not found" +msgid "Attempted to create non-unique name %s" msgstr "" -#: nova/db/sqlalchemy/api.py:1413 +#: ../nova/virt/xenapi/vmops.py:73 #, python-format -msgid "No export device found for volume %s" +msgid "instance %(name)s: not enough free memory" msgstr "" -#: nova/db/sqlalchemy/api.py:1426 +#: ../nova/virt/xenapi/vmops.py:148 #, python-format -msgid "No target id found for volume %s" +msgid "Starting VM %s..." msgstr "" -#: nova/db/sqlalchemy/api.py:1471 +#: ../nova/virt/xenapi/vmops.py:151 #, python-format -msgid "No security group with id %s" +msgid "Spawning VM %(instance_name)s created %(vm_ref)s." msgstr "" -#: nova/db/sqlalchemy/api.py:1488 +#: ../nova/virt/xenapi/vmops.py:162 #, python-format -msgid "No security group named %s for project: %s" +msgid "Invalid value for onset_files: '%s'" msgstr "" -#: nova/db/sqlalchemy/api.py:1576 +#: ../nova/virt/xenapi/vmops.py:167 #, python-format -msgid "No secuity group rule with id %s" +msgid "Injecting file path: '%s'" msgstr "" -#: nova/db/sqlalchemy/api.py:1650 +#: ../nova/virt/xenapi/vmops.py:180 #, python-format -msgid "No user for id %s" +msgid "Instance %s: booted" msgstr "" -#: nova/db/sqlalchemy/api.py:1666 +#: ../nova/virt/xenapi/vmops.py:232 #, python-format -msgid "No user for access key %s" +msgid "Instance not present %s" msgstr "" -#: nova/db/sqlalchemy/api.py:1728 +#. TODO(sirp): Add quiesce and VSS locking support when Windows support +#. 
is added +#: ../nova/virt/xenapi/vmops.py:261 #, python-format -msgid "No project with id %s" +msgid "Starting snapshot for VM %s" msgstr "" -#: nova/image/glance.py:78 +#: ../nova/virt/xenapi/vmops.py:269 #, python-format -msgid "Parallax returned HTTP error %d from request for /images" +msgid "Unable to Snapshot %(vm_ref)s: %(exc)s" msgstr "" -#: nova/image/glance.py:97 +#: ../nova/virt/xenapi/vmops.py:280 #, python-format -msgid "Parallax returned HTTP error %d from request for /images/detail" +msgid "Finished snapshot and upload for VM %s" msgstr "" -#: nova/image/s3.py:82 +#: ../nova/virt/xenapi/vmops.py:356 #, python-format -msgid "Image %s could not be found" +msgid "VM %(vm)s already halted, skipping shutdown..." +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:389 +msgid "Removing kernel/ramdisk files" msgstr "" -#: nova/network/api.py:39 +#: ../nova/virt/xenapi/vmops.py:399 +msgid "kernel/ramdisk files removed" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:561 #, python-format -msgid "Quota exceeeded for %s, tried to allocate address" +msgid "" +"TIMEOUT: The call to %(method)s timed out. VM id=%(instance_id)s; " +"args=%(strargs)s" msgstr "" -#: nova/network/api.py:42 -msgid "Address quota exceeded. You cannot allocate any more addresses" +#: ../nova/virt/xenapi/vmops.py:564 +#, python-format +msgid "" +"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. VM " +"id=%(instance_id)s; args=%(strargs)s" msgstr "" -#: nova/network/linux_net.py:176 +#: ../nova/virt/xenapi/vmops.py:569 #, python-format -msgid "Starting VLAN inteface %s" +msgid "" +"The call to %(method)s returned an error: %(e)s. 
VM id=%(instance_id)s; " +"args=%(strargs)s" msgstr "" -#: nova/network/linux_net.py:186 +#: ../nova/virt/xenapi/vmops.py:760 #, python-format -msgid "Starting Bridge interface for %s" +msgid "OpenSSL error: %s" msgstr "" -#: nova/network/linux_net.py:254 +#: ../nova/tests/test_compute.py:148 #, python-format -msgid "Hupping dnsmasq threw %s" +msgid "Running instances: %s" msgstr "" -#: nova/network/linux_net.py:256 +#: ../nova/tests/test_compute.py:154 #, python-format -msgid "Pid %d is stale, relaunching dnsmasq" +msgid "After terminating instances: %s" +msgstr "" + +#: ../nova/cloudpipe/pipelib.py:45 +msgid "Template for script to run on cloudpipe instance boot" +msgstr "" + +#: ../nova/cloudpipe/pipelib.py:48 +msgid "Network to push into openvpn config" msgstr "" -#: nova/network/linux_net.py:334 +#: ../nova/cloudpipe/pipelib.py:51 +msgid "Netmask to push into openvpn config" +msgstr "" + +#: ../nova/cloudpipe/pipelib.py:97 #, python-format -msgid "Killing dnsmasq threw %s" +msgid "Launching VPN for %s" msgstr "" -#: nova/network/manager.py:135 -msgid "setting network host" +#: ../nova/db/sqlalchemy/migration.py:35 +msgid "python-migrate is not installed. Exiting." msgstr "" -#: nova/network/manager.py:190 +#: ../nova/image/s3.py:99 #, python-format -msgid "Leasing IP %s" +msgid "Image %s could not be found" msgstr "" -#: nova/network/manager.py:194 +#: ../nova/api/ec2/__init__.py:121 +msgid "Too many failed authentications." +msgstr "" + +#: ../nova/api/ec2/__init__.py:131 #, python-format -msgid "IP %s leased that isn't associated" +msgid "" +"Access key %(access_key)s has had %(failures)d failed authentications and " +"will be locked out for %(lock_mins)d minutes." 
msgstr "" -#: nova/network/manager.py:197 +#: ../nova/api/ec2/__init__.py:169 ../nova/objectstore/handler.py:140 #, python-format -msgid "IP %s leased to bad mac %s vs %s" +msgid "Authentication Failure: %s" msgstr "" -#: nova/network/manager.py:205 +#: ../nova/api/ec2/__init__.py:182 #, python-format -msgid "IP %s leased that was already deallocated" +msgid "Authenticated Request For %(uname)s:%(pname)s)" msgstr "" -#: nova/network/manager.py:214 +#: ../nova/api/ec2/__init__.py:207 #, python-format -msgid "IP %s released that isn't associated" +msgid "action: %s" msgstr "" -#: nova/network/manager.py:217 +#: ../nova/api/ec2/__init__.py:209 #, python-format -msgid "IP %s released from bad mac %s vs %s" +msgid "arg: %(key)s\t\tval: %(value)s" msgstr "" -#: nova/network/manager.py:220 +#: ../nova/api/ec2/__init__.py:281 #, python-format -msgid "IP %s released that was not leased" +msgid "" +"Unauthorized request for controller=%(controller)s and action=%(action)s" msgstr "" -#: nova/network/manager.py:442 +#: ../nova/api/ec2/__init__.py:314 #, python-format -msgid "Dissassociated %s stale fixed ip(s)" +msgid "InstanceNotFound raised: %s" msgstr "" -#: nova/objectstore/handler.py:106 +#: ../nova/api/ec2/__init__.py:320 #, python-format -msgid "Unknown S3 value type %r" +msgid "VolumeNotFound raised: %s" msgstr "" -#: nova/objectstore/handler.py:137 -msgid "Authenticated request" +#: ../nova/api/ec2/__init__.py:326 +#, python-format +msgid "NotFound raised: %s" msgstr "" -#: nova/objectstore/handler.py:182 -msgid "List of buckets requested" +#: ../nova/api/ec2/__init__.py:329 +#, python-format +msgid "ApiError raised: %s" msgstr "" -#: nova/objectstore/handler.py:209 +#: ../nova/api/ec2/__init__.py:338 #, python-format -msgid "List keys for bucket %s" +msgid "Unexpected error raised: %s" +msgstr "" + +#: ../nova/api/ec2/__init__.py:343 +msgid "An unknown error has occurred. Please try your request again." 
msgstr "" -#: nova/objectstore/handler.py:217 +#: ../nova/auth/dbdriver.py:84 #, python-format -msgid "Unauthorized attempt to access bucket %s" +msgid "User %s already exists" msgstr "" -#: nova/objectstore/handler.py:235 +#: ../nova/auth/dbdriver.py:106 ../nova/auth/ldapdriver.py:232 #, python-format -msgid "Creating bucket %s" +msgid "Project can't be created because manager %s doesn't exist" msgstr "" -#: nova/objectstore/handler.py:245 +#: ../nova/auth/dbdriver.py:122 ../nova/auth/ldapdriver.py:243 #, python-format -msgid "Deleting bucket %s" +msgid "Project can't be created because user %s doesn't exist" msgstr "" -#: nova/objectstore/handler.py:249 +#: ../nova/auth/dbdriver.py:135 ../nova/auth/ldapdriver.py:229 #, python-format -msgid "Unauthorized attempt to delete bucket %s" +msgid "Project can't be created because project %s already exists" msgstr "" -#: nova/objectstore/handler.py:271 +#: ../nova/auth/dbdriver.py:157 ../nova/auth/ldapdriver.py:268 #, python-format -msgid "Getting object: %s / %s" +msgid "Project can't be modified because manager %s doesn't exist" msgstr "" -#: nova/objectstore/handler.py:274 +#: ../nova/auth/dbdriver.py:245 #, python-format -msgid "Unauthorized attempt to get object %s from bucket %s" +msgid "User \"%s\" not found" msgstr "" -#: nova/objectstore/handler.py:292 +#: ../nova/auth/dbdriver.py:248 #, python-format -msgid "Putting object: %s / %s" +msgid "Project \"%s\" not found" +msgstr "" + +#: ../nova/virt/xenapi_conn.py:129 +msgid "" +"Must specify xenapi_connection_url, xenapi_connection_username (optionally), " +"and xenapi_connection_password to use connection_type=xenapi" msgstr "" -#: nova/objectstore/handler.py:295 +#: ../nova/virt/xenapi_conn.py:311 #, python-format -msgid "Unauthorized attempt to upload object %s to bucket %s" +msgid "Task [%(name)s] %(task)s status: success %(result)s" msgstr "" -#: nova/objectstore/handler.py:314 +#: ../nova/virt/xenapi_conn.py:317 #, python-format -msgid "Deleting object: %s / 
%s" +msgid "Task [%(name)s] %(task)s status: %(status)s %(error_info)s" msgstr "" -#: nova/objectstore/handler.py:393 +#: ../nova/virt/xenapi_conn.py:331 ../nova/virt/xenapi_conn.py:344 #, python-format -msgid "Not authorized to upload image: invalid directory %s" +msgid "Got exception: %s" msgstr "" -#: nova/objectstore/handler.py:401 +#: ../nova/compute/monitor.py:259 #, python-format -msgid "Not authorized to upload image: unauthorized bucket %s" +msgid "updating %s..." msgstr "" -#: nova/objectstore/handler.py:406 +#: ../nova/compute/monitor.py:289 +msgid "unexpected error during update" +msgstr "" + +#: ../nova/compute/monitor.py:356 #, python-format -msgid "Starting image upload: %s" +msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\"" msgstr "" -#: nova/objectstore/handler.py:420 +#: ../nova/compute/monitor.py:379 #, python-format -msgid "Not authorized to update attributes of image %s" +msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\"" +msgstr "" + +#: ../nova/compute/monitor.py:414 +msgid "unexpected exception getting connection" msgstr "" -#: nova/objectstore/handler.py:428 +#: ../nova/compute/monitor.py:429 #, python-format -msgid "Toggling publicity flag of image %s %r" +msgid "Found instance: %s" msgstr "" -#: nova/objectstore/handler.py:433 +#: ../nova/volume/san.py:67 #, python-format -msgid "Updating user fields on image %s" +msgid "Could not find iSCSI export for volume %s" msgstr "" -#: nova/objectstore/handler.py:447 +#: ../nova/api/ec2/apirequest.py:100 #, python-format -msgid "Unauthorized attempt to delete image %s" +msgid "" +"Unsupported API request: controller = %(controller)s, action = %(action)s" msgstr "" -#: nova/objectstore/handler.py:452 +#: ../nova/api/openstack/__init__.py:55 #, python-format -msgid "Deleted image: %s" +msgid "Caught error: %s" msgstr "" -#: nova/scheduler/chance.py:37 nova/scheduler/simple.py:73 -#: nova/scheduler/simple.py:106 nova/scheduler/simple.py:118 -msgid "No hosts found" +#: 
../nova/api/openstack/__init__.py:76 +msgid "Including admin operations in API." msgstr "" -#: nova/scheduler/driver.py:66 -msgid "Must implement a fallback schedule" +#: ../nova/console/xvp.py:99 +msgid "Rebuilding xvp conf" msgstr "" -#: nova/scheduler/manager.py:69 +#: ../nova/console/xvp.py:116 #, python-format -msgid "Casting to %s %s for %s" +msgid "Re-wrote %s" msgstr "" -#: nova/scheduler/simple.py:63 -msgid "All hosts have too many cores" +#: ../nova/console/xvp.py:121 +msgid "Stopping xvp" msgstr "" -#: nova/scheduler/simple.py:95 -msgid "All hosts have too many gigabytes" +#: ../nova/console/xvp.py:134 +msgid "Starting xvp" msgstr "" -#: nova/scheduler/simple.py:115 -msgid "All hosts have too many networks" +#: ../nova/console/xvp.py:141 +#, python-format +msgid "Error starting xvp: %s" msgstr "" -#: nova/tests/test_cloud.py:198 -msgid "Can't test instances without a real virtual env." +#: ../nova/console/xvp.py:144 +msgid "Restarting xvp" msgstr "" -#: nova/tests/test_cloud.py:210 -#, python-format -msgid "Need to watch instance %s until it's running..." +#: ../nova/console/xvp.py:146 +msgid "xvp not running..." msgstr "" -#: nova/tests/test_compute.py:104 -#, python-format -msgid "Running instances: %s" +#: ../bin/nova-manage.py:272 +msgid "" +"The above error may show that the database has not been created.\n" +"Please create a database using nova-manage sync db before running this " +"command." msgstr "" -#: nova/tests/test_compute.py:110 -#, python-format -msgid "After terminating instances: %s" +#: ../bin/nova-manage.py:426 +msgid "" +"No more networks available. If this is a new installation, you need\n" +"to call something like this:\n" +"\n" +" nova-manage network create 10.0.0.0/8 10 64\n" +"\n" +msgstr "" + +#: ../bin/nova-manage.py:431 +msgid "" +"The above error may show that the certificate db has not been created.\n" +"Please create a database by running a nova-api server on this host." 
+msgstr "" + +#: ../bin/nova-manage.py:447 ../bin/nova-manage.py:536 +msgid "network" msgstr "" -#: nova/tests/test_rpc.py:89 +#: ../bin/nova-manage.py:448 +msgid "IP address" +msgstr "" + +#: ../bin/nova-manage.py:449 +msgid "MAC address" +msgstr "" + +#: ../bin/nova-manage.py:450 +msgid "hostname" +msgstr "" + +#: ../bin/nova-manage.py:451 +msgid "host" +msgstr "" + +#: ../bin/nova-manage.py:537 +msgid "netmask" +msgstr "" + +#: ../bin/nova-manage.py:538 +msgid "start address" +msgstr "" + +#: ../nova/virt/disk.py:69 #, python-format -msgid "Nested received %s, %s" +msgid "Failed to load partition: %s" msgstr "" -#: nova/tests/test_rpc.py:94 +#: ../nova/virt/disk.py:91 #, python-format -msgid "Nested return %s" +msgid "Failed to mount filesystem: %s" msgstr "" -#: nova/tests/test_rpc.py:119 nova/tests/test_rpc.py:125 +#: ../nova/virt/disk.py:124 #, python-format -msgid "Received %s" +msgid "nbd device %s did not show up" msgstr "" -#: nova/tests/test_volume.py:162 +#: ../nova/virt/disk.py:128 #, python-format -msgid "Target %s allocated" +msgid "Could not attach image to loopback: %s" msgstr "" -#: nova/virt/connection.py:73 -msgid "Failed to open connection to the hypervisor" +#: ../nova/virt/disk.py:151 +msgid "No free nbd devices" msgstr "" -#: nova/virt/fake.py:210 +#: ../doc/ext/nova_todo.py:46 #, python-format -msgid "Instance %s Not Found" +msgid "%(filename)s, line %(line_info)d" msgstr "" -#: nova/virt/hyperv.py:118 +#. 
FIXME(chiradeep): implement this +#: ../nova/virt/hyperv.py:118 msgid "In init host" msgstr "" -#: nova/virt/hyperv.py:131 +#: ../nova/virt/hyperv.py:131 #, python-format msgid "Attempt to create duplicate vm %s" msgstr "" -#: nova/virt/hyperv.py:148 +#: ../nova/virt/hyperv.py:148 #, python-format msgid "Starting VM %s " msgstr "" -#: nova/virt/hyperv.py:150 +#: ../nova/virt/hyperv.py:150 #, python-format msgid "Started VM %s " msgstr "" -#: nova/virt/hyperv.py:152 +#: ../nova/virt/hyperv.py:152 #, python-format msgid "spawn vm failed: %s" msgstr "" -#: nova/virt/hyperv.py:169 +#: ../nova/virt/hyperv.py:169 #, python-format msgid "Failed to create VM %s" msgstr "" -#: nova/virt/hyperv.py:171 nova/virt/xenapi/vm_utils.py:125 -#, python-format -msgid "Created VM %s..." -msgstr "" - -#: nova/virt/hyperv.py:188 +#: ../nova/virt/hyperv.py:188 #, python-format msgid "Set memory for vm %s..." msgstr "" -#: nova/virt/hyperv.py:198 +#: ../nova/virt/hyperv.py:198 #, python-format msgid "Set vcpus for vm %s..." 
msgstr "" -#: nova/virt/hyperv.py:202 +#: ../nova/virt/hyperv.py:202 #, python-format -msgid "Creating disk for %s by attaching disk file %s" +msgid "Creating disk for %(vm_name)s by attaching disk file %(vhdfile)s" msgstr "" -#: nova/virt/hyperv.py:227 +#: ../nova/virt/hyperv.py:227 #, python-format msgid "Failed to add diskdrive to VM %s" msgstr "" -#: nova/virt/hyperv.py:230 +#: ../nova/virt/hyperv.py:230 #, python-format msgid "New disk drive path is %s" msgstr "" -#: nova/virt/hyperv.py:247 +#: ../nova/virt/hyperv.py:247 #, python-format msgid "Failed to add vhd file to VM %s" msgstr "" -#: nova/virt/hyperv.py:249 +#: ../nova/virt/hyperv.py:249 #, python-format msgid "Created disk for %s" msgstr "" -#: nova/virt/hyperv.py:253 +#: ../nova/virt/hyperv.py:253 #, python-format msgid "Creating nic for %s " msgstr "" -#: nova/virt/hyperv.py:272 +#: ../nova/virt/hyperv.py:272 msgid "Failed creating a port on the external vswitch" msgstr "" -#: nova/virt/hyperv.py:273 +#: ../nova/virt/hyperv.py:273 #, python-format msgid "Failed creating port for %s" msgstr "" -#: nova/virt/hyperv.py:275 +#: ../nova/virt/hyperv.py:276 #, python-format -msgid "Created switch port %s on switch %s" +msgid "Created switch port %(vm_name)s on switch %(ext_path)s" msgstr "" -#: nova/virt/hyperv.py:285 +#: ../nova/virt/hyperv.py:286 #, python-format msgid "Failed to add nic to VM %s" msgstr "" -#: nova/virt/hyperv.py:287 +#: ../nova/virt/hyperv.py:288 #, python-format msgid "Created nic for %s " msgstr "" -#: nova/virt/hyperv.py:320 +#: ../nova/virt/hyperv.py:321 #, python-format msgid "WMI job failed: %s" msgstr "" -#: nova/virt/hyperv.py:322 +#: ../nova/virt/hyperv.py:325 #, python-format -msgid "WMI job succeeded: %s, Elapsed=%s " +msgid "WMI job succeeded: %(desc)s, Elapsed=%(elap)s " msgstr "" -#: nova/virt/hyperv.py:358 +#: ../nova/virt/hyperv.py:361 #, python-format msgid "Got request to destroy vm %s" msgstr "" -#: nova/virt/hyperv.py:383 +#: ../nova/virt/hyperv.py:386 #, 
python-format msgid "Failed to destroy vm %s" msgstr "" -#: nova/virt/hyperv.py:389 +#: ../nova/virt/hyperv.py:393 #, python-format -msgid "Del: disk %s vm %s" +msgid "Del: disk %(vhdfile)s vm %(instance_name)s" msgstr "" -#: nova/virt/hyperv.py:405 +#: ../nova/virt/hyperv.py:415 #, python-format msgid "" -"Got Info for vm %s: state=%s, mem=%s, num_cpu=%s, " -"cpu_time=%s" +"Got Info for vm %(instance_id)s: state=%(state)s, mem=%(memusage)s, " +"num_cpu=%(numprocs)s, cpu_time=%(uptime)s" msgstr "" -#: nova/virt/hyperv.py:424 nova/virt/xenapi/vm_utils.py:301 +#: ../nova/virt/hyperv.py:451 #, python-format -msgid "duplicate name found: %s" +msgid "Successfully changed vm state of %(vm_name)s to %(req_state)s" msgstr "" -#: nova/virt/hyperv.py:444 +#: ../nova/virt/hyperv.py:454 #, python-format -msgid "Successfully changed vm state of %s to %s" +msgid "Failed to change vm state of %(vm_name)s to %(req_state)s" msgstr "" -#: nova/virt/hyperv.py:447 nova/virt/hyperv.py:449 +#: ../nova/compute/api.py:71 #, python-format -msgid "Failed to change vm state of %s to %s" +msgid "Instance %d was not found in get_network_topic" msgstr "" -#: nova/virt/images.py:70 +#: ../nova/compute/api.py:77 #, python-format -msgid "Finished retreving %s -- placed in %s" +msgid "Instance %d has no host" msgstr "" -#: nova/virt/libvirt_conn.py:144 +#: ../nova/compute/api.py:97 #, python-format -msgid "Connecting to libvirt: %s" +msgid "Quota exceeeded for %(pid)s, tried to run %(min_count)s instances" msgstr "" -#: nova/virt/libvirt_conn.py:157 -msgid "Connection to libvirt broke" +#: ../nova/compute/api.py:99 +#, python-format +msgid "" +"Instance quota exceeded. You can only run %s more instances of this type." msgstr "" -#: nova/virt/libvirt_conn.py:229 +#: ../nova/compute/api.py:112 +msgid "Creating a raw instance" +msgstr "" + +#: ../nova/compute/api.py:160 #, python-format -msgid "instance %s: deleting instance files %s" +msgid "Going to run %s instances..." 
msgstr "" -#: nova/virt/libvirt_conn.py:271 +#: ../nova/compute/api.py:187 #, python-format -msgid "No disk at %s" +msgid "Casting to scheduler for %(pid)s/%(uid)s's instance %(instance_id)s" msgstr "" -#: nova/virt/libvirt_conn.py:278 -msgid "Instance snapshotting is not supported for libvirtat this time" +#: ../nova/compute/api.py:292 +#, python-format +msgid "Going to try to terminate %s" msgstr "" -#: nova/virt/libvirt_conn.py:294 +#: ../nova/compute/api.py:296 #, python-format -msgid "instance %s: rebooted" +msgid "Instance %d was not found during terminate" msgstr "" -#: nova/virt/libvirt_conn.py:297 +#: ../nova/compute/api.py:301 #, python-format -msgid "_wait_for_reboot failed: %s" +msgid "Instance %d is already being terminated" msgstr "" -#: nova/virt/libvirt_conn.py:340 +#: ../nova/compute/api.py:481 #, python-format -msgid "instance %s: rescued" +msgid "Invalid device specified: %s. Example device: /dev/vdb" msgstr "" -#: nova/virt/libvirt_conn.py:343 +#: ../nova/compute/api.py:496 +msgid "Volume isn't attached to anything!" +msgstr "" + +#: ../nova/rpc.py:98 #, python-format -msgid "_wait_for_rescue failed: %s" +msgid "" +"AMQP server on %(fl_host)s:%(fl_port)d is unreachable. Trying again in " +"%(fl_intv)d seconds." msgstr "" -#: nova/virt/libvirt_conn.py:370 +#: ../nova/rpc.py:103 #, python-format -msgid "instance %s: is running" +msgid "Unable to connect to AMQP server after %d tries. Shutting down." msgstr "" +"Nepodařilo se připojit k AMQP serveru ani po %d pokusech. Tento proces bude " +"ukončen." 
+ +#: ../nova/rpc.py:122 +msgid "Reconnected to queue" +msgstr "Znovu připojeno k AMQP frontě" -#: nova/virt/libvirt_conn.py:381 +#: ../nova/rpc.py:129 +msgid "Failed to fetch message from queue" +msgstr "Selhalo získání zprávy z AMQP fronty" + +#: ../nova/rpc.py:159 #, python-format -msgid "instance %s: booted" +msgid "Initing the Adapter Consumer for %s" msgstr "" -#: nova/virt/libvirt_conn.py:384 nova/virt/xenapi/vmops.py:116 +#: ../nova/rpc.py:178 #, python-format -msgid "instance %s: failed to boot" +msgid "received %s" +msgstr "získáno: %s" + +#. NOTE(vish): we may not want to ack here, but that means that bad +#. messages stay in the queue indefinitely, so for now +#. we just log the message and send an error string +#. back to the caller +#: ../nova/rpc.py:191 +#, python-format +msgid "no method for message: %s" +msgstr "Není metoda pro zpracování zprávy: %s" + +#: ../nova/rpc.py:192 +#, python-format +msgid "No method for message: %s" +msgstr "Není metoda pro zpracování zprávy: %s" + +#: ../nova/rpc.py:253 +#, python-format +msgid "Returning exception %s to caller" +msgstr "Volajícímu je vrácena výjimka: %s" + +#: ../nova/rpc.py:294 +#, python-format +msgid "unpacked context: %s" +msgstr "rozbalený obsah: %s" + +#: ../nova/rpc.py:313 +msgid "Making asynchronous call..." +msgstr "Volání asynchronní funkce..." + +#: ../nova/rpc.py:316 +#, python-format +msgid "MSG_ID is %s" +msgstr "MSG_ID je %s" + +#: ../nova/rpc.py:354 +msgid "Making asynchronous cast..." msgstr "" -#: nova/virt/libvirt_conn.py:395 +#: ../nova/rpc.py:364 #, python-format -msgid "virsh said: %r" +msgid "response %s" +msgstr "odpověď %s" + +#: ../nova/rpc.py:373 +#, python-format +msgid "topic is %s" msgstr "" -#: nova/virt/libvirt_conn.py:399 -msgid "cool, it's a device" +#: ../nova/rpc.py:374 +#, python-format +msgid "message %s" +msgstr "zpráva %s" + +#: ../nova/volume/driver.py:78 +#, python-format +msgid "Recovering from a failed execute. 
Try number %s" msgstr "" -#: nova/virt/libvirt_conn.py:407 +#: ../nova/volume/driver.py:87 #, python-format -msgid "data: %r, fpath: %r" +msgid "volume group %s doesn't exist" msgstr "" -#: nova/virt/libvirt_conn.py:415 +#: ../nova/volume/driver.py:220 #, python-format -msgid "Contents of file %s: %r" +msgid "FAKE AOE: %s" msgstr "" -#: nova/virt/libvirt_conn.py:449 +#: ../nova/volume/driver.py:233 +msgid "Skipping ensure_export. No iscsi_target " +msgstr "" + +#: ../nova/volume/driver.py:279 ../nova/volume/driver.py:288 +msgid "Skipping remove_export. No iscsi_target " +msgstr "" + +#: ../nova/volume/driver.py:347 #, python-format -msgid "instance %s: Creating image" +msgid "FAKE ISCSI: %s" msgstr "" -#: nova/virt/libvirt_conn.py:505 +#: ../nova/volume/driver.py:359 #, python-format -msgid "instance %s: injecting key into image %s" +msgid "rbd has no pool %s" msgstr "" -#: nova/virt/libvirt_conn.py:508 +#: ../nova/volume/driver.py:414 #, python-format -msgid "instance %s: injecting net into image %s" +msgid "Sheepdog is not working: %s" +msgstr "" + +#: ../nova/volume/driver.py:416 +msgid "Sheepdog is not working" msgstr "" -#: nova/virt/libvirt_conn.py:516 +#: ../nova/wsgi.py:68 #, python-format -msgid "instance %s: ignoring error injecting data into image %s (%s)" +msgid "Starting %(arg0)s on %(host)s:%(port)s" msgstr "" -#: nova/virt/libvirt_conn.py:544 nova/virt/libvirt_conn.py:547 +#: ../nova/wsgi.py:147 +msgid "You must implement __call__" +msgstr "" + +#: ../bin/nova-instancemonitor.py:55 +msgid "Starting instance monitor" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:58 +msgid "leasing ip" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:73 +msgid "Adopted old lease or got a change of mac/hostname" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:80 +msgid "releasing ip" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:123 #, python-format -msgid "instance %s: starting toXML method" +msgid "" +"Called %(action)s for mac %(mac)s with ip %(ip)s and hostname %(hostname)s " +"on 
interface %(interface)s" msgstr "" -#: nova/virt/libvirt_conn.py:589 +#: ../nova/virt/fake.py:239 #, python-format -msgid "instance %s: finished toXML method" +msgid "Instance %s Not Found" +msgstr "" + +#: ../nova/network/manager.py:153 +#, python-format +msgid "Dissassociated %s stale fixed ip(s)" +msgstr "" + +#: ../nova/network/manager.py:157 +msgid "setting network host" +msgstr "" + +#: ../nova/network/manager.py:212 +#, python-format +msgid "Leasing IP %s" +msgstr "" + +#: ../nova/network/manager.py:216 +#, python-format +msgid "IP %s leased that isn't associated" +msgstr "" + +#: ../nova/network/manager.py:220 +#, python-format +msgid "IP %(address)s leased to bad mac %(inst_addr)s vs %(mac)s" +msgstr "" + +#: ../nova/network/manager.py:228 +#, python-format +msgid "IP %s leased that was already deallocated" +msgstr "" + +#: ../nova/network/manager.py:233 +#, python-format +msgid "Releasing IP %s" +msgstr "" + +#: ../nova/network/manager.py:237 +#, python-format +msgid "IP %s released that isn't associated" +msgstr "" + +#: ../nova/network/manager.py:241 +#, python-format +msgid "IP %(address)s released from bad mac %(inst_addr)s vs %(mac)s" +msgstr "" + +#: ../nova/network/manager.py:244 +#, python-format +msgid "IP %s released that was not leased" msgstr "" -#: nova/virt/xenapi_conn.py:113 +#: ../nova/network/manager.py:519 msgid "" -"Must specify xenapi_connection_url, xenapi_connection_username (optionally), " -"and xenapi_connection_password to use connection_type=xenapi" +"The sum between the number of networks and the vlan start cannot be greater " +"than 4094" msgstr "" -#: nova/virt/xenapi_conn.py:263 +#: ../nova/virt/xenapi/volume_utils.py:57 #, python-format -msgid "Task [%s] %s status: success %s" +msgid "Introducing %s..." msgstr "" -#: nova/virt/xenapi_conn.py:271 +#: ../nova/virt/xenapi/volume_utils.py:74 #, python-format -msgid "Task [%s] %s status: %s %s" +msgid "Introduced %(label)s as %(sr_ref)s." 
+msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:78 +msgid "Unable to create Storage Repository" msgstr "" -#: nova/virt/xenapi_conn.py:287 nova/virt/xenapi_conn.py:300 +#: ../nova/virt/xenapi/volume_utils.py:90 #, python-format -msgid "Got exception: %s" +msgid "Unable to find SR from VBD %s" msgstr "" -#: nova/virt/xenapi/fake.py:72 +#: ../nova/virt/xenapi/volume_utils.py:96 #, python-format -msgid "%s: _db_content => %s" +msgid "Forgetting SR %s ... " msgstr "" -#: nova/virt/xenapi/fake.py:247 nova/virt/xenapi/fake.py:338 -#: nova/virt/xenapi/fake.py:356 nova/virt/xenapi/fake.py:404 -msgid "Raising NotImplemented" +#: ../nova/virt/xenapi/volume_utils.py:101 +#, python-format +msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s" msgstr "" -#: nova/virt/xenapi/fake.py:249 +#: ../nova/virt/xenapi/volume_utils.py:107 #, python-format -msgid "xenapi.fake does not have an implementation for %s" +msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s" msgstr "" -#: nova/virt/xenapi/fake.py:283 +#: ../nova/virt/xenapi/volume_utils.py:111 #, python-format -msgid "Calling %s %s" +msgid "Forgetting SR %s done." 
msgstr "" -#: nova/virt/xenapi/fake.py:288 +#: ../nova/virt/xenapi/volume_utils.py:113 #, python-format -msgid "Calling getter %s" +msgid "Ignoring exception %(exc)s when forgetting SR %(sr_ref)s" msgstr "" -#: nova/virt/xenapi/fake.py:340 +#: ../nova/virt/xenapi/volume_utils.py:123 #, python-format -msgid "" -"xenapi.fake does not have an implementation for %s or it has been called " -"with the wrong number of arguments" +msgid "Unable to introduce VDI on SR %s" msgstr "" -#: nova/virt/xenapi/network_utils.py:40 +#: ../nova/virt/xenapi/volume_utils.py:128 #, python-format -msgid "Found non-unique network for bridge %s" +msgid "Unable to get record of VDI %s on" msgstr "" -#: nova/virt/xenapi/network_utils.py:43 +#: ../nova/virt/xenapi/volume_utils.py:146 #, python-format -msgid "Found no network for bridge %s" +msgid "Unable to introduce VDI for SR %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:127 +#: ../nova/virt/xenapi/volume_utils.py:175 #, python-format -msgid "Created VM %s as %s." +msgid "Unable to obtain target information %(device_path)s, %(mountpoint)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:147 +#: ../nova/virt/xenapi/volume_utils.py:197 #, python-format -msgid "Creating VBD for VM %s, VDI %s ... " +msgid "Mountpoint cannot be translated: %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:149 +#: ../nova/objectstore/image.py:262 #, python-format -msgid "Created VBD %s for VM %s, VDI %s." 
+msgid "Failed to decrypt private key: %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:165 +#: ../nova/objectstore/image.py:269 #, python-format -msgid "VBD not found in instance %s" +msgid "Failed to decrypt initialization vector: %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:175 +#: ../nova/objectstore/image.py:277 #, python-format -msgid "Unable to unplug VBD %s" +msgid "Failed to decrypt image file %(image_file)s: %(err)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:187 +#: ../nova/objectstore/handler.py:106 #, python-format -msgid "Unable to destroy VBD %s" +msgid "Unknown S3 value type %r" +msgstr "" + +#: ../nova/objectstore/handler.py:137 +msgid "Authenticated request" msgstr "" -#: nova/virt/xenapi/vm_utils.py:202 +#: ../nova/objectstore/handler.py:182 +msgid "List of buckets requested" +msgstr "" + +#: ../nova/objectstore/handler.py:209 #, python-format -msgid "Creating VIF for VM %s, network %s." +msgid "List keys for bucket %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:205 +#: ../nova/objectstore/handler.py:217 #, python-format -msgid "Created VIF %s for VM %s, network %s." +msgid "Unauthorized attempt to access bucket %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:216 +#: ../nova/objectstore/handler.py:235 #, python-format -msgid "Snapshotting VM %s with label '%s'..." +msgid "Creating bucket %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:229 +#: ../nova/objectstore/handler.py:245 #, python-format -msgid "Created snapshot %s from VM %s." 
+msgid "Deleting bucket %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:243 +#: ../nova/objectstore/handler.py:249 #, python-format -msgid "Asking xapi to upload %s as '%s'" +msgid "Unauthorized attempt to delete bucket %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:261 +#: ../nova/objectstore/handler.py:273 #, python-format -msgid "Asking xapi to fetch %s as %s" +msgid "Getting object: %(bname)s / %(nm)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:279 +#: ../nova/objectstore/handler.py:276 #, python-format -msgid "Looking up vdi %s for PV kernel" +msgid "Unauthorized attempt to get object %(nm)s from bucket %(bname)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:290 +#: ../nova/objectstore/handler.py:296 #, python-format -msgid "PV Kernel in VDI:%d" +msgid "Putting object: %(bname)s / %(nm)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:318 +#: ../nova/objectstore/handler.py:299 #, python-format -msgid "VDI %s is still available" +msgid "Unauthorized attempt to upload object %(nm)s to bucket %(bname)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:331 +#: ../nova/objectstore/handler.py:318 #, python-format -msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgid "Deleting object: %(bname)s / %(nm)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:333 +#: ../nova/objectstore/handler.py:322 #, python-format -msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgid "Unauthorized attempt to delete object %(nm)s from bucket %(bname)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:390 +#: ../nova/objectstore/handler.py:396 #, python-format -msgid "VHD %s has parent %s" +msgid "Not authorized to upload image: invalid directory %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:407 +#: ../nova/objectstore/handler.py:404 #, python-format -msgid "Re-scanning SR %s" +msgid "Not authorized to upload image: unauthorized bucket %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:431 +#: ../nova/objectstore/handler.py:409 #, python-format -msgid "Parent %s doesn't match original parent %s, waiting for 
coalesce..." +msgid "Starting image upload: %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:448 +#: ../nova/objectstore/handler.py:423 #, python-format -msgid "No VDIs found for VM %s" +msgid "Not authorized to update attributes of image %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:452 +#: ../nova/objectstore/handler.py:431 #, python-format -msgid "Unexpected number of VDIs (%s) found for VM %s" +msgid "Toggling publicity flag of image %(image_id)s %(newstatus)r" msgstr "" -#: nova/virt/xenapi/vmops.py:62 +#. other attributes imply update +#: ../nova/objectstore/handler.py:436 #, python-format -msgid "Attempted to create non-unique name %s" +msgid "Updating user fields on image %s" msgstr "" -#: nova/virt/xenapi/vmops.py:99 +#: ../nova/objectstore/handler.py:450 #, python-format -msgid "Starting VM %s..." +msgid "Unauthorized attempt to delete image %s" msgstr "" -#: nova/virt/xenapi/vmops.py:101 +#: ../nova/objectstore/handler.py:455 #, python-format -msgid "Spawning VM %s created %s." 
+msgid "Deleted image: %s" msgstr "" -#: nova/virt/xenapi/vmops.py:112 +#: ../nova/auth/manager.py:259 #, python-format -msgid "Instance %s: booted" +msgid "Looking up user: %r" msgstr "" -#: nova/virt/xenapi/vmops.py:137 +#: ../nova/auth/manager.py:263 #, python-format -msgid "Instance not present %s" +msgid "Failed authorization for access key %s" msgstr "" -#: nova/virt/xenapi/vmops.py:166 +#: ../nova/auth/manager.py:264 #, python-format -msgid "Starting snapshot for VM %s" +msgid "No user found for access key %s" msgstr "" -#: nova/virt/xenapi/vmops.py:174 +#: ../nova/auth/manager.py:270 #, python-format -msgid "Unable to Snapshot %s: %s" +msgid "Using project name = user name (%s)" msgstr "" -#: nova/virt/xenapi/vmops.py:184 +#: ../nova/auth/manager.py:277 #, python-format -msgid "Finished snapshot and upload for VM %s" +msgid "failed authorization: no project named %(pjid)s (user=%(uname)s)" msgstr "" -#: nova/virt/xenapi/vmops.py:252 +#: ../nova/auth/manager.py:279 #, python-format -msgid "suspend: instance not present %s" +msgid "No project called %s could be found" msgstr "" -#: nova/virt/xenapi/vmops.py:262 +#: ../nova/auth/manager.py:287 #, python-format -msgid "resume: instance not present %s" +msgid "" +"Failed authorization: user %(uname)s not admin and not member of project " +"%(pjname)s" msgstr "" -#: nova/virt/xenapi/vmops.py:271 +#: ../nova/auth/manager.py:289 #, python-format -msgid "Instance not found %s" +msgid "User %(uid)s is not a member of project %(pjid)s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:57 +#: ../nova/auth/manager.py:298 ../nova/auth/manager.py:309 #, python-format -msgid "Introducing %s..." 
+msgid "Invalid signature for user %s" +msgstr "" + +#: ../nova/auth/manager.py:299 ../nova/auth/manager.py:310 +msgid "Signature does not match" +msgstr "" + +#: ../nova/auth/manager.py:380 +msgid "Must specify project" msgstr "" -#: nova/virt/xenapi/volume_utils.py:74 +#: ../nova/auth/manager.py:414 #, python-format -msgid "Introduced %s as %s." +msgid "The %s role can not be found" msgstr "" -#: nova/virt/xenapi/volume_utils.py:78 -msgid "Unable to create Storage Repository" +#: ../nova/auth/manager.py:416 +#, python-format +msgid "The %s role is global only" msgstr "" -#: nova/virt/xenapi/volume_utils.py:90 +#: ../nova/auth/manager.py:420 #, python-format -msgid "Unable to find SR from VBD %s" +msgid "Adding role %(role)s to user %(uid)s in project %(pid)s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:96 +#: ../nova/auth/manager.py:423 #, python-format -msgid "Forgetting SR %s ... " +msgid "Adding sitewide role %(role)s to user %(uid)s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:101 +#: ../nova/auth/manager.py:448 #, python-format -msgid "Ignoring exception %s when getting PBDs for %s" +msgid "Removing role %(role)s from user %(uid)s on project %(pid)s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:107 +#: ../nova/auth/manager.py:451 #, python-format -msgid "Ignoring exception %s when unplugging PBD %s" +msgid "Removing sitewide role %(role)s from user %(uid)s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:111 +#: ../nova/auth/manager.py:515 #, python-format -msgid "Forgetting SR %s done." 
+msgid "Created project %(name)s with manager %(manager_user)s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:113 +#: ../nova/auth/manager.py:533 #, python-format -msgid "Ignoring exception %s when forgetting SR %s" +msgid "modifying project %s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:123 +#: ../nova/auth/manager.py:545 #, python-format -msgid "Unable to introduce VDI on SR %s" +msgid "Adding user %(uid)s to project %(pid)s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:128 +#: ../nova/auth/manager.py:566 #, python-format -msgid "Unable to get record of VDI %s on" +msgid "Remove user %(uid)s from project %(pid)s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:146 +#: ../nova/auth/manager.py:592 #, python-format -msgid "Unable to introduce VDI for SR %s" +msgid "Deleting project %s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:175 +#: ../nova/auth/manager.py:650 #, python-format -msgid "Unable to obtain target information %s, %s" +msgid "Created user %(rvname)s (admin: %(rvadmin)r)" msgstr "" -#: nova/virt/xenapi/volume_utils.py:197 +#: ../nova/auth/manager.py:659 #, python-format -msgid "Mountpoint cannot be translated: %s" +msgid "Deleting user %s" msgstr "" -#: nova/virt/xenapi/volumeops.py:51 +#: ../nova/auth/manager.py:669 #, python-format -msgid "Attach_volume: %s, %s, %s" +msgid "Access Key change for user %s" msgstr "" -#: nova/virt/xenapi/volumeops.py:69 +#: ../nova/auth/manager.py:671 #, python-format -msgid "Unable to create VDI on SR %s for instance %s" +msgid "Secret Key change for user %s" msgstr "" -#: nova/virt/xenapi/volumeops.py:81 +#: ../nova/auth/manager.py:673 #, python-format -msgid "Unable to use SR %s for instance %s" +msgid "Admin status set to %(admin)r for user %(uid)s" msgstr "" -#: nova/virt/xenapi/volumeops.py:93 +#: ../nova/auth/manager.py:722 #, python-format -msgid "Unable to attach volume to instance %s" +msgid "No vpn data for project %s" msgstr "" -#: nova/virt/xenapi/volumeops.py:95 +#: ../nova/service.py:161 #, 
python-format -msgid "Mountpoint %s attached to instance %s" +msgid "Starting %(topic)s node (version %(vcs_string)s)" +msgstr "" + +#: ../nova/service.py:174 +msgid "Service killed that has no database entry" msgstr "" -#: nova/virt/xenapi/volumeops.py:106 +#: ../nova/service.py:195 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: ../nova/service.py:207 +msgid "Recovered model server connection!" +msgstr "" + +#: ../nova/service.py:213 +msgid "model server went away" +msgstr "" + +#: ../nova/auth/ldapdriver.py:174 #, python-format -msgid "Detach_volume: %s, %s" +msgid "LDAP user %s already exists" msgstr "" -#: nova/virt/xenapi/volumeops.py:113 +#: ../nova/auth/ldapdriver.py:205 #, python-format -msgid "Unable to locate volume %s" +msgid "LDAP object for %s doesn't exist" msgstr "" -#: nova/virt/xenapi/volumeops.py:121 +#: ../nova/auth/ldapdriver.py:348 #, python-format -msgid "Unable to detach volume %s" +msgid "User %s doesn't exist" msgstr "" -#: nova/virt/xenapi/volumeops.py:128 +#: ../nova/auth/ldapdriver.py:472 #, python-format -msgid "Mountpoint %s detached from instance %s" +msgid "Group can't be created because group %s already exists" msgstr "" -#: nova/volume/api.py:44 +#: ../nova/auth/ldapdriver.py:478 #, python-format -msgid "Quota exceeeded for %s, tried to create %sG volume" +msgid "Group can't be created because user %s doesn't exist" msgstr "" -#: nova/volume/api.py:46 +#: ../nova/auth/ldapdriver.py:495 #, python-format -msgid "Volume quota exceeded. 
You cannot create a volume of size %s" +msgid "User %s can't be searched in group because the user doesn't exist" msgstr "" -#: nova/volume/api.py:70 nova/volume/api.py:95 -msgid "Volume status must be available" +#: ../nova/auth/ldapdriver.py:507 +#, python-format +msgid "User %s can't be added to the group because the user doesn't exist" msgstr "" -#: nova/volume/api.py:97 -msgid "Volume is already attached" +#: ../nova/auth/ldapdriver.py:510 ../nova/auth/ldapdriver.py:521 +#, python-format +msgid "The group at dn %s doesn't exist" msgstr "" -#: nova/volume/api.py:103 -msgid "Volume is already detached" +#: ../nova/auth/ldapdriver.py:513 +#, python-format +msgid "User %(uid)s is already a member of the group %(group_dn)s" msgstr "" -#: nova/volume/driver.py:76 +#: ../nova/auth/ldapdriver.py:524 #, python-format -msgid "Recovering from a failed execute. Try number %s" +msgid "" +"User %s can't be removed from the group because the user doesn't exist" msgstr "" -#: nova/volume/driver.py:85 +#: ../nova/auth/ldapdriver.py:528 #, python-format -msgid "volume group %s doesn't exist" +msgid "User %s is not a member of the group" msgstr "" -#: nova/volume/driver.py:210 +#: ../nova/auth/ldapdriver.py:542 #, python-format -msgid "FAKE AOE: %s" +msgid "" +"Attempted to remove the last member of a group. Deleting the group at %s " +"instead." 
msgstr "" -#: nova/volume/driver.py:315 +#: ../nova/auth/ldapdriver.py:549 #, python-format -msgid "FAKE ISCSI: %s" +msgid "User %s can't be removed from all because the user doesn't exist" msgstr "" -#: nova/volume/manager.py:85 +#: ../nova/auth/ldapdriver.py:564 #, python-format -msgid "Re-exporting %s volumes" +msgid "Group at dn %s doesn't exist" msgstr "" -#: nova/volume/manager.py:93 +#: ../nova/virt/xenapi/network_utils.py:40 #, python-format -msgid "volume %s: creating" +msgid "Found non-unique network for bridge %s" msgstr "" -#: nova/volume/manager.py:102 +#: ../nova/virt/xenapi/network_utils.py:43 #, python-format -msgid "volume %s: creating lv of size %sG" +msgid "Found no network for bridge %s" msgstr "" -#: nova/volume/manager.py:106 +#: ../nova/api/ec2/admin.py:97 #, python-format -msgid "volume %s: creating export" +msgid "Creating new user: %s" msgstr "" -#: nova/volume/manager.py:113 +#: ../nova/api/ec2/admin.py:105 #, python-format -msgid "volume %s: created successfully" +msgid "Deleting user: %s" msgstr "" -#: nova/volume/manager.py:121 -msgid "Volume is still attached" +#: ../nova/api/ec2/admin.py:127 +#, python-format +msgid "Adding role %(role)s to user %(user)s for project %(project)s" msgstr "" -#: nova/volume/manager.py:123 -msgid "Volume is not local to this node" +#: ../nova/api/ec2/admin.py:131 +#, python-format +msgid "Adding sitewide role %(role)s to user %(user)s" msgstr "" -#: nova/volume/manager.py:124 +#: ../nova/api/ec2/admin.py:137 #, python-format -msgid "volume %s: removing export" +msgid "Removing role %(role)s from user %(user)s for project %(project)s" msgstr "" -#: nova/volume/manager.py:126 +#: ../nova/api/ec2/admin.py:141 #, python-format -msgid "volume %s: deleting" +msgid "Removing sitewide role %(role)s from user %(user)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:146 ../nova/api/ec2/admin.py:223 +msgid "operation must be add or remove" msgstr "" -#: nova/volume/manager.py:129 +#: ../nova/api/ec2/admin.py:159 #, 
python-format -msgid "volume %s: deleted successfully" +msgid "Getting x509 for user: %(name)s on project: %(project)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:177 +#, python-format +msgid "Create project %(name)s managed by %(manager_user)s" msgstr "" + +#: ../nova/api/ec2/admin.py:190 +#, python-format +msgid "Modify project: %(name)s managed by %(manager_user)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:200 +#, python-format +msgid "Delete project: %s" +msgstr "" + +#: ../nova/api/ec2/admin.py:214 +#, python-format +msgid "Adding user %(user)s to project %(project)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:218 +#, python-format +msgid "Removing user %(user)s from project %(project)s" +msgstr "" + +#, python-format +#~ msgid "AMQP server on %s:%d is unreachable. Trying again in %d seconds." +#~ msgstr "AMQP server na %s:%d není dosažitelný. Zkusím znovu za %d sekund." + +#, python-format +#~ msgid "" +#~ "%s\n" +#~ "Command: %s\n" +#~ "Exit code: %s\n" +#~ "Stdout: %r\n" +#~ "Stderr: %r" +#~ msgstr "" +#~ "%s\n" +#~ "Příkaz: %s\n" +#~ "Vrácená hodnota: %s\n" +#~ "Stdout: %r\n" +#~ "Stderr: %r" @@ -7,2124 +7,2842 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n" -"POT-Creation-Date: 2011-01-10 11:25-0800\n" +"POT-Creation-Date: 2011-02-21 10:03-0500\n" "PO-Revision-Date: 2011-01-15 21:46+0000\n" "Last-Translator: Soren Hansen <soren@linux2go.dk>\n" "Language-Team: Danish <da@li.org>\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Launchpad-Export-Date: 2011-02-05 05:36+0000\n" -"X-Generator: Launchpad (build 12177)\n" +"X-Launchpad-Export-Date: 2011-03-19 06:18+0000\n" +"X-Generator: Launchpad (build 12559)\n" -#: nova/crypto.py:46 +#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55 +#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110 +#: ../nova/scheduler/simple.py:122 +msgid "No hosts found" +msgstr "" + +#: 
../nova/exception.py:33 +msgid "Unexpected error while running command." +msgstr "" + +#: ../nova/exception.py:36 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" + +#: ../nova/exception.py:107 +msgid "DB exception wrapped" +msgstr "" + +#. exc_type, exc_value, exc_traceback = sys.exc_info() +#: ../nova/exception.py:120 +msgid "Uncaught exception" +msgstr "" + +#: ../nova/volume/api.py:45 +#, python-format +msgid "Quota exceeeded for %(pid)s, tried to create %(size)sG volume" +msgstr "" + +#: ../nova/volume/api.py:47 +#, python-format +msgid "Volume quota exceeded. You cannot create a volume of size %sG" +msgstr "" + +#: ../nova/volume/api.py:71 ../nova/volume/api.py:96 +msgid "Volume status must be available" +msgstr "" + +#: ../nova/volume/api.py:98 +msgid "Volume is already attached" +msgstr "" + +#: ../nova/volume/api.py:104 +msgid "Volume is already detached" +msgstr "" + +#: ../nova/api/openstack/servers.py:72 +msgid "Failed to read private ip" +msgstr "" + +#: ../nova/api/openstack/servers.py:79 +msgid "Failed to read public ip(s)" +msgstr "" + +#: ../nova/api/openstack/servers.py:152 +#, python-format +msgid "%(param)s property not found for image %(_image_id)s" +msgstr "" + +#: ../nova/api/openstack/servers.py:168 +msgid "No keypairs defined" +msgstr "" + +#: ../nova/api/openstack/servers.py:238 +#, python-format +msgid "Compute.api::lock %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:253 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:267 +#, python-format +msgid "Compute.api::get_lock %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:281 +#, python-format +msgid "Compute.api::reset_network %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:292 +#, python-format +msgid "Compute.api::pause %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:303 +#, python-format +msgid 
"Compute.api::unpause %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:314 +#, python-format +msgid "compute.api::suspend %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:325 +#, python-format +msgid "compute.api::resume %s" +msgstr "" + +#: ../nova/twistd.py:157 +msgid "Wrong number of arguments." +msgstr "" + +#: ../nova/twistd.py:209 +#, python-format +msgid "pidfile %s does not exist. Daemon not running?\n" +msgstr "" + +#: ../nova/twistd.py:221 +msgid "No such process" +msgstr "" + +#: ../nova/twistd.py:230 ../nova/service.py:224 +#, python-format +msgid "Serving %s" +msgstr "" + +#: ../nova/twistd.py:262 ../nova/service.py:225 +msgid "Full set of FLAGS:" +msgstr "" + +#: ../nova/twistd.py:266 +#, python-format +msgid "Starting %s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101 +#: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741 +#: ../nova/api/ec2/__init__.py:317 +#, python-format +msgid "Instance %s not found" +msgstr "" + +#. NOTE: No Resource Pool concept so far +#: ../nova/virt/xenapi/volumeops.py:51 +#, python-format +msgid "Attach_volume: %(instance_name)s, %(device_path)s, %(mountpoint)s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:69 +#, python-format +msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:80 +#, python-format +msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:91 +#, python-format +msgid "Unable to attach volume to instance %s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:93 +#, python-format +msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" +msgstr "" + +#. 
Detach VBD from VM +#: ../nova/virt/xenapi/volumeops.py:104 +#, python-format +msgid "Detach_volume: %(instance_name)s, %(mountpoint)s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:112 +#, python-format +msgid "Unable to locate volume %s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:120 +#, python-format +msgid "Unable to detach volume %s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:127 +#, python-format +msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" +msgstr "" + +#: ../nova/compute/instance_types.py:41 +#, python-format +msgid "Unknown instance type: %s" +msgstr "" + +#: ../nova/crypto.py:46 msgid "Filename of root CA" msgstr "" -#: nova/crypto.py:49 +#: ../nova/crypto.py:49 msgid "Filename of private key" msgstr "Filnavn for privatnøgle" -#: nova/crypto.py:51 +#: ../nova/crypto.py:51 msgid "Filename of root Certificate Revokation List" msgstr "" -#: nova/crypto.py:53 +#: ../nova/crypto.py:53 msgid "Where we keep our keys" msgstr "" -#: nova/crypto.py:55 +#: ../nova/crypto.py:55 msgid "Where we keep our root CA" msgstr "" -#: nova/crypto.py:57 +#: ../nova/crypto.py:57 msgid "Should we use a CA for each project?" msgstr "" -#: nova/crypto.py:61 +#: ../nova/crypto.py:61 #, python-format msgid "Subject for certificate for users, %s for project, user, timestamp" msgstr "" -#: nova/crypto.py:66 +#: ../nova/crypto.py:66 #, python-format msgid "Subject for certificate for projects, %s for project, timestamp" msgstr "" -#: nova/crypto.py:71 +#: ../nova/crypto.py:71 #, python-format msgid "Subject for certificate for vpns, %s for project, timestamp" msgstr "" -#: nova/crypto.py:258 +#: ../nova/crypto.py:258 #, python-format msgid "Flags path: %s" msgstr "" -#: nova/exception.py:33 -msgid "Unexpected error while running command." 
+#: ../nova/scheduler/manager.py:69 +#, python-format +msgid "Casting to %(topic)s %(host)s for %(method)s" +msgstr "" + +#: ../nova/compute/manager.py:78 +#, python-format +msgid "check_instance_lock: decorating: |%s|" msgstr "" -#: nova/exception.py:36 +#: ../nova/compute/manager.py:80 #, python-format msgid "" -"%s\n" -"Command: %s\n" -"Exit code: %s\n" -"Stdout: %r\n" -"Stderr: %r" +"check_instance_lock: arguments: |%(self)s| |%(context)s| |%(instance_id)s|" msgstr "" -#: nova/exception.py:86 -msgid "Uncaught exception" +#: ../nova/compute/manager.py:84 +#, python-format +msgid "check_instance_lock: locked: |%s|" msgstr "" -#: nova/fakerabbit.py:48 +#: ../nova/compute/manager.py:86 #, python-format -msgid "(%s) publish (key: %s) %s" +msgid "check_instance_lock: admin: |%s|" msgstr "" -#: nova/fakerabbit.py:53 +#: ../nova/compute/manager.py:91 #, python-format -msgid "Publishing to route %s" +msgid "check_instance_lock: executing: |%s|" msgstr "" -#: nova/fakerabbit.py:83 +#: ../nova/compute/manager.py:95 #, python-format -msgid "Declaring queue %s" +msgid "check_instance_lock: not executing |%s|" +msgstr "" + +#: ../nova/compute/manager.py:179 +msgid "Instance has already been created" msgstr "" -#: nova/fakerabbit.py:89 +#: ../nova/compute/manager.py:180 #, python-format -msgid "Declaring exchange %s" +msgid "instance %s: starting..." msgstr "" -#: nova/fakerabbit.py:95 +#. pylint: disable=W0702 +#: ../nova/compute/manager.py:219 #, python-format -msgid "Binding %s to %s with key %s" +msgid "instance %s: Failed to spawn" msgstr "" -#: nova/fakerabbit.py:120 +#: ../nova/compute/manager.py:233 ../nova/tests/test_cloud.py:286 #, python-format -msgid "Getting from %s: %s" +msgid "Terminating instance %s" msgstr "" -#: nova/rpc.py:92 +#: ../nova/compute/manager.py:255 #, python-format -msgid "AMQP server on %s:%d is unreachable. Trying again in %d seconds." 
+msgid "Deallocating address %s" msgstr "" -#: nova/rpc.py:99 +#: ../nova/compute/manager.py:268 #, python-format -msgid "Unable to connect to AMQP server after %d tries. Shutting down." +msgid "trying to destroy already destroyed instance: %s" msgstr "" -#: nova/rpc.py:118 -msgid "Reconnected to queue" +#: ../nova/compute/manager.py:282 +#, python-format +msgid "Rebooting instance %s" msgstr "" -#: nova/rpc.py:125 -msgid "Failed to fetch message from queue" +#: ../nova/compute/manager.py:287 +#, python-format +msgid "" +"trying to reboot a non-running instance: %(instance_id)s (state: %(state)s " +"expected: %(running)s)" msgstr "" -#: nova/rpc.py:155 +#: ../nova/compute/manager.py:311 #, python-format -msgid "Initing the Adapter Consumer for %s" +msgid "instance %s: snapshotting" msgstr "" -#: nova/rpc.py:170 +#: ../nova/compute/manager.py:316 #, python-format -msgid "received %s" +msgid "" +"trying to snapshot a non-running instance: %(instance_id)s (state: %(state)s " +"expected: %(running)s)" msgstr "" -#: nova/rpc.py:183 +#: ../nova/compute/manager.py:332 #, python-format -msgid "no method for message: %s" +msgid "" +"trying to reset the password on a non-running instance: %(instance_id)s " +"(state: %(instance_state)s expected: %(expected_state)s)" msgstr "" -#: nova/rpc.py:184 +#: ../nova/compute/manager.py:335 #, python-format -msgid "No method for message: %s" +msgid "instance %s: setting admin password" msgstr "" -#: nova/rpc.py:245 +#: ../nova/compute/manager.py:353 #, python-format -msgid "Returning exception %s to caller" +msgid "" +"trying to inject a file into a non-running instance: %(instance_id)s (state: " +"%(instance_state)s expected: %(expected_state)s)" msgstr "" -#: nova/rpc.py:286 +#: ../nova/compute/manager.py:362 #, python-format -msgid "unpacked context: %s" +msgid "instance %(nm)s: injecting file to %(plain_path)s" msgstr "" -#: nova/rpc.py:305 -msgid "Making asynchronous call..." 
+#: ../nova/compute/manager.py:372 +#, python-format +msgid "instance %s: rescuing" msgstr "" -#: nova/rpc.py:308 +#: ../nova/compute/manager.py:387 #, python-format -msgid "MSG_ID is %s" +msgid "instance %s: unrescuing" msgstr "" -#: nova/rpc.py:356 +#: ../nova/compute/manager.py:406 #, python-format -msgid "response %s" +msgid "instance %s: pausing" msgstr "" -#: nova/rpc.py:365 +#: ../nova/compute/manager.py:423 #, python-format -msgid "topic is %s" +msgid "instance %s: unpausing" msgstr "" -#: nova/rpc.py:366 +#: ../nova/compute/manager.py:440 #, python-format -msgid "message %s" +msgid "instance %s: retrieving diagnostics" msgstr "" -#: nova/service.py:157 +#: ../nova/compute/manager.py:453 #, python-format -msgid "Starting %s node" +msgid "instance %s: suspending" msgstr "" -#: nova/service.py:169 -msgid "Service killed that has no database entry" +#: ../nova/compute/manager.py:472 +#, python-format +msgid "instance %s: resuming" msgstr "" -#: nova/service.py:190 -msgid "The service database object disappeared, Recreating it." +#: ../nova/compute/manager.py:491 +#, python-format +msgid "instance %s: locking" msgstr "" -#: nova/service.py:202 -msgid "Recovered model server connection!" +#: ../nova/compute/manager.py:503 +#, python-format +msgid "instance %s: unlocking" msgstr "" -#: nova/service.py:208 -msgid "model server went away" +#: ../nova/compute/manager.py:513 +#, python-format +msgid "instance %s: getting locked state" msgstr "" -#: nova/service.py:217 nova/db/sqlalchemy/__init__.py:43 +#: ../nova/compute/manager.py:526 #, python-format -msgid "Data store %s is unreachable. Trying again in %d seconds." 
+msgid "instance %s: reset network" msgstr "" -#: nova/service.py:232 nova/twistd.py:232 +#: ../nova/compute/manager.py:535 ../nova/api/ec2/cloud.py:515 #, python-format -msgid "Serving %s" +msgid "Get console output for instance %s" msgstr "" -#: nova/service.py:234 nova/twistd.py:264 -msgid "Full set of FLAGS:" +#: ../nova/compute/manager.py:543 +#, python-format +msgid "instance %s: getting ajax console" msgstr "" -#: nova/twistd.py:211 +#: ../nova/compute/manager.py:553 #, python-format -msgid "pidfile %s does not exist. Daemon not running?\n" +msgid "" +"instance %(instance_id)s: attaching volume %(volume_id)s to %(mountpoint)s" msgstr "" -#: nova/twistd.py:268 +#. pylint: disable=W0702 +#. NOTE(vish): The inline callback eats the exception info so we +#. log the traceback here and reraise the same +#. ecxception below. +#: ../nova/compute/manager.py:569 #, python-format -msgid "Starting %s" +msgid "instance %(instance_id)s: attach failed %(mountpoint)s, removing" msgstr "" -#: nova/utils.py:53 +#: ../nova/compute/manager.py:585 #, python-format -msgid "Inner Exception: %s" +msgid "" +"Detach volume %(volume_id)s from mountpoint %(mp)s on instance " +"%(instance_id)s" msgstr "" -#: nova/utils.py:54 +#: ../nova/compute/manager.py:588 #, python-format -msgid "Class %s cannot be found" +msgid "Detaching volume from unknown instance %s" msgstr "" -#: nova/utils.py:113 +#: ../nova/scheduler/simple.py:53 #, python-format -msgid "Fetching %s" +msgid "Host %s is not alive" +msgstr "" + +#: ../nova/scheduler/simple.py:65 +msgid "All hosts have too many cores" msgstr "" -#: nova/utils.py:125 +#: ../nova/scheduler/simple.py:87 #, python-format -msgid "Running cmd (subprocess): %s" +msgid "Host %s not available" msgstr "" -#: nova/utils.py:138 +#: ../nova/scheduler/simple.py:99 +msgid "All hosts have too many gigabytes" +msgstr "" + +#: ../nova/scheduler/simple.py:119 +msgid "All hosts have too many networks" +msgstr "" + +#: ../nova/volume/manager.py:85 #, python-format 
-msgid "Result was %s" +msgid "Re-exporting %s volumes" msgstr "" -#: nova/utils.py:171 +#: ../nova/volume/manager.py:90 #, python-format -msgid "debug in callback: %s" +msgid "volume %s: skipping export" msgstr "" -#: nova/utils.py:176 +#: ../nova/volume/manager.py:96 #, python-format -msgid "Running %s" +msgid "volume %s: creating" msgstr "" -#: nova/utils.py:207 +#: ../nova/volume/manager.py:108 #, python-format -msgid "Couldn't get IP, using 127.0.0.1 %s" +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" msgstr "" -#: nova/utils.py:289 +#: ../nova/volume/manager.py:112 #, python-format -msgid "Invalid backend: %s" +msgid "volume %s: creating export" msgstr "" -#: nova/utils.py:300 +#: ../nova/volume/manager.py:123 #, python-format -msgid "backend %s" +msgid "volume %s: created successfully" msgstr "" -#: nova/api/ec2/__init__.py:133 -msgid "Too many failed authentications." +#: ../nova/volume/manager.py:131 +msgid "Volume is still attached" msgstr "" -#: nova/api/ec2/__init__.py:142 +#: ../nova/volume/manager.py:133 +msgid "Volume is not local to this node" +msgstr "" + +#: ../nova/volume/manager.py:136 #, python-format -msgid "" -"Access key %s has had %d failed authentications and will be locked out for " -"%d minutes." 
+msgid "volume %s: removing export" msgstr "" -#: nova/api/ec2/__init__.py:179 nova/objectstore/handler.py:140 +#: ../nova/volume/manager.py:138 #, python-format -msgid "Authentication Failure: %s" +msgid "volume %s: deleting" msgstr "" -#: nova/api/ec2/__init__.py:190 +#: ../nova/volume/manager.py:147 #, python-format -msgid "Authenticated Request For %s:%s)" +msgid "volume %s: deleted successfully" +msgstr "bind %s: slettet" + +#: ../nova/virt/xenapi/fake.py:74 +#, python-format +msgid "%(text)s: _db_content => %(content)s" msgstr "" -#: nova/api/ec2/__init__.py:227 +#: ../nova/virt/xenapi/fake.py:304 ../nova/virt/xenapi/fake.py:404 +#: ../nova/virt/xenapi/fake.py:422 ../nova/virt/xenapi/fake.py:478 +msgid "Raising NotImplemented" +msgstr "" + +#: ../nova/virt/xenapi/fake.py:306 #, python-format -msgid "action: %s" +msgid "xenapi.fake does not have an implementation for %s" msgstr "" -#: nova/api/ec2/__init__.py:229 +#: ../nova/virt/xenapi/fake.py:341 #, python-format -msgid "arg: %s\t\tval: %s" +msgid "Calling %(localname)s %(impl)s" msgstr "" -#: nova/api/ec2/__init__.py:301 +#: ../nova/virt/xenapi/fake.py:346 #, python-format -msgid "Unauthorized request for controller=%s and action=%s" +msgid "Calling getter %s" msgstr "" -#: nova/api/ec2/__init__.py:339 +#: ../nova/virt/xenapi/fake.py:406 #, python-format -msgid "NotFound raised: %s" +msgid "" +"xenapi.fake does not have an implementation for %s or it has been called " +"with the wrong number of arguments" +msgstr "" + +#: ../nova/tests/test_cloud.py:256 +msgid "Can't test instances without a real virtual env." msgstr "" -#: nova/api/ec2/__init__.py:342 +#: ../nova/tests/test_cloud.py:268 #, python-format -msgid "ApiError raised: %s" +msgid "Need to watch instance %s until it's running..." 
msgstr "" -#: nova/api/ec2/__init__.py:349 +#: ../nova/virt/connection.py:73 +msgid "Failed to open connection to the hypervisor" +msgstr "" + +#: ../nova/network/linux_net.py:187 #, python-format -msgid "Unexpected error raised: %s" +msgid "Starting VLAN inteface %s" msgstr "" -#: nova/api/ec2/__init__.py:354 -msgid "An unknown error has occurred. Please try your request again." +#: ../nova/network/linux_net.py:208 +#, python-format +msgid "Starting Bridge interface for %s" msgstr "" -#: nova/api/ec2/admin.py:84 +#. pylint: disable=W0703 +#: ../nova/network/linux_net.py:314 #, python-format -msgid "Creating new user: %s" +msgid "Hupping dnsmasq threw %s" msgstr "" -#: nova/api/ec2/admin.py:92 +#: ../nova/network/linux_net.py:316 #, python-format -msgid "Deleting user: %s" +msgid "Pid %d is stale, relaunching dnsmasq" msgstr "" -#: nova/api/ec2/admin.py:114 +#. pylint: disable=W0703 +#: ../nova/network/linux_net.py:358 #, python-format -msgid "Adding role %s to user %s for project %s" +msgid "killing radvd threw %s" msgstr "" -#: nova/api/ec2/admin.py:117 nova/auth/manager.py:415 +#: ../nova/network/linux_net.py:360 #, python-format -msgid "Adding sitewide role %s to user %s" +msgid "Pid %d is stale, relaunching radvd" msgstr "" -#: nova/api/ec2/admin.py:122 +#. 
pylint: disable=W0703 +#: ../nova/network/linux_net.py:449 #, python-format -msgid "Removing role %s from user %s for project %s" +msgid "Killing dnsmasq threw %s" msgstr "" -#: nova/api/ec2/admin.py:125 nova/auth/manager.py:441 +#: ../nova/utils.py:58 #, python-format -msgid "Removing sitewide role %s from user %s" +msgid "Inner Exception: %s" msgstr "" -#: nova/api/ec2/admin.py:129 nova/api/ec2/admin.py:192 -msgid "operation must be add or remove" +#: ../nova/utils.py:59 +#, python-format +msgid "Class %s cannot be found" msgstr "" -#: nova/api/ec2/admin.py:142 +#: ../nova/utils.py:118 #, python-format -msgid "Getting x509 for user: %s on project: %s" +msgid "Fetching %s" msgstr "" -#: nova/api/ec2/admin.py:159 +#: ../nova/utils.py:130 #, python-format -msgid "Create project %s managed by %s" +msgid "Running cmd (subprocess): %s" msgstr "" -#: nova/api/ec2/admin.py:170 +#: ../nova/utils.py:143 ../nova/utils.py:183 #, python-format -msgid "Delete project: %s" +msgid "Result was %s" msgstr "" -#: nova/api/ec2/admin.py:184 nova/auth/manager.py:533 +#: ../nova/utils.py:159 #, python-format -msgid "Adding user %s to project %s" +msgid "Running cmd (SSH): %s" msgstr "" -#: nova/api/ec2/admin.py:188 +#: ../nova/utils.py:217 #, python-format -msgid "Removing user %s from project %s" +msgid "debug in callback: %s" msgstr "" -#: nova/api/ec2/apirequest.py:95 +#: ../nova/utils.py:222 #, python-format -msgid "Unsupported API request: controller = %s,action = %s" +msgid "Running %s" msgstr "" -#: nova/api/ec2/cloud.py:117 +#: ../nova/utils.py:262 #, python-format -msgid "Generating root CA: %s" +msgid "Link Local address is not found.:%s" msgstr "" -#: nova/api/ec2/cloud.py:277 +#: ../nova/utils.py:265 #, python-format -msgid "Create key pair %s" +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" msgstr "" -#: nova/api/ec2/cloud.py:285 +#: ../nova/utils.py:363 #, python-format -msgid "Delete key pair %s" +msgid "Invalid backend: %s" msgstr "" -#: 
nova/api/ec2/cloud.py:357 +#: ../nova/utils.py:374 #, python-format -msgid "%s is not a valid ipProtocol" +msgid "backend %s" msgstr "" -#: nova/api/ec2/cloud.py:361 -msgid "Invalid port range" +#: ../nova/fakerabbit.py:49 +#, python-format +msgid "(%(nm)s) publish (key: %(routing_key)s) %(message)s" msgstr "" -#: nova/api/ec2/cloud.py:392 +#: ../nova/fakerabbit.py:54 #, python-format -msgid "Revoke security group ingress %s" +msgid "Publishing to route %s" msgstr "" -#: nova/api/ec2/cloud.py:401 nova/api/ec2/cloud.py:414 -msgid "No rule for the specified parameters." +#: ../nova/fakerabbit.py:84 +#, python-format +msgid "Declaring queue %s" msgstr "" -#: nova/api/ec2/cloud.py:421 +#: ../nova/fakerabbit.py:90 #, python-format -msgid "Authorize security group ingress %s" +msgid "Declaring exchange %s" msgstr "" -#: nova/api/ec2/cloud.py:432 +#: ../nova/fakerabbit.py:96 #, python-format -msgid "This rule already exists in group %s" +msgid "Binding %(queue)s to %(exchange)s with key %(routing_key)s" msgstr "" -#: nova/api/ec2/cloud.py:460 +#: ../nova/fakerabbit.py:121 #, python-format -msgid "Create Security Group %s" +msgid "Getting from %(queue)s: %(message)s" msgstr "" -#: nova/api/ec2/cloud.py:463 +#: ../nova/virt/xenapi/vm_utils.py:135 ../nova/virt/hyperv.py:171 #, python-format -msgid "group %s already exists" +msgid "Created VM %s..." msgstr "" -#: nova/api/ec2/cloud.py:475 +#: ../nova/virt/xenapi/vm_utils.py:138 #, python-format -msgid "Delete security group %s" +msgid "Created VM %(instance_name)s as %(vm_ref)s." msgstr "" -#: nova/api/ec2/cloud.py:483 nova/compute/manager.py:452 +#: ../nova/virt/xenapi/vm_utils.py:168 #, python-format -msgid "Get console output for instance %s" +msgid "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " msgstr "" -#: nova/api/ec2/cloud.py:543 +#: ../nova/virt/xenapi/vm_utils.py:171 #, python-format -msgid "Create volume of %s GB" +msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." 
msgstr "" -#: nova/api/ec2/cloud.py:567 +#: ../nova/virt/xenapi/vm_utils.py:187 #, python-format -msgid "Attach volume %s to instacne %s at %s" +msgid "VBD not found in instance %s" msgstr "" -#: nova/api/ec2/cloud.py:579 +#: ../nova/virt/xenapi/vm_utils.py:197 #, python-format -msgid "Detach volume %s" +msgid "Unable to unplug VBD %s" msgstr "" -#: nova/api/ec2/cloud.py:686 -msgid "Allocate address" +#: ../nova/virt/xenapi/vm_utils.py:209 +#, python-format +msgid "Unable to destroy VBD %s" msgstr "" -#: nova/api/ec2/cloud.py:691 +#: ../nova/virt/xenapi/vm_utils.py:224 #, python-format -msgid "Release address %s" +msgid "Creating VIF for VM %(vm_ref)s, network %(network_ref)s." msgstr "" -#: nova/api/ec2/cloud.py:696 +#: ../nova/virt/xenapi/vm_utils.py:227 #, python-format -msgid "Associate address %s to instance %s" +msgid "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s." msgstr "" -#: nova/api/ec2/cloud.py:703 +#: ../nova/virt/xenapi/vm_utils.py:246 #, python-format -msgid "Disassociate address %s" +msgid "" +"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s) on " +"%(sr_ref)s." msgstr "" -#: nova/api/ec2/cloud.py:730 -msgid "Going to start terminating instances" +#. TODO(sirp): Add quiesce and VSS locking support when Windows support +#. is added +#: ../nova/virt/xenapi/vm_utils.py:258 +#, python-format +msgid "Snapshotting VM %(vm_ref)s with label '%(label)s'..." msgstr "" -#: nova/api/ec2/cloud.py:738 +#: ../nova/virt/xenapi/vm_utils.py:272 #, python-format -msgid "Reboot instance %r" +msgid "Created snapshot %(template_vm_ref)s from VM %(vm_ref)s." 
msgstr "" -#: nova/api/ec2/cloud.py:775 +#: ../nova/virt/xenapi/vm_utils.py:286 #, python-format -msgid "De-registering image %s" +msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s" msgstr "" -#: nova/api/ec2/cloud.py:783 +#: ../nova/virt/xenapi/vm_utils.py:327 #, python-format -msgid "Registered image %s with id %s" +msgid "Size for image %(image)s:%(virtual_size)d" msgstr "" -#: nova/api/ec2/cloud.py:789 nova/api/ec2/cloud.py:804 +#: ../nova/virt/xenapi/vm_utils.py:332 #, python-format -msgid "attribute not supported: %s" +msgid "Glance image %s" msgstr "" -#: nova/api/ec2/cloud.py:794 +#. we need to invoke a plugin for copying VDI's +#. content into proper path +#: ../nova/virt/xenapi/vm_utils.py:342 #, python-format -msgid "invalid id: %s" +msgid "Copying VDI %s to /boot/guest on dom0" msgstr "" -#: nova/api/ec2/cloud.py:807 -msgid "user or group not specified" +#: ../nova/virt/xenapi/vm_utils.py:352 +#, python-format +msgid "Kernel/Ramdisk VDI %s destroyed" msgstr "" -#: nova/api/ec2/cloud.py:809 -msgid "only group \"all\" is supported" +#: ../nova/virt/xenapi/vm_utils.py:361 +#, python-format +msgid "Asking xapi to fetch %(url)s as %(access)s" msgstr "" -#: nova/api/ec2/cloud.py:811 -msgid "operation_type must be add or remove" +#: ../nova/virt/xenapi/vm_utils.py:386 ../nova/virt/xenapi/vm_utils.py:402 +#, python-format +msgid "Looking up vdi %s for PV kernel" msgstr "" -#: nova/api/ec2/cloud.py:812 +#: ../nova/virt/xenapi/vm_utils.py:397 #, python-format -msgid "Updating image %s publicity" +msgid "PV Kernel in VDI:%s" msgstr "" -#: nova/api/ec2/metadatarequesthandler.py:75 +#: ../nova/virt/xenapi/vm_utils.py:405 #, python-format -msgid "Failed to get metadata for ip: %s" +msgid "Running pygrub against %s" msgstr "" -#: nova/api/openstack/__init__.py:70 +#: ../nova/virt/xenapi/vm_utils.py:411 #, python-format -msgid "Caught error: %s" +msgid "Found Xen kernel %s" msgstr "" -#: nova/api/openstack/__init__.py:86 -msgid "Including admin operations 
in API." +#: ../nova/virt/xenapi/vm_utils.py:413 +msgid "No Xen kernel found. Booting HVM." msgstr "" -#: nova/api/openstack/servers.py:184 +#: ../nova/virt/xenapi/vm_utils.py:425 ../nova/virt/hyperv.py:431 #, python-format -msgid "Compute.api::lock %s" +msgid "duplicate name found: %s" msgstr "" -#: nova/api/openstack/servers.py:199 +#: ../nova/virt/xenapi/vm_utils.py:442 #, python-format -msgid "Compute.api::unlock %s" +msgid "VDI %s is still available" msgstr "" -#: nova/api/openstack/servers.py:213 +#: ../nova/virt/xenapi/vm_utils.py:463 #, python-format -msgid "Compute.api::get_lock %s" +msgid "(VM_UTILS) xenserver vm state -> |%s|" msgstr "" -#: nova/api/openstack/servers.py:224 +#: ../nova/virt/xenapi/vm_utils.py:465 #, python-format -msgid "Compute.api::pause %s" +msgid "(VM_UTILS) xenapi power_state -> |%s|" msgstr "" -#: nova/api/openstack/servers.py:235 +#: ../nova/virt/xenapi/vm_utils.py:525 #, python-format -msgid "Compute.api::unpause %s" +msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s" msgstr "" -#: nova/api/openstack/servers.py:246 +#: ../nova/virt/xenapi/vm_utils.py:542 #, python-format -msgid "compute.api::suspend %s" +msgid "Re-scanning SR %s" msgstr "" -#: nova/api/openstack/servers.py:257 +#: ../nova/virt/xenapi/vm_utils.py:567 #, python-format -msgid "compute.api::resume %s" +msgid "" +"VHD coalesce attempts exceeded (%(counter)d > %(max_attempts)d), giving up..." msgstr "" -#: nova/auth/dbdriver.py:84 +#: ../nova/virt/xenapi/vm_utils.py:574 #, python-format -msgid "User %s already exists" +msgid "" +"Parent %(parent_uuid)s doesn't match original parent " +"%(original_parent_uuid)s, waiting for coalesce..." 
msgstr "" -#: nova/auth/dbdriver.py:106 nova/auth/ldapdriver.py:207 +#: ../nova/virt/xenapi/vm_utils.py:590 #, python-format -msgid "Project can't be created because manager %s doesn't exist" +msgid "No VDIs found for VM %s" msgstr "" -#: nova/auth/dbdriver.py:135 nova/auth/ldapdriver.py:204 +#: ../nova/virt/xenapi/vm_utils.py:594 #, python-format -msgid "Project can't be created because project %s already exists" +msgid "Unexpected number of VDIs (%(num_vdis)s) found for VM %(vm_ref)s" msgstr "" -#: nova/auth/dbdriver.py:157 nova/auth/ldapdriver.py:241 +#: ../nova/virt/xenapi/vm_utils.py:653 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:188 #, python-format -msgid "Project can't be modified because manager %s doesn't exist" +msgid "Creating VBD for VDI %s ... " msgstr "" -#: nova/auth/dbdriver.py:245 +#: ../nova/virt/xenapi/vm_utils.py:655 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:190 #, python-format -msgid "User \"%s\" not found" +msgid "Creating VBD for VDI %s done." msgstr "" -#: nova/auth/dbdriver.py:248 +#: ../nova/virt/xenapi/vm_utils.py:657 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:192 #, python-format -msgid "Project \"%s\" not found" +msgid "Plugging VBD %s ... " msgstr "" -#: nova/auth/fakeldap.py:33 -msgid "Attempted to instantiate singleton" +#: ../nova/virt/xenapi/vm_utils.py:659 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:194 +#, python-format +msgid "Plugging VBD %s done." 
msgstr "" -#: nova/auth/ldapdriver.py:181 +#: ../nova/virt/xenapi/vm_utils.py:661 #, python-format -msgid "LDAP object for %s doesn't exist" +msgid "VBD %(vbd)s plugged as %(orig_dev)s" msgstr "" -#: nova/auth/ldapdriver.py:218 +#: ../nova/virt/xenapi/vm_utils.py:664 #, python-format -msgid "Project can't be created because user %s doesn't exist" +msgid "VBD %(vbd)s plugged into wrong dev, remapping to %(dev)s" msgstr "" -#: nova/auth/ldapdriver.py:478 +#: ../nova/virt/xenapi/vm_utils.py:668 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:197 #, python-format -msgid "User %s is already a member of the group %s" +msgid "Destroying VBD for VDI %s ... " msgstr "" -#: nova/auth/ldapdriver.py:507 +#: ../nova/virt/xenapi/vm_utils.py:671 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:200 #, python-format -msgid "" -"Attempted to remove the last member of a group. Deleting the group at %s " -"instead." +msgid "Destroying VBD for VDI %s done." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:683 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:211 +msgid "VBD.unplug successful first time." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:688 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:216 +msgid "VBD.unplug rejected: retrying..." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:692 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:220 +msgid "VBD.unplug successful eventually." 
msgstr "" -#: nova/auth/ldapdriver.py:528 +#: ../nova/virt/xenapi/vm_utils.py:695 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:223 #, python-format -msgid "Group at dn %s doesn't exist" +msgid "Ignoring XenAPI.Failure in VBD.unplug: %s" msgstr "" -#: nova/auth/manager.py:259 +#: ../nova/virt/xenapi/vm_utils.py:704 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:66 #, python-format -msgid "Looking up user: %r" +msgid "Ignoring XenAPI.Failure %s" msgstr "" -#: nova/auth/manager.py:263 +#: ../nova/virt/xenapi/vm_utils.py:735 #, python-format -msgid "Failed authorization for access key %s" +msgid "" +"Writing partition table %(primary_first)d %(primary_last)d to %(dest)s..." msgstr "" -#: nova/auth/manager.py:264 +#: ../nova/virt/xenapi/vm_utils.py:747 #, python-format -msgid "No user found for access key %s" +msgid "Writing partition table %s done." msgstr "" -#: nova/auth/manager.py:270 +#: ../nova/tests/test_rpc.py:89 #, python-format -msgid "Using project name = user name (%s)" +msgid "Nested received %(queue)s, %(value)s" msgstr "" -#: nova/auth/manager.py:275 +#: ../nova/tests/test_rpc.py:95 #, python-format -msgid "failed authorization: no project named %s (user=%s)" +msgid "Nested return %s" msgstr "" -#: nova/auth/manager.py:277 +#: ../nova/tests/test_rpc.py:120 ../nova/tests/test_rpc.py:126 #, python-format -msgid "No project called %s could be found" +msgid "Received %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:44 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:133 +#, python-format +msgid "No service for id %s" msgstr "" -#: nova/auth/manager.py:281 +#: ../nova/db/sqlalchemy/api.py:251 #, python-format -msgid "Failed authorization: user %s not admin and not member of project %s" +msgid "No service for %(host)s, %(binary)s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:592 +msgid "No fixed ips defined" msgstr "" -#: nova/auth/manager.py:283 +#: 
../nova/db/sqlalchemy/api.py:608 #, python-format -msgid "User %s is not a member of project %s" +msgid "No floating ip for address %s" msgstr "" -#: nova/auth/manager.py:292 nova/auth/manager.py:303 +#: ../nova/db/sqlalchemy/api.py:629 #, python-format -msgid "Invalid signature for user %s" +msgid "No address for instance %s" msgstr "" -#: nova/auth/manager.py:293 nova/auth/manager.py:304 -msgid "Signature does not match" +#: ../nova/db/sqlalchemy/api.py:961 +#, python-format +msgid "no keypair for user %(user_id)s, name %(name)s" msgstr "" -#: nova/auth/manager.py:374 -msgid "Must specify project" +#: ../nova/db/sqlalchemy/api.py:1076 ../nova/db/sqlalchemy/api.py:1156 +#, python-format +msgid "No network for id %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1086 +msgid "No networks defined" msgstr "" -#: nova/auth/manager.py:408 +#: ../nova/db/sqlalchemy/api.py:1115 #, python-format -msgid "The %s role can not be found" +msgid "No network for bridge %s" msgstr "" -#: nova/auth/manager.py:410 +#: ../nova/db/sqlalchemy/api.py:1129 ../nova/db/sqlalchemy/api.py:1142 #, python-format -msgid "The %s role is global only" +msgid "No network for instance %s" msgstr "" -#: nova/auth/manager.py:412 +#: ../nova/db/sqlalchemy/api.py:1277 #, python-format -msgid "Adding role %s to user %s in project %s" +msgid "Token %s does not exist" msgstr "" -#: nova/auth/manager.py:438 +#: ../nova/db/sqlalchemy/api.py:1302 #, python-format -msgid "Removing role %s from user %s on project %s" +msgid "No quota for project_id %s" msgstr "" -#: nova/auth/manager.py:505 +#: ../nova/db/sqlalchemy/api.py:1455 ../nova/db/sqlalchemy/api.py:1501 +#: ../nova/api/ec2/__init__.py:323 #, python-format -msgid "Created project %s with manager %s" +msgid "Volume %s not found" msgstr "" -#: nova/auth/manager.py:523 +#: ../nova/db/sqlalchemy/api.py:1514 #, python-format -msgid "modifying project %s" +msgid "No export device found for volume %s" msgstr "" -#: nova/auth/manager.py:553 +#: 
../nova/db/sqlalchemy/api.py:1527 #, python-format -msgid "Remove user %s from project %s" +msgid "No target id found for volume %s" msgstr "" -#: nova/auth/manager.py:581 +#: ../nova/db/sqlalchemy/api.py:1572 #, python-format -msgid "Deleting project %s" +msgid "No security group with id %s" msgstr "" -#: nova/auth/manager.py:637 +#: ../nova/db/sqlalchemy/api.py:1589 #, python-format -msgid "Created user %s (admin: %r)" +msgid "No security group named %(group_name)s for project: %(project_id)s" msgstr "" -#: nova/auth/manager.py:645 +#: ../nova/db/sqlalchemy/api.py:1682 #, python-format -msgid "Deleting user %s" +msgid "No secuity group rule with id %s" msgstr "" -#: nova/auth/manager.py:655 +#: ../nova/db/sqlalchemy/api.py:1756 #, python-format -msgid "Access Key change for user %s" +msgid "No user for id %s" msgstr "" -#: nova/auth/manager.py:657 +#: ../nova/db/sqlalchemy/api.py:1772 #, python-format -msgid "Secret Key change for user %s" +msgid "No user for access key %s" msgstr "" -#: nova/auth/manager.py:659 +#: ../nova/db/sqlalchemy/api.py:1834 #, python-format -msgid "Admin status set to %r for user %s" +msgid "No project with id %s" msgstr "" -#: nova/auth/manager.py:708 +#: ../nova/db/sqlalchemy/api.py:1979 #, python-format -msgid "No vpn data for project %s" +msgid "No console pool with id %(pool_id)s" msgstr "" -#: nova/cloudpipe/pipelib.py:45 -msgid "Template for script to run on cloudpipe instance boot" +#: ../nova/db/sqlalchemy/api.py:1996 +#, python-format +msgid "" +"No console pool of type %(console_type)s for compute host %(compute_host)s " +"on proxy host %(host)s" msgstr "" -#: nova/cloudpipe/pipelib.py:48 -msgid "Network to push into openvpn config" +#: ../nova/db/sqlalchemy/api.py:2035 +#, python-format +msgid "No console for instance %(instance_id)s in pool %(pool_id)s" msgstr "" -#: nova/cloudpipe/pipelib.py:51 -msgid "Netmask to push into openvpn config" +#: ../nova/db/sqlalchemy/api.py:2057 +#, python-format +msgid "on instance %s" msgstr 
"" -#: nova/cloudpipe/pipelib.py:97 +#: ../nova/db/sqlalchemy/api.py:2058 #, python-format -msgid "Launching VPN for %s" +msgid "No console with id %(console_id)s %(idesc)s" msgstr "" -#: nova/compute/api.py:67 +#: ../nova/db/sqlalchemy/api.py:2078 ../nova/db/sqlalchemy/api.py:2097 #, python-format -msgid "Instance %d was not found in get_network_topic" +msgid "No zone with id %(zone_id)s" msgstr "" -#: nova/compute/api.py:73 +#: ../nova/virt/libvirt_conn.py:160 #, python-format -msgid "Instance %d has no host" +msgid "Checking state of %s" msgstr "" -#: nova/compute/api.py:92 +#: ../nova/virt/libvirt_conn.py:165 #, python-format -msgid "Quota exceeeded for %s, tried to run %s instances" +msgid "Current state of %(name)s was %(state)s." msgstr "" -#: nova/compute/api.py:94 +#: ../nova/virt/libvirt_conn.py:183 #, python-format -msgid "" -"Instance quota exceeded. You can only run %s more instances of this type." +msgid "Connecting to libvirt: %s" msgstr "" -#: nova/compute/api.py:109 -msgid "Creating a raw instance" +#: ../nova/virt/libvirt_conn.py:196 +msgid "Connection to libvirt broke" msgstr "" -#: nova/compute/api.py:156 +#: ../nova/virt/libvirt_conn.py:258 #, python-format -msgid "Going to run %s instances..." 
+msgid "instance %(instance_name)s: deleting instance files %(target)s" msgstr "" -#: nova/compute/api.py:180 +#: ../nova/virt/libvirt_conn.py:283 #, python-format -msgid "Casting to scheduler for %s/%s's instance %s" +msgid "Invalid device path %s" msgstr "" -#: nova/compute/api.py:279 +#: ../nova/virt/libvirt_conn.py:313 #, python-format -msgid "Going to try and terminate %s" +msgid "No disk at %s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:320 +msgid "Instance snapshotting is not supported for libvirtat this time" msgstr "" -#: nova/compute/api.py:283 +#: ../nova/virt/libvirt_conn.py:336 #, python-format -msgid "Instance %d was not found during terminate" +msgid "instance %s: rebooted" msgstr "" -#: nova/compute/api.py:288 +#: ../nova/virt/libvirt_conn.py:339 #, python-format -msgid "Instance %d is already being terminated" +msgid "_wait_for_reboot failed: %s" msgstr "" -#: nova/compute/api.py:450 +#: ../nova/virt/libvirt_conn.py:382 #, python-format -msgid "Invalid device specified: %s. Example device: /dev/vdb" +msgid "instance %s: rescued" msgstr "" -#: nova/compute/api.py:465 -msgid "Volume isn't attached to anything!" 
+#: ../nova/virt/libvirt_conn.py:385 +#, python-format +msgid "_wait_for_rescue failed: %s" msgstr "" -#: nova/compute/disk.py:71 +#: ../nova/virt/libvirt_conn.py:411 #, python-format -msgid "Input partition size not evenly divisible by sector size: %d / %d" +msgid "instance %s: is running" msgstr "" -#: nova/compute/disk.py:75 +#: ../nova/virt/libvirt_conn.py:422 #, python-format -msgid "Bytes for local storage not evenly divisible by sector size: %d / %d" +msgid "instance %s: booted" msgstr "" -#: nova/compute/disk.py:128 +#: ../nova/virt/libvirt_conn.py:425 ../nova/virt/xenapi/vmops.py:186 #, python-format -msgid "Could not attach image to loopback: %s" +msgid "instance %s: failed to boot" msgstr "" -#: nova/compute/disk.py:136 +#: ../nova/virt/libvirt_conn.py:436 #, python-format -msgid "Failed to load partition: %s" +msgid "virsh said: %r" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:440 +msgid "cool, it's a device" msgstr "" -#: nova/compute/disk.py:158 +#: ../nova/virt/libvirt_conn.py:448 #, python-format -msgid "Failed to mount filesystem: %s" +msgid "data: %(data)r, fpath: %(fpath)r" msgstr "" -#: nova/compute/instance_types.py:41 +#: ../nova/virt/libvirt_conn.py:456 #, python-format -msgid "Unknown instance type: %s" +msgid "Contents of file %(fpath)s: %(contents)r" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:489 +msgid "Unable to find an open port" msgstr "" -#: nova/compute/manager.py:69 +#: ../nova/virt/libvirt_conn.py:563 #, python-format -msgid "check_instance_lock: decorating: |%s|" +msgid "instance %s: Creating image" msgstr "" -#: nova/compute/manager.py:71 +#: ../nova/virt/libvirt_conn.py:646 #, python-format -msgid "check_instance_lock: arguments: |%s| |%s| |%s|" +msgid "instance %(inst_name)s: injecting key into image %(img_id)s" msgstr "" -#: nova/compute/manager.py:75 +#: ../nova/virt/libvirt_conn.py:649 #, python-format -msgid "check_instance_lock: locked: |%s|" +msgid "instance %(inst_name)s: injecting net into image %(img_id)s" msgstr 
"" -#: nova/compute/manager.py:77 +#. This could be a windows image, or a vmdk format disk +#: ../nova/virt/libvirt_conn.py:657 #, python-format -msgid "check_instance_lock: admin: |%s|" +msgid "" +"instance %(inst_name)s: ignoring error injecting data into image %(img_id)s " +"(%(e)s)" msgstr "" -#: nova/compute/manager.py:82 +#. TODO(termie): cache? +#: ../nova/virt/libvirt_conn.py:665 #, python-format -msgid "check_instance_lock: executing: |%s|" +msgid "instance %s: starting toXML method" msgstr "" -#: nova/compute/manager.py:86 +#: ../nova/virt/libvirt_conn.py:732 #, python-format -msgid "check_instance_lock: not executing |%s|" +msgid "instance %s: finished toXML method" msgstr "" -#: nova/compute/manager.py:157 -msgid "Instance has already been created" +#: ../nova/virt/libvirt_conn.py:751 +msgid "diagnostics are not supported for libvirt" msgstr "" -#: nova/compute/manager.py:158 +#: ../nova/virt/libvirt_conn.py:1225 #, python-format -msgid "instance %s: starting..." +msgid "Attempted to unfilter instance %s which is not filtered" msgstr "" -#: nova/compute/manager.py:197 +#: ../nova/api/ec2/metadatarequesthandler.py:76 #, python-format -msgid "instance %s: Failed to spawn" +msgid "Failed to get metadata for ip: %s" msgstr "" -#: nova/compute/manager.py:211 nova/tests/test_cloud.py:228 +#: ../nova/auth/fakeldap.py:33 +msgid "Attempted to instantiate singleton" +msgstr "" + +#: ../nova/network/api.py:39 #, python-format -msgid "Terminating instance %s" +msgid "Quota exceeeded for %s, tried to allocate address" msgstr "" -#: nova/compute/manager.py:217 +#: ../nova/network/api.py:42 +msgid "Address quota exceeded. 
You cannot allocate any more addresses" +msgstr "" + +#: ../nova/tests/test_volume.py:162 #, python-format -msgid "Disassociating address %s" +msgid "Target %s allocated" msgstr "" -#: nova/compute/manager.py:230 +#: ../nova/virt/images.py:70 #, python-format -msgid "Deallocating address %s" +msgid "Finished retreving %(url)s -- placed in %(path)s" +msgstr "" + +#: ../nova/scheduler/driver.py:66 +msgid "Must implement a fallback schedule" msgstr "" -#: nova/compute/manager.py:243 +#: ../nova/console/manager.py:70 +msgid "Adding console" +msgstr "" + +#: ../nova/console/manager.py:90 #, python-format -msgid "trying to destroy already destroyed instance: %s" +msgid "Tried to remove non-existant console %(console_id)s." msgstr "" -#: nova/compute/manager.py:257 +#: ../nova/api/direct.py:149 +msgid "not available" +msgstr "" + +#: ../nova/api/ec2/cloud.py:62 #, python-format -msgid "Rebooting instance %s" +msgid "The key_pair %s already exists" msgstr "" -#: nova/compute/manager.py:260 +#. TODO(vish): Do this with M2Crypto instead +#: ../nova/api/ec2/cloud.py:118 #, python-format -msgid "trying to reboot a non-running instance: %s (state: %s excepted: %s)" +msgid "Generating root CA: %s" msgstr "" -#: nova/compute/manager.py:286 +#: ../nova/api/ec2/cloud.py:303 #, python-format -msgid "instance %s: snapshotting" +msgid "Create key pair %s" msgstr "" -#: nova/compute/manager.py:289 +#: ../nova/api/ec2/cloud.py:311 #, python-format -msgid "" -"trying to snapshot a non-running instance: %s (state: %s excepted: %s)" +msgid "Delete key pair %s" msgstr "" -#: nova/compute/manager.py:301 +#: ../nova/api/ec2/cloud.py:386 #, python-format -msgid "instance %s: rescuing" +msgid "%s is not a valid ipProtocol" msgstr "" -#: nova/compute/manager.py:316 +#: ../nova/api/ec2/cloud.py:390 +msgid "Invalid port range" +msgstr "" + +#: ../nova/api/ec2/cloud.py:421 #, python-format -msgid "instance %s: unrescuing" +msgid "Revoke security group ingress %s" +msgstr "" + +#: 
../nova/api/ec2/cloud.py:430 ../nova/api/ec2/cloud.py:459 +msgid "Not enough parameters to build a valid rule." +msgstr "" + +#: ../nova/api/ec2/cloud.py:443 +msgid "No rule for the specified parameters." msgstr "" -#: nova/compute/manager.py:335 +#: ../nova/api/ec2/cloud.py:450 #, python-format -msgid "instance %s: pausing" +msgid "Authorize security group ingress %s" msgstr "" -#: nova/compute/manager.py:352 +#: ../nova/api/ec2/cloud.py:464 #, python-format -msgid "instance %s: unpausing" +msgid "This rule already exists in group %s" msgstr "" -#: nova/compute/manager.py:369 +#: ../nova/api/ec2/cloud.py:492 #, python-format -msgid "instance %s: retrieving diagnostics" +msgid "Create Security Group %s" msgstr "" -#: nova/compute/manager.py:382 +#: ../nova/api/ec2/cloud.py:495 #, python-format -msgid "instance %s: suspending" +msgid "group %s already exists" msgstr "" -#: nova/compute/manager.py:401 +#: ../nova/api/ec2/cloud.py:507 #, python-format -msgid "instance %s: resuming" +msgid "Delete security group %s" msgstr "" -#: nova/compute/manager.py:420 +#: ../nova/api/ec2/cloud.py:584 #, python-format -msgid "instance %s: locking" +msgid "Create volume of %s GB" msgstr "" -#: nova/compute/manager.py:432 +#: ../nova/api/ec2/cloud.py:612 #, python-format -msgid "instance %s: unlocking" +msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" msgstr "" -#: nova/compute/manager.py:442 +#: ../nova/api/ec2/cloud.py:629 #, python-format -msgid "instance %s: getting locked state" +msgid "Detach volume %s" msgstr "" -#: nova/compute/manager.py:462 +#: ../nova/api/ec2/cloud.py:761 +msgid "Allocate address" +msgstr "" + +#: ../nova/api/ec2/cloud.py:766 #, python-format -msgid "instance %s: attaching volume %s to %s" +msgid "Release address %s" msgstr "" -#: nova/compute/manager.py:478 +#: ../nova/api/ec2/cloud.py:771 #, python-format -msgid "instance %s: attach failed %s, removing" +msgid "Associate address %(public_ip)s to instance %(instance_id)s" 
msgstr "" -#: nova/compute/manager.py:493 +#: ../nova/api/ec2/cloud.py:780 #, python-format -msgid "Detach volume %s from mountpoint %s on instance %s" +msgid "Disassociate address %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:807 +msgid "Going to start terminating instances" msgstr "" -#: nova/compute/manager.py:497 +#: ../nova/api/ec2/cloud.py:815 #, python-format -msgid "Detaching volume from unknown instance %s" +msgid "Reboot instance %r" msgstr "" -#: nova/compute/monitor.py:259 +#: ../nova/api/ec2/cloud.py:867 #, python-format -msgid "updating %s..." +msgid "De-registering image %s" msgstr "" -#: nova/compute/monitor.py:289 -msgid "unexpected error during update" +#: ../nova/api/ec2/cloud.py:875 +#, python-format +msgid "Registered image %(image_location)s with id %(image_id)s" msgstr "" -#: nova/compute/monitor.py:355 +#: ../nova/api/ec2/cloud.py:882 ../nova/api/ec2/cloud.py:900 #, python-format -msgid "Cannot get blockstats for \"%s\" on \"%s\"" +msgid "attribute not supported: %s" msgstr "" -#: nova/compute/monitor.py:377 +#: ../nova/api/ec2/cloud.py:890 #, python-format -msgid "Cannot get ifstats for \"%s\" on \"%s\"" +msgid "invalid id: %s" msgstr "" -#: nova/compute/monitor.py:412 -msgid "unexpected exception getting connection" +#: ../nova/api/ec2/cloud.py:903 +msgid "user or group not specified" +msgstr "" + +#: ../nova/api/ec2/cloud.py:905 +msgid "only group \"all\" is supported" +msgstr "" + +#: ../nova/api/ec2/cloud.py:907 +msgid "operation_type must be add or remove" msgstr "" -#: nova/compute/monitor.py:427 +#: ../nova/api/ec2/cloud.py:908 #, python-format -msgid "Found instance: %s" +msgid "Updating image %s publicity" msgstr "" -#: nova/db/sqlalchemy/api.py:43 -msgid "Use of empty request context is deprecated" +#: ../bin/nova-api.py:52 +#, python-format +msgid "Using paste.deploy config at: %s" msgstr "" -#: nova/db/sqlalchemy/api.py:132 +#: ../bin/nova-api.py:57 #, python-format -msgid "No service for id %s" +msgid "No paste configuration 
for app: %s" msgstr "" -#: nova/db/sqlalchemy/api.py:229 +#: ../bin/nova-api.py:59 #, python-format -msgid "No service for %s, %s" +msgid "" +"App Config: %(api)s\n" +"%(config)r" msgstr "" -#: nova/db/sqlalchemy/api.py:574 +#: ../bin/nova-api.py:64 #, python-format -msgid "No floating ip for address %s" +msgid "Running %s API" msgstr "" -#: nova/db/sqlalchemy/api.py:668 +#: ../bin/nova-api.py:69 #, python-format -msgid "No instance for id %s" +msgid "No known API applications configured in %s." msgstr "" -#: nova/db/sqlalchemy/api.py:758 nova/virt/libvirt_conn.py:598 -#: nova/virt/xenapi/volumeops.py:48 nova/virt/xenapi/volumeops.py:103 +#: ../bin/nova-api.py:83 #, python-format -msgid "Instance %s not found" +msgid "Starting nova-api node (version %s)" msgstr "" -#: nova/db/sqlalchemy/api.py:891 +#: ../bin/nova-api.py:89 #, python-format -msgid "no keypair for user %s, name %s" +msgid "No paste configuration found for: %s" msgstr "" -#: nova/db/sqlalchemy/api.py:1006 nova/db/sqlalchemy/api.py:1064 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:84 #, python-format -msgid "No network for id %s" +msgid "Argument %(key)s value %(value)s is too short." msgstr "" -#: nova/db/sqlalchemy/api.py:1036 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:89 #, python-format -msgid "No network for bridge %s" +msgid "Argument %(key)s value %(value)s contains invalid characters." msgstr "" -#: nova/db/sqlalchemy/api.py:1050 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:94 #, python-format -msgid "No network for instance %s" +msgid "Argument %(key)s value %(value)s starts with a hyphen." msgstr "" -#: nova/db/sqlalchemy/api.py:1180 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:102 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:130 #, python-format -msgid "Token %s does not exist" +msgid "Argument %s is required." 
msgstr "" -#: nova/db/sqlalchemy/api.py:1205 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:117 #, python-format -msgid "No quota for project_id %s" +msgid "" +"Argument %(key)s may not take value %(value)s. Valid values are ['true', " +"'false']." msgstr "" -#: nova/db/sqlalchemy/api.py:1356 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:163 #, python-format -msgid "No volume for id %s" +msgid "" +"Created VDI %(vdi_ref)s (%(label)s, %(size)s, %(read_only)s) on %(sr_ref)s." msgstr "" -#: nova/db/sqlalchemy/api.py:1401 +#: ../nova/virt/xenapi/vmops.py:67 #, python-format -msgid "Volume %s not found" +msgid "Attempted to create non-unique name %s" msgstr "" -#: nova/db/sqlalchemy/api.py:1413 +#: ../nova/virt/xenapi/vmops.py:73 #, python-format -msgid "No export device found for volume %s" +msgid "instance %(name)s: not enough free memory" msgstr "" -#: nova/db/sqlalchemy/api.py:1426 +#: ../nova/virt/xenapi/vmops.py:148 #, python-format -msgid "No target id found for volume %s" +msgid "Starting VM %s..." msgstr "" -#: nova/db/sqlalchemy/api.py:1471 +#: ../nova/virt/xenapi/vmops.py:151 #, python-format -msgid "No security group with id %s" +msgid "Spawning VM %(instance_name)s created %(vm_ref)s." 
msgstr "" -#: nova/db/sqlalchemy/api.py:1488 +#: ../nova/virt/xenapi/vmops.py:162 #, python-format -msgid "No security group named %s for project: %s" +msgid "Invalid value for onset_files: '%s'" msgstr "" -#: nova/db/sqlalchemy/api.py:1576 +#: ../nova/virt/xenapi/vmops.py:167 #, python-format -msgid "No secuity group rule with id %s" +msgid "Injecting file path: '%s'" msgstr "" -#: nova/db/sqlalchemy/api.py:1650 +#: ../nova/virt/xenapi/vmops.py:180 #, python-format -msgid "No user for id %s" +msgid "Instance %s: booted" msgstr "" -#: nova/db/sqlalchemy/api.py:1666 +#: ../nova/virt/xenapi/vmops.py:232 #, python-format -msgid "No user for access key %s" +msgid "Instance not present %s" msgstr "" -#: nova/db/sqlalchemy/api.py:1728 +#. TODO(sirp): Add quiesce and VSS locking support when Windows support +#. is added +#: ../nova/virt/xenapi/vmops.py:261 #, python-format -msgid "No project with id %s" +msgid "Starting snapshot for VM %s" msgstr "" -#: nova/image/glance.py:78 +#: ../nova/virt/xenapi/vmops.py:269 #, python-format -msgid "Parallax returned HTTP error %d from request for /images" +msgid "Unable to Snapshot %(vm_ref)s: %(exc)s" msgstr "" -#: nova/image/glance.py:97 +#: ../nova/virt/xenapi/vmops.py:280 #, python-format -msgid "Parallax returned HTTP error %d from request for /images/detail" +msgid "Finished snapshot and upload for VM %s" msgstr "" -#: nova/image/s3.py:82 +#: ../nova/virt/xenapi/vmops.py:356 #, python-format -msgid "Image %s could not be found" +msgid "VM %(vm)s already halted, skipping shutdown..." msgstr "" -#: nova/network/api.py:39 +#: ../nova/virt/xenapi/vmops.py:389 +msgid "Removing kernel/ramdisk files" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:399 +msgid "kernel/ramdisk files removed" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:561 #, python-format -msgid "Quota exceeeded for %s, tried to allocate address" +msgid "" +"TIMEOUT: The call to %(method)s timed out. 
VM id=%(instance_id)s; " +"args=%(strargs)s" msgstr "" -#: nova/network/api.py:42 -msgid "Address quota exceeded. You cannot allocate any more addresses" +#: ../nova/virt/xenapi/vmops.py:564 +#, python-format +msgid "" +"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. VM " +"id=%(instance_id)s; args=%(strargs)s" msgstr "" -#: nova/network/linux_net.py:176 +#: ../nova/virt/xenapi/vmops.py:569 #, python-format -msgid "Starting VLAN inteface %s" +msgid "" +"The call to %(method)s returned an error: %(e)s. VM id=%(instance_id)s; " +"args=%(strargs)s" msgstr "" -#: nova/network/linux_net.py:186 +#: ../nova/virt/xenapi/vmops.py:760 #, python-format -msgid "Starting Bridge interface for %s" +msgid "OpenSSL error: %s" msgstr "" -#: nova/network/linux_net.py:254 +#: ../nova/tests/test_compute.py:148 #, python-format -msgid "Hupping dnsmasq threw %s" +msgid "Running instances: %s" msgstr "" -#: nova/network/linux_net.py:256 +#: ../nova/tests/test_compute.py:154 #, python-format -msgid "Pid %d is stale, relaunching dnsmasq" +msgid "After terminating instances: %s" msgstr "" -#: nova/network/linux_net.py:334 +#: ../nova/cloudpipe/pipelib.py:45 +msgid "Template for script to run on cloudpipe instance boot" +msgstr "" + +#: ../nova/cloudpipe/pipelib.py:48 +msgid "Network to push into openvpn config" +msgstr "" + +#: ../nova/cloudpipe/pipelib.py:51 +msgid "Netmask to push into openvpn config" +msgstr "" + +#: ../nova/cloudpipe/pipelib.py:97 #, python-format -msgid "Killing dnsmasq threw %s" +msgid "Launching VPN for %s" msgstr "" -#: nova/network/manager.py:135 -msgid "setting network host" +#: ../nova/db/sqlalchemy/migration.py:35 +msgid "python-migrate is not installed. Exiting." msgstr "" -#: nova/network/manager.py:190 +#: ../nova/image/s3.py:99 #, python-format -msgid "Leasing IP %s" +msgid "Image %s could not be found" msgstr "" -#: nova/network/manager.py:194 +#: ../nova/api/ec2/__init__.py:121 +msgid "Too many failed authentications." 
+msgstr "" + +#: ../nova/api/ec2/__init__.py:131 #, python-format -msgid "IP %s leased that isn't associated" +msgid "" +"Access key %(access_key)s has had %(failures)d failed authentications and " +"will be locked out for %(lock_mins)d minutes." msgstr "" -#: nova/network/manager.py:197 +#: ../nova/api/ec2/__init__.py:169 ../nova/objectstore/handler.py:140 #, python-format -msgid "IP %s leased to bad mac %s vs %s" +msgid "Authentication Failure: %s" msgstr "" -#: nova/network/manager.py:205 +#: ../nova/api/ec2/__init__.py:182 #, python-format -msgid "IP %s leased that was already deallocated" +msgid "Authenticated Request For %(uname)s:%(pname)s)" msgstr "" -#: nova/network/manager.py:214 +#: ../nova/api/ec2/__init__.py:207 #, python-format -msgid "IP %s released that isn't associated" +msgid "action: %s" msgstr "" -#: nova/network/manager.py:217 +#: ../nova/api/ec2/__init__.py:209 #, python-format -msgid "IP %s released from bad mac %s vs %s" +msgid "arg: %(key)s\t\tval: %(value)s" msgstr "" -#: nova/network/manager.py:220 +#: ../nova/api/ec2/__init__.py:281 #, python-format -msgid "IP %s released that was not leased" +msgid "" +"Unauthorized request for controller=%(controller)s and action=%(action)s" msgstr "" -#: nova/network/manager.py:442 +#: ../nova/api/ec2/__init__.py:314 #, python-format -msgid "Dissassociated %s stale fixed ip(s)" +msgid "InstanceNotFound raised: %s" msgstr "" -#: nova/objectstore/handler.py:106 +#: ../nova/api/ec2/__init__.py:320 #, python-format -msgid "Unknown S3 value type %r" +msgid "VolumeNotFound raised: %s" msgstr "" -#: nova/objectstore/handler.py:137 -msgid "Authenticated request" +#: ../nova/api/ec2/__init__.py:326 +#, python-format +msgid "NotFound raised: %s" msgstr "" -#: nova/objectstore/handler.py:182 -msgid "List of buckets requested" +#: ../nova/api/ec2/__init__.py:329 +#, python-format +msgid "ApiError raised: %s" msgstr "" -#: nova/objectstore/handler.py:209 +#: ../nova/api/ec2/__init__.py:338 #, python-format -msgid 
"List keys for bucket %s" +msgid "Unexpected error raised: %s" +msgstr "" + +#: ../nova/api/ec2/__init__.py:343 +msgid "An unknown error has occurred. Please try your request again." msgstr "" -#: nova/objectstore/handler.py:217 +#: ../nova/auth/dbdriver.py:84 #, python-format -msgid "Unauthorized attempt to access bucket %s" +msgid "User %s already exists" msgstr "" -#: nova/objectstore/handler.py:235 +#: ../nova/auth/dbdriver.py:106 ../nova/auth/ldapdriver.py:232 #, python-format -msgid "Creating bucket %s" +msgid "Project can't be created because manager %s doesn't exist" msgstr "" -#: nova/objectstore/handler.py:245 +#: ../nova/auth/dbdriver.py:122 ../nova/auth/ldapdriver.py:243 #, python-format -msgid "Deleting bucket %s" +msgid "Project can't be created because user %s doesn't exist" msgstr "" -#: nova/objectstore/handler.py:249 +#: ../nova/auth/dbdriver.py:135 ../nova/auth/ldapdriver.py:229 #, python-format -msgid "Unauthorized attempt to delete bucket %s" +msgid "Project can't be created because project %s already exists" msgstr "" -#: nova/objectstore/handler.py:271 +#: ../nova/auth/dbdriver.py:157 ../nova/auth/ldapdriver.py:268 #, python-format -msgid "Getting object: %s / %s" +msgid "Project can't be modified because manager %s doesn't exist" msgstr "" -#: nova/objectstore/handler.py:274 +#: ../nova/auth/dbdriver.py:245 #, python-format -msgid "Unauthorized attempt to get object %s from bucket %s" +msgid "User \"%s\" not found" msgstr "" -#: nova/objectstore/handler.py:292 +#: ../nova/auth/dbdriver.py:248 #, python-format -msgid "Putting object: %s / %s" +msgid "Project \"%s\" not found" msgstr "" -#: nova/objectstore/handler.py:295 +#: ../nova/virt/xenapi_conn.py:129 +msgid "" +"Must specify xenapi_connection_url, xenapi_connection_username (optionally), " +"and xenapi_connection_password to use connection_type=xenapi" +msgstr "" + +#: ../nova/virt/xenapi_conn.py:311 #, python-format -msgid "Unauthorized attempt to upload object %s to bucket %s" +msgid 
"Task [%(name)s] %(task)s status: success %(result)s" msgstr "" -#: nova/objectstore/handler.py:314 +#: ../nova/virt/xenapi_conn.py:317 #, python-format -msgid "Deleting object: %s / %s" +msgid "Task [%(name)s] %(task)s status: %(status)s %(error_info)s" msgstr "" -#: nova/objectstore/handler.py:393 +#: ../nova/virt/xenapi_conn.py:331 ../nova/virt/xenapi_conn.py:344 #, python-format -msgid "Not authorized to upload image: invalid directory %s" +msgid "Got exception: %s" msgstr "" -#: nova/objectstore/handler.py:401 +#: ../nova/compute/monitor.py:259 #, python-format -msgid "Not authorized to upload image: unauthorized bucket %s" +msgid "updating %s..." +msgstr "" + +#: ../nova/compute/monitor.py:289 +msgid "unexpected error during update" msgstr "" -#: nova/objectstore/handler.py:406 +#: ../nova/compute/monitor.py:356 #, python-format -msgid "Starting image upload: %s" +msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\"" msgstr "" -#: nova/objectstore/handler.py:420 +#: ../nova/compute/monitor.py:379 #, python-format -msgid "Not authorized to update attributes of image %s" +msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\"" msgstr "" -#: nova/objectstore/handler.py:428 +#: ../nova/compute/monitor.py:414 +msgid "unexpected exception getting connection" +msgstr "" + +#: ../nova/compute/monitor.py:429 #, python-format -msgid "Toggling publicity flag of image %s %r" +msgid "Found instance: %s" msgstr "" -#: nova/objectstore/handler.py:433 +#: ../nova/volume/san.py:67 #, python-format -msgid "Updating user fields on image %s" +msgid "Could not find iSCSI export for volume %s" msgstr "" -#: nova/objectstore/handler.py:447 +#: ../nova/api/ec2/apirequest.py:100 #, python-format -msgid "Unauthorized attempt to delete image %s" +msgid "" +"Unsupported API request: controller = %(controller)s, action = %(action)s" msgstr "" -#: nova/objectstore/handler.py:452 +#: ../nova/api/openstack/__init__.py:55 #, python-format -msgid "Deleted image: %s" +msgid 
"Caught error: %s" msgstr "" -#: nova/scheduler/chance.py:37 nova/scheduler/simple.py:73 -#: nova/scheduler/simple.py:106 nova/scheduler/simple.py:118 -msgid "No hosts found" +#: ../nova/api/openstack/__init__.py:76 +msgid "Including admin operations in API." msgstr "" -#: nova/scheduler/driver.py:66 -msgid "Must implement a fallback schedule" +#: ../nova/console/xvp.py:99 +msgid "Rebuilding xvp conf" msgstr "" -#: nova/scheduler/manager.py:69 +#: ../nova/console/xvp.py:116 #, python-format -msgid "Casting to %s %s for %s" +msgid "Re-wrote %s" msgstr "" -#: nova/scheduler/simple.py:63 -msgid "All hosts have too many cores" +#: ../nova/console/xvp.py:121 +msgid "Stopping xvp" msgstr "" -#: nova/scheduler/simple.py:95 -msgid "All hosts have too many gigabytes" +#: ../nova/console/xvp.py:134 +msgid "Starting xvp" msgstr "" -#: nova/scheduler/simple.py:115 -msgid "All hosts have too many networks" +#: ../nova/console/xvp.py:141 +#, python-format +msgid "Error starting xvp: %s" msgstr "" -#: nova/tests/test_cloud.py:198 -msgid "Can't test instances without a real virtual env." +#: ../nova/console/xvp.py:144 +msgid "Restarting xvp" msgstr "" -#: nova/tests/test_cloud.py:210 -#, python-format -msgid "Need to watch instance %s until it's running..." +#: ../nova/console/xvp.py:146 +msgid "xvp not running..." msgstr "" -#: nova/tests/test_compute.py:104 -#, python-format -msgid "Running instances: %s" +#: ../bin/nova-manage.py:272 +msgid "" +"The above error may show that the database has not been created.\n" +"Please create a database using nova-manage sync db before running this " +"command." msgstr "" -#: nova/tests/test_compute.py:110 -#, python-format -msgid "After terminating instances: %s" +#: ../bin/nova-manage.py:426 +msgid "" +"No more networks available. 
If this is a new installation, you need\n" +"to call something like this:\n" +"\n" +" nova-manage network create 10.0.0.0/8 10 64\n" +"\n" +msgstr "" + +#: ../bin/nova-manage.py:431 +msgid "" +"The above error may show that the certificate db has not been created.\n" +"Please create a database by running a nova-api server on this host." +msgstr "" + +#: ../bin/nova-manage.py:447 ../bin/nova-manage.py:536 +msgid "network" +msgstr "" + +#: ../bin/nova-manage.py:448 +msgid "IP address" +msgstr "" + +#: ../bin/nova-manage.py:449 +msgid "MAC address" +msgstr "" + +#: ../bin/nova-manage.py:450 +msgid "hostname" msgstr "" -#: nova/tests/test_rpc.py:89 +#: ../bin/nova-manage.py:451 +msgid "host" +msgstr "" + +#: ../bin/nova-manage.py:537 +msgid "netmask" +msgstr "" + +#: ../bin/nova-manage.py:538 +msgid "start address" +msgstr "" + +#: ../nova/virt/disk.py:69 #, python-format -msgid "Nested received %s, %s" +msgid "Failed to load partition: %s" msgstr "" -#: nova/tests/test_rpc.py:94 +#: ../nova/virt/disk.py:91 #, python-format -msgid "Nested return %s" +msgid "Failed to mount filesystem: %s" msgstr "" -#: nova/tests/test_rpc.py:119 nova/tests/test_rpc.py:125 +#: ../nova/virt/disk.py:124 #, python-format -msgid "Received %s" +msgid "nbd device %s did not show up" msgstr "" -#: nova/tests/test_volume.py:162 +#: ../nova/virt/disk.py:128 #, python-format -msgid "Target %s allocated" +msgid "Could not attach image to loopback: %s" msgstr "" -#: nova/virt/connection.py:73 -msgid "Failed to open connection to the hypervisor" +#: ../nova/virt/disk.py:151 +msgid "No free nbd devices" msgstr "" -#: nova/virt/fake.py:210 +#: ../doc/ext/nova_todo.py:46 #, python-format -msgid "Instance %s Not Found" +msgid "%(filename)s, line %(line_info)d" msgstr "" -#: nova/virt/hyperv.py:118 +#. 
FIXME(chiradeep): implement this +#: ../nova/virt/hyperv.py:118 msgid "In init host" msgstr "" -#: nova/virt/hyperv.py:131 +#: ../nova/virt/hyperv.py:131 #, python-format msgid "Attempt to create duplicate vm %s" msgstr "" -#: nova/virt/hyperv.py:148 +#: ../nova/virt/hyperv.py:148 #, python-format msgid "Starting VM %s " msgstr "" -#: nova/virt/hyperv.py:150 +#: ../nova/virt/hyperv.py:150 #, python-format msgid "Started VM %s " msgstr "" -#: nova/virt/hyperv.py:152 +#: ../nova/virt/hyperv.py:152 #, python-format msgid "spawn vm failed: %s" msgstr "" -#: nova/virt/hyperv.py:169 +#: ../nova/virt/hyperv.py:169 #, python-format msgid "Failed to create VM %s" msgstr "" -#: nova/virt/hyperv.py:171 nova/virt/xenapi/vm_utils.py:125 -#, python-format -msgid "Created VM %s..." -msgstr "" - -#: nova/virt/hyperv.py:188 +#: ../nova/virt/hyperv.py:188 #, python-format msgid "Set memory for vm %s..." msgstr "" -#: nova/virt/hyperv.py:198 +#: ../nova/virt/hyperv.py:198 #, python-format msgid "Set vcpus for vm %s..." 
msgstr "" -#: nova/virt/hyperv.py:202 +#: ../nova/virt/hyperv.py:202 #, python-format -msgid "Creating disk for %s by attaching disk file %s" +msgid "Creating disk for %(vm_name)s by attaching disk file %(vhdfile)s" msgstr "" -#: nova/virt/hyperv.py:227 +#: ../nova/virt/hyperv.py:227 #, python-format msgid "Failed to add diskdrive to VM %s" msgstr "" -#: nova/virt/hyperv.py:230 +#: ../nova/virt/hyperv.py:230 #, python-format msgid "New disk drive path is %s" msgstr "" -#: nova/virt/hyperv.py:247 +#: ../nova/virt/hyperv.py:247 #, python-format msgid "Failed to add vhd file to VM %s" msgstr "" -#: nova/virt/hyperv.py:249 +#: ../nova/virt/hyperv.py:249 #, python-format msgid "Created disk for %s" msgstr "" -#: nova/virt/hyperv.py:253 +#: ../nova/virt/hyperv.py:253 #, python-format msgid "Creating nic for %s " msgstr "" -#: nova/virt/hyperv.py:272 +#: ../nova/virt/hyperv.py:272 msgid "Failed creating a port on the external vswitch" msgstr "" -#: nova/virt/hyperv.py:273 +#: ../nova/virt/hyperv.py:273 #, python-format msgid "Failed creating port for %s" msgstr "" -#: nova/virt/hyperv.py:275 +#: ../nova/virt/hyperv.py:276 #, python-format -msgid "Created switch port %s on switch %s" +msgid "Created switch port %(vm_name)s on switch %(ext_path)s" msgstr "" -#: nova/virt/hyperv.py:285 +#: ../nova/virt/hyperv.py:286 #, python-format msgid "Failed to add nic to VM %s" msgstr "" -#: nova/virt/hyperv.py:287 +#: ../nova/virt/hyperv.py:288 #, python-format msgid "Created nic for %s " msgstr "" -#: nova/virt/hyperv.py:320 +#: ../nova/virt/hyperv.py:321 #, python-format msgid "WMI job failed: %s" msgstr "" -#: nova/virt/hyperv.py:322 +#: ../nova/virt/hyperv.py:325 #, python-format -msgid "WMI job succeeded: %s, Elapsed=%s " +msgid "WMI job succeeded: %(desc)s, Elapsed=%(elap)s " msgstr "" -#: nova/virt/hyperv.py:358 +#: ../nova/virt/hyperv.py:361 #, python-format msgid "Got request to destroy vm %s" msgstr "" -#: nova/virt/hyperv.py:383 +#: ../nova/virt/hyperv.py:386 #, 
python-format msgid "Failed to destroy vm %s" msgstr "" -#: nova/virt/hyperv.py:389 +#: ../nova/virt/hyperv.py:393 #, python-format -msgid "Del: disk %s vm %s" +msgid "Del: disk %(vhdfile)s vm %(instance_name)s" msgstr "" -#: nova/virt/hyperv.py:405 +#: ../nova/virt/hyperv.py:415 #, python-format msgid "" -"Got Info for vm %s: state=%s, mem=%s, num_cpu=%s, " -"cpu_time=%s" +"Got Info for vm %(instance_id)s: state=%(state)s, mem=%(memusage)s, " +"num_cpu=%(numprocs)s, cpu_time=%(uptime)s" msgstr "" -#: nova/virt/hyperv.py:424 nova/virt/xenapi/vm_utils.py:301 +#: ../nova/virt/hyperv.py:451 #, python-format -msgid "duplicate name found: %s" +msgid "Successfully changed vm state of %(vm_name)s to %(req_state)s" msgstr "" -#: nova/virt/hyperv.py:444 +#: ../nova/virt/hyperv.py:454 #, python-format -msgid "Successfully changed vm state of %s to %s" +msgid "Failed to change vm state of %(vm_name)s to %(req_state)s" msgstr "" -#: nova/virt/hyperv.py:447 nova/virt/hyperv.py:449 +#: ../nova/compute/api.py:71 #, python-format -msgid "Failed to change vm state of %s to %s" +msgid "Instance %d was not found in get_network_topic" msgstr "" -#: nova/virt/images.py:70 +#: ../nova/compute/api.py:77 #, python-format -msgid "Finished retreving %s -- placed in %s" +msgid "Instance %d has no host" msgstr "" -#: nova/virt/libvirt_conn.py:144 +#: ../nova/compute/api.py:97 #, python-format -msgid "Connecting to libvirt: %s" +msgid "Quota exceeeded for %(pid)s, tried to run %(min_count)s instances" msgstr "" -#: nova/virt/libvirt_conn.py:157 -msgid "Connection to libvirt broke" +#: ../nova/compute/api.py:99 +#, python-format +msgid "" +"Instance quota exceeded. You can only run %s more instances of this type." msgstr "" -#: nova/virt/libvirt_conn.py:229 +#: ../nova/compute/api.py:112 +msgid "Creating a raw instance" +msgstr "" + +#: ../nova/compute/api.py:160 #, python-format -msgid "instance %s: deleting instance files %s" +msgid "Going to run %s instances..." 
msgstr "" -#: nova/virt/libvirt_conn.py:271 +#: ../nova/compute/api.py:187 #, python-format -msgid "No disk at %s" +msgid "Casting to scheduler for %(pid)s/%(uid)s's instance %(instance_id)s" msgstr "" -#: nova/virt/libvirt_conn.py:278 -msgid "Instance snapshotting is not supported for libvirtat this time" +#: ../nova/compute/api.py:292 +#, python-format +msgid "Going to try to terminate %s" msgstr "" -#: nova/virt/libvirt_conn.py:294 +#: ../nova/compute/api.py:296 #, python-format -msgid "instance %s: rebooted" +msgid "Instance %d was not found during terminate" msgstr "" -#: nova/virt/libvirt_conn.py:297 +#: ../nova/compute/api.py:301 #, python-format -msgid "_wait_for_reboot failed: %s" +msgid "Instance %d is already being terminated" msgstr "" -#: nova/virt/libvirt_conn.py:340 +#: ../nova/compute/api.py:481 #, python-format -msgid "instance %s: rescued" +msgid "Invalid device specified: %s. Example device: /dev/vdb" msgstr "" -#: nova/virt/libvirt_conn.py:343 +#: ../nova/compute/api.py:496 +msgid "Volume isn't attached to anything!" +msgstr "" + +#: ../nova/rpc.py:98 #, python-format -msgid "_wait_for_rescue failed: %s" +msgid "" +"AMQP server on %(fl_host)s:%(fl_port)d is unreachable. Trying again in " +"%(fl_intv)d seconds." msgstr "" -#: nova/virt/libvirt_conn.py:370 +#: ../nova/rpc.py:103 #, python-format -msgid "instance %s: is running" +msgid "Unable to connect to AMQP server after %d tries. Shutting down." msgstr "" -#: nova/virt/libvirt_conn.py:381 +#: ../nova/rpc.py:122 +msgid "Reconnected to queue" +msgstr "" + +#: ../nova/rpc.py:129 +msgid "Failed to fetch message from queue" +msgstr "" + +#: ../nova/rpc.py:159 #, python-format -msgid "instance %s: booted" +msgid "Initing the Adapter Consumer for %s" msgstr "" -#: nova/virt/libvirt_conn.py:384 nova/virt/xenapi/vmops.py:116 +#: ../nova/rpc.py:178 #, python-format -msgid "instance %s: failed to boot" +msgid "received %s" msgstr "" -#: nova/virt/libvirt_conn.py:395 +#. 
NOTE(vish): we may not want to ack here, but that means that bad +#. messages stay in the queue indefinitely, so for now +#. we just log the message and send an error string +#. back to the caller +#: ../nova/rpc.py:191 #, python-format -msgid "virsh said: %r" +msgid "no method for message: %s" msgstr "" -#: nova/virt/libvirt_conn.py:399 -msgid "cool, it's a device" +#: ../nova/rpc.py:192 +#, python-format +msgid "No method for message: %s" msgstr "" -#: nova/virt/libvirt_conn.py:407 +#: ../nova/rpc.py:253 #, python-format -msgid "data: %r, fpath: %r" +msgid "Returning exception %s to caller" msgstr "" -#: nova/virt/libvirt_conn.py:415 +#: ../nova/rpc.py:294 #, python-format -msgid "Contents of file %s: %r" +msgid "unpacked context: %s" +msgstr "" + +#: ../nova/rpc.py:313 +msgid "Making asynchronous call..." msgstr "" -#: nova/virt/libvirt_conn.py:449 +#: ../nova/rpc.py:316 #, python-format -msgid "instance %s: Creating image" +msgid "MSG_ID is %s" +msgstr "" + +#: ../nova/rpc.py:354 +msgid "Making asynchronous cast..." msgstr "" -#: nova/virt/libvirt_conn.py:505 +#: ../nova/rpc.py:364 #, python-format -msgid "instance %s: injecting key into image %s" +msgid "response %s" msgstr "" -#: nova/virt/libvirt_conn.py:508 +#: ../nova/rpc.py:373 #, python-format -msgid "instance %s: injecting net into image %s" +msgid "topic is %s" msgstr "" -#: nova/virt/libvirt_conn.py:516 +#: ../nova/rpc.py:374 #, python-format -msgid "instance %s: ignoring error injecting data into image %s (%s)" +msgid "message %s" msgstr "" -#: nova/virt/libvirt_conn.py:544 nova/virt/libvirt_conn.py:547 +#: ../nova/volume/driver.py:78 #, python-format -msgid "instance %s: starting toXML method" +msgid "Recovering from a failed execute. 
Try number %s" msgstr "" -#: nova/virt/libvirt_conn.py:589 +#: ../nova/volume/driver.py:87 #, python-format -msgid "instance %s: finished toXML method" +msgid "volume group %s doesn't exist" +msgstr "" + +#: ../nova/volume/driver.py:220 +#, python-format +msgid "FAKE AOE: %s" +msgstr "" + +#: ../nova/volume/driver.py:233 +msgid "Skipping ensure_export. No iscsi_target " +msgstr "" + +#: ../nova/volume/driver.py:279 ../nova/volume/driver.py:288 +msgid "Skipping remove_export. No iscsi_target " +msgstr "" + +#: ../nova/volume/driver.py:347 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: ../nova/volume/driver.py:359 +#, python-format +msgid "rbd has no pool %s" msgstr "" -#: nova/virt/xenapi_conn.py:113 +#: ../nova/volume/driver.py:414 +#, python-format +msgid "Sheepdog is not working: %s" +msgstr "" + +#: ../nova/volume/driver.py:416 +msgid "Sheepdog is not working" +msgstr "" + +#: ../nova/wsgi.py:68 +#, python-format +msgid "Starting %(arg0)s on %(host)s:%(port)s" +msgstr "" + +#: ../nova/wsgi.py:147 +msgid "You must implement __call__" +msgstr "" + +#: ../bin/nova-instancemonitor.py:55 +msgid "Starting instance monitor" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:58 +msgid "leasing ip" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:73 +msgid "Adopted old lease or got a change of mac/hostname" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:80 +msgid "releasing ip" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:123 +#, python-format msgid "" -"Must specify xenapi_connection_url, xenapi_connection_username (optionally), " -"and xenapi_connection_password to use connection_type=xenapi" +"Called %(action)s for mac %(mac)s with ip %(ip)s and hostname %(hostname)s " +"on interface %(interface)s" msgstr "" -#: nova/virt/xenapi_conn.py:263 +#: ../nova/virt/fake.py:239 #, python-format -msgid "Task [%s] %s status: success %s" +msgid "Instance %s Not Found" msgstr "" -#: nova/virt/xenapi_conn.py:271 +#: ../nova/network/manager.py:153 #, python-format -msgid "Task [%s] %s 
status: %s %s" +msgid "Dissassociated %s stale fixed ip(s)" msgstr "" -#: nova/virt/xenapi_conn.py:287 nova/virt/xenapi_conn.py:300 +#: ../nova/network/manager.py:157 +msgid "setting network host" +msgstr "" + +#: ../nova/network/manager.py:212 #, python-format -msgid "Got exception: %s" +msgid "Leasing IP %s" msgstr "" -#: nova/virt/xenapi/fake.py:72 +#: ../nova/network/manager.py:216 #, python-format -msgid "%s: _db_content => %s" +msgid "IP %s leased that isn't associated" msgstr "" -#: nova/virt/xenapi/fake.py:247 nova/virt/xenapi/fake.py:338 -#: nova/virt/xenapi/fake.py:356 nova/virt/xenapi/fake.py:404 -msgid "Raising NotImplemented" +#: ../nova/network/manager.py:220 +#, python-format +msgid "IP %(address)s leased to bad mac %(inst_addr)s vs %(mac)s" msgstr "" -#: nova/virt/xenapi/fake.py:249 +#: ../nova/network/manager.py:228 #, python-format -msgid "xenapi.fake does not have an implementation for %s" +msgid "IP %s leased that was already deallocated" msgstr "" -#: nova/virt/xenapi/fake.py:283 +#: ../nova/network/manager.py:233 #, python-format -msgid "Calling %s %s" +msgid "Releasing IP %s" msgstr "" -#: nova/virt/xenapi/fake.py:288 +#: ../nova/network/manager.py:237 #, python-format -msgid "Calling getter %s" +msgid "IP %s released that isn't associated" msgstr "" -#: nova/virt/xenapi/fake.py:340 +#: ../nova/network/manager.py:241 #, python-format +msgid "IP %(address)s released from bad mac %(inst_addr)s vs %(mac)s" +msgstr "" + +#: ../nova/network/manager.py:244 +#, python-format +msgid "IP %s released that was not leased" +msgstr "" + +#: ../nova/network/manager.py:519 msgid "" -"xenapi.fake does not have an implementation for %s or it has been called " -"with the wrong number of arguments" +"The sum between the number of networks and the vlan start cannot be greater " +"than 4094" msgstr "" -#: nova/virt/xenapi/network_utils.py:40 +#: ../nova/virt/xenapi/volume_utils.py:57 #, python-format -msgid "Found non-unique network for bridge %s" +msgid 
"Introducing %s..." msgstr "" -#: nova/virt/xenapi/network_utils.py:43 +#: ../nova/virt/xenapi/volume_utils.py:74 #, python-format -msgid "Found no network for bridge %s" +msgid "Introduced %(label)s as %(sr_ref)s." msgstr "" -#: nova/virt/xenapi/vm_utils.py:127 +#: ../nova/virt/xenapi/volume_utils.py:78 +msgid "Unable to create Storage Repository" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:90 #, python-format -msgid "Created VM %s as %s." +msgid "Unable to find SR from VBD %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:147 +#: ../nova/virt/xenapi/volume_utils.py:96 #, python-format -msgid "Creating VBD for VM %s, VDI %s ... " +msgid "Forgetting SR %s ... " msgstr "" -#: nova/virt/xenapi/vm_utils.py:149 +#: ../nova/virt/xenapi/volume_utils.py:101 #, python-format -msgid "Created VBD %s for VM %s, VDI %s." +msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:165 +#: ../nova/virt/xenapi/volume_utils.py:107 #, python-format -msgid "VBD not found in instance %s" +msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:175 +#: ../nova/virt/xenapi/volume_utils.py:111 #, python-format -msgid "Unable to unplug VBD %s" +msgid "Forgetting SR %s done." msgstr "" -#: nova/virt/xenapi/vm_utils.py:187 +#: ../nova/virt/xenapi/volume_utils.py:113 #, python-format -msgid "Unable to destroy VBD %s" +msgid "Ignoring exception %(exc)s when forgetting SR %(sr_ref)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:202 +#: ../nova/virt/xenapi/volume_utils.py:123 #, python-format -msgid "Creating VIF for VM %s, network %s." +msgid "Unable to introduce VDI on SR %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:205 +#: ../nova/virt/xenapi/volume_utils.py:128 #, python-format -msgid "Created VIF %s for VM %s, network %s." 
+msgid "Unable to get record of VDI %s on" msgstr "" -#: nova/virt/xenapi/vm_utils.py:216 +#: ../nova/virt/xenapi/volume_utils.py:146 #, python-format -msgid "Snapshotting VM %s with label '%s'..." +msgid "Unable to introduce VDI for SR %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:229 +#: ../nova/virt/xenapi/volume_utils.py:175 #, python-format -msgid "Created snapshot %s from VM %s." +msgid "Unable to obtain target information %(device_path)s, %(mountpoint)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:243 +#: ../nova/virt/xenapi/volume_utils.py:197 #, python-format -msgid "Asking xapi to upload %s as '%s'" +msgid "Mountpoint cannot be translated: %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:261 +#: ../nova/objectstore/image.py:262 #, python-format -msgid "Asking xapi to fetch %s as %s" +msgid "Failed to decrypt private key: %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:279 +#: ../nova/objectstore/image.py:269 #, python-format -msgid "Looking up vdi %s for PV kernel" +msgid "Failed to decrypt initialization vector: %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:290 +#: ../nova/objectstore/image.py:277 #, python-format -msgid "PV Kernel in VDI:%d" +msgid "Failed to decrypt image file %(image_file)s: %(err)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:318 +#: ../nova/objectstore/handler.py:106 #, python-format -msgid "VDI %s is still available" +msgid "Unknown S3 value type %r" msgstr "" -#: nova/virt/xenapi/vm_utils.py:331 +#: ../nova/objectstore/handler.py:137 +msgid "Authenticated request" +msgstr "" + +#: ../nova/objectstore/handler.py:182 +msgid "List of buckets requested" +msgstr "" + +#: ../nova/objectstore/handler.py:209 #, python-format -msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgid "List keys for bucket %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:333 +#: ../nova/objectstore/handler.py:217 #, python-format -msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgid "Unauthorized attempt to access bucket %s" msgstr "" -#: 
nova/virt/xenapi/vm_utils.py:390 +#: ../nova/objectstore/handler.py:235 #, python-format -msgid "VHD %s has parent %s" +msgid "Creating bucket %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:407 +#: ../nova/objectstore/handler.py:245 #, python-format -msgid "Re-scanning SR %s" +msgid "Deleting bucket %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:431 +#: ../nova/objectstore/handler.py:249 #, python-format -msgid "Parent %s doesn't match original parent %s, waiting for coalesce..." +msgid "Unauthorized attempt to delete bucket %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:448 +#: ../nova/objectstore/handler.py:273 #, python-format -msgid "No VDIs found for VM %s" +msgid "Getting object: %(bname)s / %(nm)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:452 +#: ../nova/objectstore/handler.py:276 #, python-format -msgid "Unexpected number of VDIs (%s) found for VM %s" +msgid "Unauthorized attempt to get object %(nm)s from bucket %(bname)s" msgstr "" -#: nova/virt/xenapi/vmops.py:62 +#: ../nova/objectstore/handler.py:296 #, python-format -msgid "Attempted to create non-unique name %s" +msgid "Putting object: %(bname)s / %(nm)s" msgstr "" -#: nova/virt/xenapi/vmops.py:99 +#: ../nova/objectstore/handler.py:299 #, python-format -msgid "Starting VM %s..." +msgid "Unauthorized attempt to upload object %(nm)s to bucket %(bname)s" msgstr "" -#: nova/virt/xenapi/vmops.py:101 +#: ../nova/objectstore/handler.py:318 #, python-format -msgid "Spawning VM %s created %s." 
+msgid "Deleting object: %(bname)s / %(nm)s" msgstr "" -#: nova/virt/xenapi/vmops.py:112 +#: ../nova/objectstore/handler.py:322 #, python-format -msgid "Instance %s: booted" +msgid "Unauthorized attempt to delete object %(nm)s from bucket %(bname)s" msgstr "" -#: nova/virt/xenapi/vmops.py:137 +#: ../nova/objectstore/handler.py:396 #, python-format -msgid "Instance not present %s" +msgid "Not authorized to upload image: invalid directory %s" msgstr "" -#: nova/virt/xenapi/vmops.py:166 +#: ../nova/objectstore/handler.py:404 #, python-format -msgid "Starting snapshot for VM %s" +msgid "Not authorized to upload image: unauthorized bucket %s" msgstr "" -#: nova/virt/xenapi/vmops.py:174 +#: ../nova/objectstore/handler.py:409 #, python-format -msgid "Unable to Snapshot %s: %s" +msgid "Starting image upload: %s" msgstr "" -#: nova/virt/xenapi/vmops.py:184 +#: ../nova/objectstore/handler.py:423 #, python-format -msgid "Finished snapshot and upload for VM %s" +msgid "Not authorized to update attributes of image %s" msgstr "" -#: nova/virt/xenapi/vmops.py:252 +#: ../nova/objectstore/handler.py:431 #, python-format -msgid "suspend: instance not present %s" +msgid "Toggling publicity flag of image %(image_id)s %(newstatus)r" msgstr "" -#: nova/virt/xenapi/vmops.py:262 +#. other attributes imply update +#: ../nova/objectstore/handler.py:436 #, python-format -msgid "resume: instance not present %s" +msgid "Updating user fields on image %s" msgstr "" -#: nova/virt/xenapi/vmops.py:271 +#: ../nova/objectstore/handler.py:450 #, python-format -msgid "Instance not found %s" +msgid "Unauthorized attempt to delete image %s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:57 +#: ../nova/objectstore/handler.py:455 #, python-format -msgid "Introducing %s..." 
+msgid "Deleted image: %s" +msgstr "" + +#: ../nova/auth/manager.py:259 +#, python-format +msgid "Looking up user: %r" msgstr "" -#: nova/virt/xenapi/volume_utils.py:74 +#: ../nova/auth/manager.py:263 #, python-format -msgid "Introduced %s as %s." +msgid "Failed authorization for access key %s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:78 -msgid "Unable to create Storage Repository" +#: ../nova/auth/manager.py:264 +#, python-format +msgid "No user found for access key %s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:90 +#: ../nova/auth/manager.py:270 #, python-format -msgid "Unable to find SR from VBD %s" +msgid "Using project name = user name (%s)" msgstr "" -#: nova/virt/xenapi/volume_utils.py:96 +#: ../nova/auth/manager.py:277 #, python-format -msgid "Forgetting SR %s ... " +msgid "failed authorization: no project named %(pjid)s (user=%(uname)s)" msgstr "" -#: nova/virt/xenapi/volume_utils.py:101 +#: ../nova/auth/manager.py:279 #, python-format -msgid "Ignoring exception %s when getting PBDs for %s" +msgid "No project called %s could be found" msgstr "" -#: nova/virt/xenapi/volume_utils.py:107 +#: ../nova/auth/manager.py:287 #, python-format -msgid "Ignoring exception %s when unplugging PBD %s" +msgid "" +"Failed authorization: user %(uname)s not admin and not member of project " +"%(pjname)s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:111 +#: ../nova/auth/manager.py:289 #, python-format -msgid "Forgetting SR %s done." 
+msgid "User %(uid)s is not a member of project %(pjid)s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:113 +#: ../nova/auth/manager.py:298 ../nova/auth/manager.py:309 #, python-format -msgid "Ignoring exception %s when forgetting SR %s" +msgid "Invalid signature for user %s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:123 +#: ../nova/auth/manager.py:299 ../nova/auth/manager.py:310 +msgid "Signature does not match" +msgstr "" + +#: ../nova/auth/manager.py:380 +msgid "Must specify project" +msgstr "" + +#: ../nova/auth/manager.py:414 #, python-format -msgid "Unable to introduce VDI on SR %s" +msgid "The %s role can not be found" msgstr "" -#: nova/virt/xenapi/volume_utils.py:128 +#: ../nova/auth/manager.py:416 #, python-format -msgid "Unable to get record of VDI %s on" +msgid "The %s role is global only" msgstr "" -#: nova/virt/xenapi/volume_utils.py:146 +#: ../nova/auth/manager.py:420 #, python-format -msgid "Unable to introduce VDI for SR %s" +msgid "Adding role %(role)s to user %(uid)s in project %(pid)s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:175 +#: ../nova/auth/manager.py:423 #, python-format -msgid "Unable to obtain target information %s, %s" +msgid "Adding sitewide role %(role)s to user %(uid)s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:197 +#: ../nova/auth/manager.py:448 #, python-format -msgid "Mountpoint cannot be translated: %s" +msgid "Removing role %(role)s from user %(uid)s on project %(pid)s" msgstr "" -#: nova/virt/xenapi/volumeops.py:51 +#: ../nova/auth/manager.py:451 #, python-format -msgid "Attach_volume: %s, %s, %s" +msgid "Removing sitewide role %(role)s from user %(uid)s" msgstr "" -#: nova/virt/xenapi/volumeops.py:69 +#: ../nova/auth/manager.py:515 #, python-format -msgid "Unable to create VDI on SR %s for instance %s" +msgid "Created project %(name)s with manager %(manager_user)s" msgstr "" -#: nova/virt/xenapi/volumeops.py:81 +#: ../nova/auth/manager.py:533 #, python-format -msgid "Unable to use SR %s for instance %s" 
+msgid "modifying project %s" msgstr "" -#: nova/virt/xenapi/volumeops.py:93 +#: ../nova/auth/manager.py:545 #, python-format -msgid "Unable to attach volume to instance %s" +msgid "Adding user %(uid)s to project %(pid)s" msgstr "" -#: nova/virt/xenapi/volumeops.py:95 +#: ../nova/auth/manager.py:566 #, python-format -msgid "Mountpoint %s attached to instance %s" +msgid "Remove user %(uid)s from project %(pid)s" msgstr "" -#: nova/virt/xenapi/volumeops.py:106 +#: ../nova/auth/manager.py:592 #, python-format -msgid "Detach_volume: %s, %s" +msgid "Deleting project %s" msgstr "" -#: nova/virt/xenapi/volumeops.py:113 +#: ../nova/auth/manager.py:650 #, python-format -msgid "Unable to locate volume %s" +msgid "Created user %(rvname)s (admin: %(rvadmin)r)" msgstr "" -#: nova/virt/xenapi/volumeops.py:121 +#: ../nova/auth/manager.py:659 #, python-format -msgid "Unable to detach volume %s" +msgid "Deleting user %s" msgstr "" -#: nova/virt/xenapi/volumeops.py:128 +#: ../nova/auth/manager.py:669 #, python-format -msgid "Mountpoint %s detached from instance %s" +msgid "Access Key change for user %s" msgstr "" -#: nova/volume/api.py:44 +#: ../nova/auth/manager.py:671 #, python-format -msgid "Quota exceeeded for %s, tried to create %sG volume" +msgid "Secret Key change for user %s" msgstr "" -#: nova/volume/api.py:46 +#: ../nova/auth/manager.py:673 #, python-format -msgid "Volume quota exceeded. 
You cannot create a volume of size %s" +msgid "Admin status set to %(admin)r for user %(uid)s" msgstr "" -#: nova/volume/api.py:70 nova/volume/api.py:95 -msgid "Volume status must be available" +#: ../nova/auth/manager.py:722 +#, python-format +msgid "No vpn data for project %s" msgstr "" -#: nova/volume/api.py:97 -msgid "Volume is already attached" +#: ../nova/service.py:161 +#, python-format +msgid "Starting %(topic)s node (version %(vcs_string)s)" msgstr "" -#: nova/volume/api.py:103 -msgid "Volume is already detached" +#: ../nova/service.py:174 +msgid "Service killed that has no database entry" +msgstr "" + +#: ../nova/service.py:195 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: ../nova/service.py:207 +msgid "Recovered model server connection!" msgstr "" -#: nova/volume/driver.py:76 +#: ../nova/service.py:213 +msgid "model server went away" +msgstr "" + +#: ../nova/auth/ldapdriver.py:174 #, python-format -msgid "Recovering from a failed execute. Try number %s" +msgid "LDAP user %s already exists" msgstr "" -#: nova/volume/driver.py:85 +#: ../nova/auth/ldapdriver.py:205 #, python-format -msgid "volume group %s doesn't exist" +msgid "LDAP object for %s doesn't exist" msgstr "" -#: nova/volume/driver.py:210 +#: ../nova/auth/ldapdriver.py:348 #, python-format -msgid "FAKE AOE: %s" +msgid "User %s doesn't exist" msgstr "" -#: nova/volume/driver.py:315 +#: ../nova/auth/ldapdriver.py:472 #, python-format -msgid "FAKE ISCSI: %s" +msgid "Group can't be created because group %s already exists" msgstr "" -#: nova/volume/manager.py:85 +#: ../nova/auth/ldapdriver.py:478 #, python-format -msgid "Re-exporting %s volumes" +msgid "Group can't be created because user %s doesn't exist" msgstr "" -#: nova/volume/manager.py:93 +#: ../nova/auth/ldapdriver.py:495 #, python-format -msgid "volume %s: creating" +msgid "User %s can't be searched in group because the user doesn't exist" msgstr "" -#: nova/volume/manager.py:102 +#: 
../nova/auth/ldapdriver.py:507 #, python-format -msgid "volume %s: creating lv of size %sG" +msgid "User %s can't be added to the group because the user doesn't exist" msgstr "" -#: nova/volume/manager.py:106 +#: ../nova/auth/ldapdriver.py:510 ../nova/auth/ldapdriver.py:521 #, python-format -msgid "volume %s: creating export" +msgid "The group at dn %s doesn't exist" msgstr "" -#: nova/volume/manager.py:113 +#: ../nova/auth/ldapdriver.py:513 #, python-format -msgid "volume %s: created successfully" +msgid "User %(uid)s is already a member of the group %(group_dn)s" msgstr "" -#: nova/volume/manager.py:121 -msgid "Volume is still attached" +#: ../nova/auth/ldapdriver.py:524 +#, python-format +msgid "" +"User %s can't be removed from the group because the user doesn't exist" msgstr "" -#: nova/volume/manager.py:123 -msgid "Volume is not local to this node" +#: ../nova/auth/ldapdriver.py:528 +#, python-format +msgid "User %s is not a member of the group" msgstr "" -#: nova/volume/manager.py:124 +#: ../nova/auth/ldapdriver.py:542 #, python-format -msgid "volume %s: removing export" +msgid "" +"Attempted to remove the last member of a group. Deleting the group at %s " +"instead." 
msgstr "" -#: nova/volume/manager.py:126 +#: ../nova/auth/ldapdriver.py:549 #, python-format -msgid "volume %s: deleting" +msgid "User %s can't be removed from all because the user doesn't exist" msgstr "" -#: nova/volume/manager.py:129 +#: ../nova/auth/ldapdriver.py:564 #, python-format -msgid "volume %s: deleted successfully" -msgstr "bind %s: slettet" +msgid "Group at dn %s doesn't exist" +msgstr "" + +#: ../nova/virt/xenapi/network_utils.py:40 +#, python-format +msgid "Found non-unique network for bridge %s" +msgstr "" + +#: ../nova/virt/xenapi/network_utils.py:43 +#, python-format +msgid "Found no network for bridge %s" +msgstr "" + +#: ../nova/api/ec2/admin.py:97 +#, python-format +msgid "Creating new user: %s" +msgstr "" + +#: ../nova/api/ec2/admin.py:105 +#, python-format +msgid "Deleting user: %s" +msgstr "" + +#: ../nova/api/ec2/admin.py:127 +#, python-format +msgid "Adding role %(role)s to user %(user)s for project %(project)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:131 +#, python-format +msgid "Adding sitewide role %(role)s to user %(user)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:137 +#, python-format +msgid "Removing role %(role)s from user %(user)s for project %(project)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:141 +#, python-format +msgid "Removing sitewide role %(role)s from user %(user)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:146 ../nova/api/ec2/admin.py:223 +msgid "operation must be add or remove" +msgstr "" + +#: ../nova/api/ec2/admin.py:159 +#, python-format +msgid "Getting x509 for user: %(name)s on project: %(project)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:177 +#, python-format +msgid "Create project %(name)s managed by %(manager_user)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:190 +#, python-format +msgid "Modify project: %(name)s managed by %(manager_user)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:200 +#, python-format +msgid "Delete project: %s" +msgstr "" + +#: ../nova/api/ec2/admin.py:214 +#, python-format +msgid 
"Adding user %(user)s to project %(project)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:218 +#, python-format +msgid "Removing user %(user)s from project %(project)s" +msgstr "" @@ -7,2131 +7,2883 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n" -"POT-Creation-Date: 2011-01-10 11:25-0800\n" -"PO-Revision-Date: 2011-02-09 10:49+0000\n" -"Last-Translator: Christian Berendt <Unknown>\n" +"POT-Creation-Date: 2011-02-21 10:03-0500\n" +"PO-Revision-Date: 2011-04-03 19:42+0000\n" +"Last-Translator: Matthias Loidolt <kedapperdrake@googlemail.com>\n" "Language-Team: German <de@li.org>\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Launchpad-Export-Date: 2011-02-10 05:13+0000\n" -"X-Generator: Launchpad (build 12177)\n" +"X-Launchpad-Export-Date: 2011-04-04 05:19+0000\n" +"X-Generator: Launchpad (build 12559)\n" -#: nova/crypto.py:46 +#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55 +#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110 +#: ../nova/scheduler/simple.py:122 +msgid "No hosts found" +msgstr "Keine Computer gefunden." + +#: ../nova/exception.py:33 +msgid "Unexpected error while running command." +msgstr "Unerwarteter Fehler bei Ausführung des Kommandos." + +#: ../nova/exception.py:36 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" +"%(description)s\n" +"Befehl: %(cmd)s\n" +"Exit-Code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" + +#: ../nova/exception.py:107 +msgid "DB exception wrapped" +msgstr "" + +#. 
exc_type, exc_value, exc_traceback = sys.exc_info() +#: ../nova/exception.py:120 +msgid "Uncaught exception" +msgstr "Nicht abgefangene Ausnahme" + +#: ../nova/volume/api.py:45 +#, python-format +msgid "Quota exceeeded for %(pid)s, tried to create %(size)sG volume" +msgstr "" + +#: ../nova/volume/api.py:47 +#, python-format +msgid "Volume quota exceeded. You cannot create a volume of size %sG" +msgstr "" + +#: ../nova/volume/api.py:71 ../nova/volume/api.py:96 +msgid "Volume status must be available" +msgstr "" + +#: ../nova/volume/api.py:98 +msgid "Volume is already attached" +msgstr "" + +#: ../nova/volume/api.py:104 +msgid "Volume is already detached" +msgstr "" + +#: ../nova/api/openstack/servers.py:72 +msgid "Failed to read private ip" +msgstr "" + +#: ../nova/api/openstack/servers.py:79 +msgid "Failed to read public ip(s)" +msgstr "" + +#: ../nova/api/openstack/servers.py:152 +#, python-format +msgid "%(param)s property not found for image %(_image_id)s" +msgstr "" + +#: ../nova/api/openstack/servers.py:168 +msgid "No keypairs defined" +msgstr "" + +#: ../nova/api/openstack/servers.py:238 +#, python-format +msgid "Compute.api::lock %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:253 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:267 +#, python-format +msgid "Compute.api::get_lock %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:281 +#, python-format +msgid "Compute.api::reset_network %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:292 +#, python-format +msgid "Compute.api::pause %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:303 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:314 +#, python-format +msgid "compute.api::suspend %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:325 +#, python-format +msgid "compute.api::resume %s" +msgstr "" + +#: ../nova/twistd.py:157 +msgid "Wrong number of arguments." 
+msgstr "" + +#: ../nova/twistd.py:209 +#, python-format +msgid "pidfile %s does not exist. Daemon not running?\n" +msgstr "PID-Datei %s existiert nicht. Läuft der Daemon nicht?\n" + +#: ../nova/twistd.py:221 +msgid "No such process" +msgstr "" + +#: ../nova/twistd.py:230 ../nova/service.py:224 +#, python-format +msgid "Serving %s" +msgstr "" + +#: ../nova/twistd.py:262 ../nova/service.py:225 +msgid "Full set of FLAGS:" +msgstr "Alle vorhandenen FLAGS:" + +#: ../nova/twistd.py:266 +#, python-format +msgid "Starting %s" +msgstr "%s wird gestartet" + +#: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101 +#: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741 +#: ../nova/api/ec2/__init__.py:317 +#, python-format +msgid "Instance %s not found" +msgstr "" + +#. NOTE: No Resource Pool concept so far +#: ../nova/virt/xenapi/volumeops.py:51 +#, python-format +msgid "Attach_volume: %(instance_name)s, %(device_path)s, %(mountpoint)s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:69 +#, python-format +msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:80 +#, python-format +msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:91 +#, python-format +msgid "Unable to attach volume to instance %s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:93 +#, python-format +msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" +msgstr "" + +#. 
Detach VBD from VM +#: ../nova/virt/xenapi/volumeops.py:104 +#, python-format +msgid "Detach_volume: %(instance_name)s, %(mountpoint)s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:112 +#, python-format +msgid "Unable to locate volume %s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:120 +#, python-format +msgid "Unable to detach volume %s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:127 +#, python-format +msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" +msgstr "" + +#: ../nova/compute/instance_types.py:41 +#, python-format +msgid "Unknown instance type: %s" +msgstr "" + +#: ../nova/crypto.py:46 msgid "Filename of root CA" msgstr "Dateiname der Root CA" -#: nova/crypto.py:49 +#: ../nova/crypto.py:49 msgid "Filename of private key" msgstr "Dateiname des Private Key" -#: nova/crypto.py:51 +#: ../nova/crypto.py:51 msgid "Filename of root Certificate Revokation List" msgstr "Dateiname der Certificate Revocation List" -#: nova/crypto.py:53 +#: ../nova/crypto.py:53 msgid "Where we keep our keys" msgstr "" -#: nova/crypto.py:55 +#: ../nova/crypto.py:55 msgid "Where we keep our root CA" msgstr "" -#: nova/crypto.py:57 +#: ../nova/crypto.py:57 msgid "Should we use a CA for each project?" msgstr "Soll eine eigenständige CA für jedes Projekt verwendet werden?" -#: nova/crypto.py:61 +#: ../nova/crypto.py:61 #, python-format msgid "Subject for certificate for users, %s for project, user, timestamp" msgstr "" -#: nova/crypto.py:66 +#: ../nova/crypto.py:66 #, python-format msgid "Subject for certificate for projects, %s for project, timestamp" msgstr "" -#: nova/crypto.py:71 +#: ../nova/crypto.py:71 #, python-format msgid "Subject for certificate for vpns, %s for project, timestamp" msgstr "" -#: nova/crypto.py:258 +#: ../nova/crypto.py:258 #, python-format msgid "Flags path: %s" msgstr "" -#: nova/exception.py:33 -msgid "Unexpected error while running command." -msgstr "Unerwarteter Fehler bei Ausführung des Kommandos." 
+#: ../nova/scheduler/manager.py:69 +#, python-format +msgid "Casting to %(topic)s %(host)s for %(method)s" +msgstr "" -#: nova/exception.py:36 +#: ../nova/compute/manager.py:78 #, python-format -msgid "" -"%s\n" -"Command: %s\n" -"Exit code: %s\n" -"Stdout: %r\n" -"Stderr: %r" -msgstr "" -"%s\n" -"Kommando: %s\n" -"Exit Code: %s\n" -"Stdout: %r\n" -"Stderr: %r" - -#: nova/exception.py:86 -msgid "Uncaught exception" -msgstr "Nicht abgefangene Ausnahme" +msgid "check_instance_lock: decorating: |%s|" +msgstr "" -#: nova/fakerabbit.py:48 +#: ../nova/compute/manager.py:80 #, python-format -msgid "(%s) publish (key: %s) %s" -msgstr "(%s) öffentlich (Schlüssel: %s) %s" +msgid "" +"check_instance_lock: arguments: |%(self)s| |%(context)s| |%(instance_id)s|" +msgstr "" -#: nova/fakerabbit.py:53 +#: ../nova/compute/manager.py:84 #, python-format -msgid "Publishing to route %s" +msgid "check_instance_lock: locked: |%s|" msgstr "" -#: nova/fakerabbit.py:83 +#: ../nova/compute/manager.py:86 #, python-format -msgid "Declaring queue %s" +msgid "check_instance_lock: admin: |%s|" msgstr "" -#: nova/fakerabbit.py:89 +#: ../nova/compute/manager.py:91 #, python-format -msgid "Declaring exchange %s" +msgid "check_instance_lock: executing: |%s|" msgstr "" -#: nova/fakerabbit.py:95 +#: ../nova/compute/manager.py:95 #, python-format -msgid "Binding %s to %s with key %s" +msgid "check_instance_lock: not executing |%s|" msgstr "" -#: nova/fakerabbit.py:120 +#: ../nova/compute/manager.py:179 +msgid "Instance has already been created" +msgstr "" + +#: ../nova/compute/manager.py:180 #, python-format -msgid "Getting from %s: %s" -msgstr "Beziehe von %s: %s" +msgid "instance %s: starting..." +msgstr "" -#: nova/rpc.py:92 +#. pylint: disable=W0702 +#: ../nova/compute/manager.py:219 #, python-format -msgid "AMQP server on %s:%d is unreachable. Trying again in %d seconds." +msgid "instance %s: Failed to spawn" msgstr "" -"Der AMQP server %s:%d ist nicht erreichbar. Erneuter Versuch in %d Sekunden." 
-#: nova/rpc.py:99 +#: ../nova/compute/manager.py:233 ../nova/tests/test_cloud.py:286 #, python-format -msgid "Unable to connect to AMQP server after %d tries. Shutting down." +msgid "Terminating instance %s" msgstr "" -#: nova/rpc.py:118 -msgid "Reconnected to queue" +#: ../nova/compute/manager.py:255 +#, python-format +msgid "Deallocating address %s" msgstr "" -#: nova/rpc.py:125 -msgid "Failed to fetch message from queue" +#: ../nova/compute/manager.py:268 +#, python-format +msgid "trying to destroy already destroyed instance: %s" msgstr "" -#: nova/rpc.py:155 +#: ../nova/compute/manager.py:282 #, python-format -msgid "Initing the Adapter Consumer for %s" +msgid "Rebooting instance %s" msgstr "" -#: nova/rpc.py:170 +#: ../nova/compute/manager.py:287 #, python-format -msgid "received %s" +msgid "" +"trying to reboot a non-running instance: %(instance_id)s (state: %(state)s " +"expected: %(running)s)" msgstr "" -#: nova/rpc.py:183 +#: ../nova/compute/manager.py:311 #, python-format -msgid "no method for message: %s" -msgstr "keine Methode für diese Nachricht gefunden: %s" +msgid "instance %s: snapshotting" +msgstr "" -#: nova/rpc.py:184 +#: ../nova/compute/manager.py:316 #, python-format -msgid "No method for message: %s" -msgstr "keine Methode für diese Nachricht gefunden: %s" +msgid "" +"trying to snapshot a non-running instance: %(instance_id)s (state: %(state)s " +"expected: %(running)s)" +msgstr "" -#: nova/rpc.py:245 +#: ../nova/compute/manager.py:332 #, python-format -msgid "Returning exception %s to caller" +msgid "" +"trying to reset the password on a non-running instance: %(instance_id)s " +"(state: %(instance_state)s expected: %(expected_state)s)" msgstr "" -#: nova/rpc.py:286 +#: ../nova/compute/manager.py:335 #, python-format -msgid "unpacked context: %s" +msgid "instance %s: setting admin password" msgstr "" -#: nova/rpc.py:305 -msgid "Making asynchronous call..." -msgstr "führe asynchronen Aufruf durch..." 
+#: ../nova/compute/manager.py:353 +#, python-format +msgid "" +"trying to inject a file into a non-running instance: %(instance_id)s (state: " +"%(instance_state)s expected: %(expected_state)s)" +msgstr "" -#: nova/rpc.py:308 +#: ../nova/compute/manager.py:362 #, python-format -msgid "MSG_ID is %s" -msgstr "MSG_ID ist %s" +msgid "instance %(nm)s: injecting file to %(plain_path)s" +msgstr "" -#: nova/rpc.py:356 +#: ../nova/compute/manager.py:372 #, python-format -msgid "response %s" +msgid "instance %s: rescuing" msgstr "" -#: nova/rpc.py:365 +#: ../nova/compute/manager.py:387 #, python-format -msgid "topic is %s" -msgstr "Betreff ist %s" +msgid "instance %s: unrescuing" +msgstr "" -#: nova/rpc.py:366 +#: ../nova/compute/manager.py:406 #, python-format -msgid "message %s" -msgstr "Nachricht %s" +msgid "instance %s: pausing" +msgstr "" -#: nova/service.py:157 +#: ../nova/compute/manager.py:423 #, python-format -msgid "Starting %s node" +msgid "instance %s: unpausing" msgstr "" -#: nova/service.py:169 -msgid "Service killed that has no database entry" +#: ../nova/compute/manager.py:440 +#, python-format +msgid "instance %s: retrieving diagnostics" msgstr "" -#: nova/service.py:190 -msgid "The service database object disappeared, Recreating it." +#: ../nova/compute/manager.py:453 +#, python-format +msgid "instance %s: suspending" msgstr "" -#: nova/service.py:202 -msgid "Recovered model server connection!" +#: ../nova/compute/manager.py:472 +#, python-format +msgid "instance %s: resuming" msgstr "" -#: nova/service.py:208 -msgid "model server went away" +#: ../nova/compute/manager.py:491 +#, python-format +msgid "instance %s: locking" msgstr "" -#: nova/service.py:217 nova/db/sqlalchemy/__init__.py:43 +#: ../nova/compute/manager.py:503 #, python-format -msgid "Data store %s is unreachable. Trying again in %d seconds." +msgid "instance %s: unlocking" msgstr "" -"Datastore %s ist nicht erreichbar. Versuche es erneut in %d Sekunden." 
-#: nova/service.py:232 nova/twistd.py:232 +#: ../nova/compute/manager.py:513 #, python-format -msgid "Serving %s" +msgid "instance %s: getting locked state" msgstr "" -#: nova/service.py:234 nova/twistd.py:264 -msgid "Full set of FLAGS:" -msgstr "Alle vorhandenen FLAGS:" +#: ../nova/compute/manager.py:526 +#, python-format +msgid "instance %s: reset network" +msgstr "" -#: nova/twistd.py:211 +#: ../nova/compute/manager.py:535 ../nova/api/ec2/cloud.py:515 #, python-format -msgid "pidfile %s does not exist. Daemon not running?\n" -msgstr "PID-Datei %s existiert nicht. Läuft der Daemon nicht?\n" +msgid "Get console output for instance %s" +msgstr "" -#: nova/twistd.py:268 +#: ../nova/compute/manager.py:543 #, python-format -msgid "Starting %s" -msgstr "%s wird gestartet" +msgid "instance %s: getting ajax console" +msgstr "" -#: nova/utils.py:53 +#: ../nova/compute/manager.py:553 #, python-format -msgid "Inner Exception: %s" +msgid "" +"instance %(instance_id)s: attaching volume %(volume_id)s to %(mountpoint)s" msgstr "" -#: nova/utils.py:54 +#. pylint: disable=W0702 +#. NOTE(vish): The inline callback eats the exception info so we +#. log the traceback here and reraise the same +#. ecxception below. 
+#: ../nova/compute/manager.py:569 #, python-format -msgid "Class %s cannot be found" -msgstr "Klasse %s konnte nicht gefunden werden" +msgid "instance %(instance_id)s: attach failed %(mountpoint)s, removing" +msgstr "" -#: nova/utils.py:113 +#: ../nova/compute/manager.py:585 #, python-format -msgid "Fetching %s" +msgid "" +"Detach volume %(volume_id)s from mountpoint %(mp)s on instance " +"%(instance_id)s" msgstr "" -#: nova/utils.py:125 +#: ../nova/compute/manager.py:588 #, python-format -msgid "Running cmd (subprocess): %s" -msgstr "Führe Kommando (subprocess) aus: %s" +msgid "Detaching volume from unknown instance %s" +msgstr "" -#: nova/utils.py:138 +#: ../nova/scheduler/simple.py:53 #, python-format -msgid "Result was %s" -msgstr "Ergebnis war %s" +msgid "Host %s is not alive" +msgstr "" + +#: ../nova/scheduler/simple.py:65 +msgid "All hosts have too many cores" +msgstr "" -#: nova/utils.py:171 +#: ../nova/scheduler/simple.py:87 #, python-format -msgid "debug in callback: %s" +msgid "Host %s not available" +msgstr "" + +#: ../nova/scheduler/simple.py:99 +msgid "All hosts have too many gigabytes" +msgstr "" + +#: ../nova/scheduler/simple.py:119 +msgid "All hosts have too many networks" msgstr "" -#: nova/utils.py:176 +#: ../nova/volume/manager.py:85 #, python-format -msgid "Running %s" +msgid "Re-exporting %s volumes" msgstr "" -#: nova/utils.py:207 +#: ../nova/volume/manager.py:90 #, python-format -msgid "Couldn't get IP, using 127.0.0.1 %s" +msgid "volume %s: skipping export" msgstr "" -#: nova/utils.py:289 +#: ../nova/volume/manager.py:96 #, python-format -msgid "Invalid backend: %s" +msgid "volume %s: creating" +msgstr "Volume %s: wird erstellt" + +#: ../nova/volume/manager.py:108 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" msgstr "" -#: nova/utils.py:300 +#: ../nova/volume/manager.py:112 #, python-format -msgid "backend %s" +msgid "volume %s: creating export" +msgstr "Volume %s: erstelle Export" + +#: 
../nova/volume/manager.py:123 +#, python-format +msgid "volume %s: created successfully" +msgstr "Volume %s: erfolgreich erstellt" + +#: ../nova/volume/manager.py:131 +msgid "Volume is still attached" msgstr "" -#: nova/api/ec2/__init__.py:133 -msgid "Too many failed authentications." +#: ../nova/volume/manager.py:133 +msgid "Volume is not local to this node" msgstr "" -#: nova/api/ec2/__init__.py:142 +#: ../nova/volume/manager.py:136 #, python-format -msgid "" -"Access key %s has had %d failed authentications and will be locked out for " -"%d minutes." +msgid "volume %s: removing export" +msgstr "Volume %s: entferne Export" + +#: ../nova/volume/manager.py:138 +#, python-format +msgid "volume %s: deleting" +msgstr "Volume %s: wird entfernt" + +#: ../nova/volume/manager.py:147 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "Volume %s: erfolgreich entfernt" + +#: ../nova/virt/xenapi/fake.py:74 +#, python-format +msgid "%(text)s: _db_content => %(content)s" +msgstr "" + +#: ../nova/virt/xenapi/fake.py:304 ../nova/virt/xenapi/fake.py:404 +#: ../nova/virt/xenapi/fake.py:422 ../nova/virt/xenapi/fake.py:478 +msgid "Raising NotImplemented" msgstr "" -#: nova/api/ec2/__init__.py:179 nova/objectstore/handler.py:140 +#: ../nova/virt/xenapi/fake.py:306 #, python-format -msgid "Authentication Failure: %s" +msgid "xenapi.fake does not have an implementation for %s" msgstr "" -#: nova/api/ec2/__init__.py:190 +#: ../nova/virt/xenapi/fake.py:341 #, python-format -msgid "Authenticated Request For %s:%s)" +msgid "Calling %(localname)s %(impl)s" msgstr "" -#: nova/api/ec2/__init__.py:227 +#: ../nova/virt/xenapi/fake.py:346 #, python-format -msgid "action: %s" +msgid "Calling getter %s" msgstr "" -#: nova/api/ec2/__init__.py:229 +#: ../nova/virt/xenapi/fake.py:406 #, python-format -msgid "arg: %s\t\tval: %s" +msgid "" +"xenapi.fake does not have an implementation for %s or it has been called " +"with the wrong number of arguments" +msgstr "" + +#: 
../nova/tests/test_cloud.py:256 +msgid "Can't test instances without a real virtual env." msgstr "" -#: nova/api/ec2/__init__.py:301 +#: ../nova/tests/test_cloud.py:268 #, python-format -msgid "Unauthorized request for controller=%s and action=%s" +msgid "Need to watch instance %s until it's running..." +msgstr "" + +#: ../nova/virt/connection.py:73 +msgid "Failed to open connection to the hypervisor" msgstr "" -#: nova/api/ec2/__init__.py:339 +#: ../nova/network/linux_net.py:187 #, python-format -msgid "NotFound raised: %s" +msgid "Starting VLAN inteface %s" msgstr "" -#: nova/api/ec2/__init__.py:342 +#: ../nova/network/linux_net.py:208 #, python-format -msgid "ApiError raised: %s" +msgid "Starting Bridge interface for %s" msgstr "" -#: nova/api/ec2/__init__.py:349 +#. pylint: disable=W0703 +#: ../nova/network/linux_net.py:314 #, python-format -msgid "Unexpected error raised: %s" +msgid "Hupping dnsmasq threw %s" msgstr "" -#: nova/api/ec2/__init__.py:354 -msgid "An unknown error has occurred. Please try your request again." +#: ../nova/network/linux_net.py:316 +#, python-format +msgid "Pid %d is stale, relaunching dnsmasq" msgstr "" -#: nova/api/ec2/admin.py:84 +#. pylint: disable=W0703 +#: ../nova/network/linux_net.py:358 #, python-format -msgid "Creating new user: %s" +msgid "killing radvd threw %s" msgstr "" -#: nova/api/ec2/admin.py:92 +#: ../nova/network/linux_net.py:360 #, python-format -msgid "Deleting user: %s" +msgid "Pid %d is stale, relaunching radvd" msgstr "" -#: nova/api/ec2/admin.py:114 +#. 
pylint: disable=W0703 +#: ../nova/network/linux_net.py:449 #, python-format -msgid "Adding role %s to user %s for project %s" +msgid "Killing dnsmasq threw %s" msgstr "" -#: nova/api/ec2/admin.py:117 nova/auth/manager.py:415 +#: ../nova/utils.py:58 #, python-format -msgid "Adding sitewide role %s to user %s" +msgid "Inner Exception: %s" msgstr "" -#: nova/api/ec2/admin.py:122 +#: ../nova/utils.py:59 +#, python-format +msgid "Class %s cannot be found" +msgstr "Klasse %s konnte nicht gefunden werden" + +#: ../nova/utils.py:118 #, python-format -msgid "Removing role %s from user %s for project %s" +msgid "Fetching %s" msgstr "" -#: nova/api/ec2/admin.py:125 nova/auth/manager.py:441 +#: ../nova/utils.py:130 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "Führe Kommando (subprocess) aus: %s" + +#: ../nova/utils.py:143 ../nova/utils.py:183 +#, python-format +msgid "Result was %s" +msgstr "Ergebnis war %s" + +#: ../nova/utils.py:159 #, python-format -msgid "Removing sitewide role %s from user %s" +msgid "Running cmd (SSH): %s" msgstr "" -#: nova/api/ec2/admin.py:129 nova/api/ec2/admin.py:192 -msgid "operation must be add or remove" +#: ../nova/utils.py:217 +#, python-format +msgid "debug in callback: %s" msgstr "" -#: nova/api/ec2/admin.py:142 +#: ../nova/utils.py:222 #, python-format -msgid "Getting x509 for user: %s on project: %s" +msgid "Running %s" msgstr "" -#: nova/api/ec2/admin.py:159 +#: ../nova/utils.py:262 #, python-format -msgid "Create project %s managed by %s" +msgid "Link Local address is not found.:%s" msgstr "" -#: nova/api/ec2/admin.py:170 +#: ../nova/utils.py:265 #, python-format -msgid "Delete project: %s" +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" msgstr "" -#: nova/api/ec2/admin.py:184 nova/auth/manager.py:533 +#: ../nova/utils.py:363 #, python-format -msgid "Adding user %s to project %s" +msgid "Invalid backend: %s" msgstr "" -#: nova/api/ec2/admin.py:188 +#: ../nova/utils.py:374 #, python-format -msgid "Removing 
user %s from project %s" +msgid "backend %s" msgstr "" -#: nova/api/ec2/apirequest.py:95 +#: ../nova/fakerabbit.py:49 #, python-format -msgid "Unsupported API request: controller = %s,action = %s" +msgid "(%(nm)s) publish (key: %(routing_key)s) %(message)s" msgstr "" -#: nova/api/ec2/cloud.py:117 +#: ../nova/fakerabbit.py:54 #, python-format -msgid "Generating root CA: %s" +msgid "Publishing to route %s" msgstr "" -#: nova/api/ec2/cloud.py:277 +#: ../nova/fakerabbit.py:84 #, python-format -msgid "Create key pair %s" +msgid "Declaring queue %s" msgstr "" -#: nova/api/ec2/cloud.py:285 +#: ../nova/fakerabbit.py:90 #, python-format -msgid "Delete key pair %s" +msgid "Declaring exchange %s" msgstr "" -#: nova/api/ec2/cloud.py:357 +#: ../nova/fakerabbit.py:96 #, python-format -msgid "%s is not a valid ipProtocol" +msgid "Binding %(queue)s to %(exchange)s with key %(routing_key)s" msgstr "" -#: nova/api/ec2/cloud.py:361 -msgid "Invalid port range" +#: ../nova/fakerabbit.py:121 +#, python-format +msgid "Getting from %(queue)s: %(message)s" msgstr "" -#: nova/api/ec2/cloud.py:392 +#: ../nova/virt/xenapi/vm_utils.py:135 ../nova/virt/hyperv.py:171 #, python-format -msgid "Revoke security group ingress %s" +msgid "Created VM %s..." msgstr "" -#: nova/api/ec2/cloud.py:401 nova/api/ec2/cloud.py:414 -msgid "No rule for the specified parameters." +#: ../nova/virt/xenapi/vm_utils.py:138 +#, python-format +msgid "Created VM %(instance_name)s as %(vm_ref)s." msgstr "" -#: nova/api/ec2/cloud.py:421 +#: ../nova/virt/xenapi/vm_utils.py:168 #, python-format -msgid "Authorize security group ingress %s" +msgid "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " msgstr "" -#: nova/api/ec2/cloud.py:432 +#: ../nova/virt/xenapi/vm_utils.py:171 #, python-format -msgid "This rule already exists in group %s" +msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." 
msgstr "" -#: nova/api/ec2/cloud.py:460 +#: ../nova/virt/xenapi/vm_utils.py:187 #, python-format -msgid "Create Security Group %s" +msgid "VBD not found in instance %s" msgstr "" -#: nova/api/ec2/cloud.py:463 +#: ../nova/virt/xenapi/vm_utils.py:197 #, python-format -msgid "group %s already exists" +msgid "Unable to unplug VBD %s" msgstr "" -#: nova/api/ec2/cloud.py:475 +#: ../nova/virt/xenapi/vm_utils.py:209 #, python-format -msgid "Delete security group %s" +msgid "Unable to destroy VBD %s" msgstr "" -#: nova/api/ec2/cloud.py:483 nova/compute/manager.py:452 +#: ../nova/virt/xenapi/vm_utils.py:224 #, python-format -msgid "Get console output for instance %s" +msgid "Creating VIF for VM %(vm_ref)s, network %(network_ref)s." msgstr "" -#: nova/api/ec2/cloud.py:543 +#: ../nova/virt/xenapi/vm_utils.py:227 #, python-format -msgid "Create volume of %s GB" +msgid "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s." msgstr "" -#: nova/api/ec2/cloud.py:567 +#: ../nova/virt/xenapi/vm_utils.py:246 #, python-format -msgid "Attach volume %s to instacne %s at %s" +msgid "" +"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s) on " +"%(sr_ref)s." msgstr "" -#: nova/api/ec2/cloud.py:579 +#. TODO(sirp): Add quiesce and VSS locking support when Windows support +#. is added +#: ../nova/virt/xenapi/vm_utils.py:258 #, python-format -msgid "Detach volume %s" +msgid "Snapshotting VM %(vm_ref)s with label '%(label)s'..." msgstr "" -#: nova/api/ec2/cloud.py:686 -msgid "Allocate address" +#: ../nova/virt/xenapi/vm_utils.py:272 +#, python-format +msgid "Created snapshot %(template_vm_ref)s from VM %(vm_ref)s." 
msgstr "" -#: nova/api/ec2/cloud.py:691 +#: ../nova/virt/xenapi/vm_utils.py:286 #, python-format -msgid "Release address %s" +msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s" msgstr "" -#: nova/api/ec2/cloud.py:696 +#: ../nova/virt/xenapi/vm_utils.py:327 #, python-format -msgid "Associate address %s to instance %s" +msgid "Size for image %(image)s:%(virtual_size)d" msgstr "" -#: nova/api/ec2/cloud.py:703 +#: ../nova/virt/xenapi/vm_utils.py:332 #, python-format -msgid "Disassociate address %s" +msgid "Glance image %s" msgstr "" -#: nova/api/ec2/cloud.py:730 -msgid "Going to start terminating instances" +#. we need to invoke a plugin for copying VDI's +#. content into proper path +#: ../nova/virt/xenapi/vm_utils.py:342 +#, python-format +msgid "Copying VDI %s to /boot/guest on dom0" msgstr "" -#: nova/api/ec2/cloud.py:738 +#: ../nova/virt/xenapi/vm_utils.py:352 #, python-format -msgid "Reboot instance %r" +msgid "Kernel/Ramdisk VDI %s destroyed" msgstr "" -#: nova/api/ec2/cloud.py:775 +#: ../nova/virt/xenapi/vm_utils.py:361 #, python-format -msgid "De-registering image %s" +msgid "Asking xapi to fetch %(url)s as %(access)s" msgstr "" -#: nova/api/ec2/cloud.py:783 +#: ../nova/virt/xenapi/vm_utils.py:386 ../nova/virt/xenapi/vm_utils.py:402 #, python-format -msgid "Registered image %s with id %s" +msgid "Looking up vdi %s for PV kernel" msgstr "" -#: nova/api/ec2/cloud.py:789 nova/api/ec2/cloud.py:804 +#: ../nova/virt/xenapi/vm_utils.py:397 #, python-format -msgid "attribute not supported: %s" +msgid "PV Kernel in VDI:%s" msgstr "" -#: nova/api/ec2/cloud.py:794 +#: ../nova/virt/xenapi/vm_utils.py:405 #, python-format -msgid "invalid id: %s" +msgid "Running pygrub against %s" msgstr "" -#: nova/api/ec2/cloud.py:807 -msgid "user or group not specified" +#: ../nova/virt/xenapi/vm_utils.py:411 +#, python-format +msgid "Found Xen kernel %s" msgstr "" -#: nova/api/ec2/cloud.py:809 -msgid "only group \"all\" is supported" +#: ../nova/virt/xenapi/vm_utils.py:413 
+msgid "No Xen kernel found. Booting HVM." msgstr "" -#: nova/api/ec2/cloud.py:811 -msgid "operation_type must be add or remove" +#: ../nova/virt/xenapi/vm_utils.py:425 ../nova/virt/hyperv.py:431 +#, python-format +msgid "duplicate name found: %s" msgstr "" -#: nova/api/ec2/cloud.py:812 +#: ../nova/virt/xenapi/vm_utils.py:442 #, python-format -msgid "Updating image %s publicity" +msgid "VDI %s is still available" msgstr "" -#: nova/api/ec2/metadatarequesthandler.py:75 +#: ../nova/virt/xenapi/vm_utils.py:463 #, python-format -msgid "Failed to get metadata for ip: %s" +msgid "(VM_UTILS) xenserver vm state -> |%s|" msgstr "" -#: nova/api/openstack/__init__.py:70 +#: ../nova/virt/xenapi/vm_utils.py:465 #, python-format -msgid "Caught error: %s" +msgid "(VM_UTILS) xenapi power_state -> |%s|" msgstr "" -#: nova/api/openstack/__init__.py:86 -msgid "Including admin operations in API." +#: ../nova/virt/xenapi/vm_utils.py:525 +#, python-format +msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s" msgstr "" -#: nova/api/openstack/servers.py:184 +#: ../nova/virt/xenapi/vm_utils.py:542 #, python-format -msgid "Compute.api::lock %s" +msgid "Re-scanning SR %s" msgstr "" -#: nova/api/openstack/servers.py:199 +#: ../nova/virt/xenapi/vm_utils.py:567 #, python-format -msgid "Compute.api::unlock %s" +msgid "" +"VHD coalesce attempts exceeded (%(counter)d > %(max_attempts)d), giving up..." msgstr "" -#: nova/api/openstack/servers.py:213 +#: ../nova/virt/xenapi/vm_utils.py:574 #, python-format -msgid "Compute.api::get_lock %s" +msgid "" +"Parent %(parent_uuid)s doesn't match original parent " +"%(original_parent_uuid)s, waiting for coalesce..." 
msgstr "" -#: nova/api/openstack/servers.py:224 +#: ../nova/virt/xenapi/vm_utils.py:590 #, python-format -msgid "Compute.api::pause %s" +msgid "No VDIs found for VM %s" msgstr "" -#: nova/api/openstack/servers.py:235 +#: ../nova/virt/xenapi/vm_utils.py:594 #, python-format -msgid "Compute.api::unpause %s" +msgid "Unexpected number of VDIs (%(num_vdis)s) found for VM %(vm_ref)s" msgstr "" -#: nova/api/openstack/servers.py:246 +#: ../nova/virt/xenapi/vm_utils.py:653 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:188 #, python-format -msgid "compute.api::suspend %s" +msgid "Creating VBD for VDI %s ... " msgstr "" -#: nova/api/openstack/servers.py:257 +#: ../nova/virt/xenapi/vm_utils.py:655 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:190 #, python-format -msgid "compute.api::resume %s" +msgid "Creating VBD for VDI %s done." msgstr "" -#: nova/auth/dbdriver.py:84 +#: ../nova/virt/xenapi/vm_utils.py:657 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:192 #, python-format -msgid "User %s already exists" +msgid "Plugging VBD %s ... " msgstr "" -#: nova/auth/dbdriver.py:106 nova/auth/ldapdriver.py:207 +#: ../nova/virt/xenapi/vm_utils.py:659 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:194 #, python-format -msgid "Project can't be created because manager %s doesn't exist" +msgid "Plugging VBD %s done." 
msgstr "" -#: nova/auth/dbdriver.py:135 nova/auth/ldapdriver.py:204 +#: ../nova/virt/xenapi/vm_utils.py:661 #, python-format -msgid "Project can't be created because project %s already exists" +msgid "VBD %(vbd)s plugged as %(orig_dev)s" msgstr "" -#: nova/auth/dbdriver.py:157 nova/auth/ldapdriver.py:241 +#: ../nova/virt/xenapi/vm_utils.py:664 #, python-format -msgid "Project can't be modified because manager %s doesn't exist" +msgid "VBD %(vbd)s plugged into wrong dev, remapping to %(dev)s" msgstr "" -#: nova/auth/dbdriver.py:245 +#: ../nova/virt/xenapi/vm_utils.py:668 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:197 #, python-format -msgid "User \"%s\" not found" +msgid "Destroying VBD for VDI %s ... " msgstr "" -#: nova/auth/dbdriver.py:248 +#: ../nova/virt/xenapi/vm_utils.py:671 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:200 #, python-format -msgid "Project \"%s\" not found" +msgid "Destroying VBD for VDI %s done." msgstr "" -#: nova/auth/fakeldap.py:33 -msgid "Attempted to instantiate singleton" +#: ../nova/virt/xenapi/vm_utils.py:683 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:211 +msgid "VBD.unplug successful first time." msgstr "" -#: nova/auth/ldapdriver.py:181 -#, python-format -msgid "LDAP object for %s doesn't exist" +#: ../nova/virt/xenapi/vm_utils.py:688 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:216 +msgid "VBD.unplug rejected: retrying..." msgstr "" -#: nova/auth/ldapdriver.py:218 +#: ../nova/virt/xenapi/vm_utils.py:692 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:220 +msgid "VBD.unplug successful eventually." 
+msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:695 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:223 #, python-format -msgid "Project can't be created because user %s doesn't exist" +msgid "Ignoring XenAPI.Failure in VBD.unplug: %s" msgstr "" -#: nova/auth/ldapdriver.py:478 +#: ../nova/virt/xenapi/vm_utils.py:704 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:66 #, python-format -msgid "User %s is already a member of the group %s" +msgid "Ignoring XenAPI.Failure %s" msgstr "" -#: nova/auth/ldapdriver.py:507 +#: ../nova/virt/xenapi/vm_utils.py:735 #, python-format msgid "" -"Attempted to remove the last member of a group. Deleting the group at %s " -"instead." +"Writing partition table %(primary_first)d %(primary_last)d to %(dest)s..." msgstr "" -#: nova/auth/ldapdriver.py:528 +#: ../nova/virt/xenapi/vm_utils.py:747 #, python-format -msgid "Group at dn %s doesn't exist" +msgid "Writing partition table %s done." msgstr "" -#: nova/auth/manager.py:259 +#: ../nova/tests/test_rpc.py:89 #, python-format -msgid "Looking up user: %r" +msgid "Nested received %(queue)s, %(value)s" msgstr "" -#: nova/auth/manager.py:263 +#: ../nova/tests/test_rpc.py:95 #, python-format -msgid "Failed authorization for access key %s" +msgid "Nested return %s" msgstr "" -#: nova/auth/manager.py:264 +#: ../nova/tests/test_rpc.py:120 ../nova/tests/test_rpc.py:126 #, python-format -msgid "No user found for access key %s" +msgid "Received %s" msgstr "" -#: nova/auth/manager.py:270 -#, python-format -msgid "Using project name = user name (%s)" +#: ../nova/db/sqlalchemy/api.py:44 +msgid "Use of empty request context is deprecated" msgstr "" -#: nova/auth/manager.py:275 +#: ../nova/db/sqlalchemy/api.py:133 #, python-format -msgid "failed authorization: no project named %s (user=%s)" +msgid "No service for id %s" msgstr "" -#: nova/auth/manager.py:277 +#: ../nova/db/sqlalchemy/api.py:251 #, python-format -msgid "No project called %s could be found" +msgid 
"No service for %(host)s, %(binary)s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:592 +msgid "No fixed ips defined" msgstr "" -#: nova/auth/manager.py:281 +#: ../nova/db/sqlalchemy/api.py:608 #, python-format -msgid "Failed authorization: user %s not admin and not member of project %s" +msgid "No floating ip for address %s" msgstr "" -#: nova/auth/manager.py:283 +#: ../nova/db/sqlalchemy/api.py:629 #, python-format -msgid "User %s is not a member of project %s" +msgid "No address for instance %s" msgstr "" -#: nova/auth/manager.py:292 nova/auth/manager.py:303 +#: ../nova/db/sqlalchemy/api.py:961 #, python-format -msgid "Invalid signature for user %s" +msgid "no keypair for user %(user_id)s, name %(name)s" msgstr "" -#: nova/auth/manager.py:293 nova/auth/manager.py:304 -msgid "Signature does not match" +#: ../nova/db/sqlalchemy/api.py:1076 ../nova/db/sqlalchemy/api.py:1156 +#, python-format +msgid "No network for id %s" msgstr "" -#: nova/auth/manager.py:374 -msgid "Must specify project" +#: ../nova/db/sqlalchemy/api.py:1086 +msgid "No networks defined" msgstr "" -#: nova/auth/manager.py:408 +#: ../nova/db/sqlalchemy/api.py:1115 #, python-format -msgid "The %s role can not be found" +msgid "No network for bridge %s" msgstr "" -#: nova/auth/manager.py:410 +#: ../nova/db/sqlalchemy/api.py:1129 ../nova/db/sqlalchemy/api.py:1142 #, python-format -msgid "The %s role is global only" +msgid "No network for instance %s" msgstr "" -#: nova/auth/manager.py:412 +#: ../nova/db/sqlalchemy/api.py:1277 #, python-format -msgid "Adding role %s to user %s in project %s" +msgid "Token %s does not exist" msgstr "" -#: nova/auth/manager.py:438 +#: ../nova/db/sqlalchemy/api.py:1302 #, python-format -msgid "Removing role %s from user %s on project %s" +msgid "No quota for project_id %s" msgstr "" -#: nova/auth/manager.py:505 +#: ../nova/db/sqlalchemy/api.py:1455 ../nova/db/sqlalchemy/api.py:1501 +#: ../nova/api/ec2/__init__.py:323 #, python-format -msgid "Created project %s with manager 
%s" +msgid "Volume %s not found" msgstr "" -#: nova/auth/manager.py:523 +#: ../nova/db/sqlalchemy/api.py:1514 #, python-format -msgid "modifying project %s" +msgid "No export device found for volume %s" msgstr "" -#: nova/auth/manager.py:553 +#: ../nova/db/sqlalchemy/api.py:1527 #, python-format -msgid "Remove user %s from project %s" +msgid "No target id found for volume %s" msgstr "" -#: nova/auth/manager.py:581 +#: ../nova/db/sqlalchemy/api.py:1572 #, python-format -msgid "Deleting project %s" +msgid "No security group with id %s" msgstr "" -#: nova/auth/manager.py:637 +#: ../nova/db/sqlalchemy/api.py:1589 #, python-format -msgid "Created user %s (admin: %r)" +msgid "No security group named %(group_name)s for project: %(project_id)s" msgstr "" -#: nova/auth/manager.py:645 +#: ../nova/db/sqlalchemy/api.py:1682 #, python-format -msgid "Deleting user %s" +msgid "No secuity group rule with id %s" msgstr "" -#: nova/auth/manager.py:655 +#: ../nova/db/sqlalchemy/api.py:1756 #, python-format -msgid "Access Key change for user %s" +msgid "No user for id %s" msgstr "" -#: nova/auth/manager.py:657 +#: ../nova/db/sqlalchemy/api.py:1772 #, python-format -msgid "Secret Key change for user %s" +msgid "No user for access key %s" msgstr "" -#: nova/auth/manager.py:659 +#: ../nova/db/sqlalchemy/api.py:1834 #, python-format -msgid "Admin status set to %r for user %s" +msgid "No project with id %s" msgstr "" -#: nova/auth/manager.py:708 +#: ../nova/db/sqlalchemy/api.py:1979 #, python-format -msgid "No vpn data for project %s" +msgid "No console pool with id %(pool_id)s" msgstr "" -#: nova/cloudpipe/pipelib.py:45 -msgid "Template for script to run on cloudpipe instance boot" +#: ../nova/db/sqlalchemy/api.py:1996 +#, python-format +msgid "" +"No console pool of type %(console_type)s for compute host %(compute_host)s " +"on proxy host %(host)s" msgstr "" -#: nova/cloudpipe/pipelib.py:48 -msgid "Network to push into openvpn config" +#: ../nova/db/sqlalchemy/api.py:2035 +#, 
python-format +msgid "No console for instance %(instance_id)s in pool %(pool_id)s" msgstr "" -#: nova/cloudpipe/pipelib.py:51 -msgid "Netmask to push into openvpn config" +#: ../nova/db/sqlalchemy/api.py:2057 +#, python-format +msgid "on instance %s" msgstr "" -#: nova/cloudpipe/pipelib.py:97 +#: ../nova/db/sqlalchemy/api.py:2058 #, python-format -msgid "Launching VPN for %s" +msgid "No console with id %(console_id)s %(idesc)s" msgstr "" -#: nova/compute/api.py:67 +#: ../nova/db/sqlalchemy/api.py:2078 ../nova/db/sqlalchemy/api.py:2097 #, python-format -msgid "Instance %d was not found in get_network_topic" +msgid "No zone with id %(zone_id)s" msgstr "" -#: nova/compute/api.py:73 +#: ../nova/virt/libvirt_conn.py:160 #, python-format -msgid "Instance %d has no host" +msgid "Checking state of %s" msgstr "" -#: nova/compute/api.py:92 +#: ../nova/virt/libvirt_conn.py:165 #, python-format -msgid "Quota exceeeded for %s, tried to run %s instances" +msgid "Current state of %(name)s was %(state)s." msgstr "" -#: nova/compute/api.py:94 +#: ../nova/virt/libvirt_conn.py:183 #, python-format -msgid "" -"Instance quota exceeded. You can only run %s more instances of this type." +msgid "Connecting to libvirt: %s" msgstr "" -#: nova/compute/api.py:109 -msgid "Creating a raw instance" +#: ../nova/virt/libvirt_conn.py:196 +msgid "Connection to libvirt broke" msgstr "" -#: nova/compute/api.py:156 +#: ../nova/virt/libvirt_conn.py:258 #, python-format -msgid "Going to run %s instances..." 
+msgid "instance %(instance_name)s: deleting instance files %(target)s" msgstr "" -#: nova/compute/api.py:180 +#: ../nova/virt/libvirt_conn.py:283 #, python-format -msgid "Casting to scheduler for %s/%s's instance %s" +msgid "Invalid device path %s" msgstr "" -#: nova/compute/api.py:279 +#: ../nova/virt/libvirt_conn.py:313 #, python-format -msgid "Going to try and terminate %s" +msgid "No disk at %s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:320 +msgid "Instance snapshotting is not supported for libvirtat this time" msgstr "" -#: nova/compute/api.py:283 +#: ../nova/virt/libvirt_conn.py:336 #, python-format -msgid "Instance %d was not found during terminate" +msgid "instance %s: rebooted" msgstr "" -#: nova/compute/api.py:288 +#: ../nova/virt/libvirt_conn.py:339 #, python-format -msgid "Instance %d is already being terminated" +msgid "_wait_for_reboot failed: %s" msgstr "" -#: nova/compute/api.py:450 +#: ../nova/virt/libvirt_conn.py:382 #, python-format -msgid "Invalid device specified: %s. Example device: /dev/vdb" +msgid "instance %s: rescued" msgstr "" -#: nova/compute/api.py:465 -msgid "Volume isn't attached to anything!" 
+#: ../nova/virt/libvirt_conn.py:385 +#, python-format +msgid "_wait_for_rescue failed: %s" msgstr "" -#: nova/compute/disk.py:71 +#: ../nova/virt/libvirt_conn.py:411 #, python-format -msgid "Input partition size not evenly divisible by sector size: %d / %d" +msgid "instance %s: is running" msgstr "" -#: nova/compute/disk.py:75 +#: ../nova/virt/libvirt_conn.py:422 #, python-format -msgid "Bytes for local storage not evenly divisible by sector size: %d / %d" +msgid "instance %s: booted" msgstr "" -#: nova/compute/disk.py:128 +#: ../nova/virt/libvirt_conn.py:425 ../nova/virt/xenapi/vmops.py:186 #, python-format -msgid "Could not attach image to loopback: %s" +msgid "instance %s: failed to boot" msgstr "" -#: nova/compute/disk.py:136 +#: ../nova/virt/libvirt_conn.py:436 #, python-format -msgid "Failed to load partition: %s" +msgid "virsh said: %r" msgstr "" -#: nova/compute/disk.py:158 +#: ../nova/virt/libvirt_conn.py:440 +msgid "cool, it's a device" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:448 #, python-format -msgid "Failed to mount filesystem: %s" +msgid "data: %(data)r, fpath: %(fpath)r" msgstr "" -#: nova/compute/instance_types.py:41 +#: ../nova/virt/libvirt_conn.py:456 #, python-format -msgid "Unknown instance type: %s" +msgid "Contents of file %(fpath)s: %(contents)r" msgstr "" -#: nova/compute/manager.py:69 +#: ../nova/virt/libvirt_conn.py:489 +msgid "Unable to find an open port" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:563 #, python-format -msgid "check_instance_lock: decorating: |%s|" +msgid "instance %s: Creating image" msgstr "" -#: nova/compute/manager.py:71 +#: ../nova/virt/libvirt_conn.py:646 #, python-format -msgid "check_instance_lock: arguments: |%s| |%s| |%s|" +msgid "instance %(inst_name)s: injecting key into image %(img_id)s" msgstr "" -#: nova/compute/manager.py:75 +#: ../nova/virt/libvirt_conn.py:649 #, python-format -msgid "check_instance_lock: locked: |%s|" +msgid "instance %(inst_name)s: injecting net into image %(img_id)s" msgstr 
"" -#: nova/compute/manager.py:77 +#. This could be a windows image, or a vmdk format disk +#: ../nova/virt/libvirt_conn.py:657 #, python-format -msgid "check_instance_lock: admin: |%s|" +msgid "" +"instance %(inst_name)s: ignoring error injecting data into image %(img_id)s " +"(%(e)s)" msgstr "" -#: nova/compute/manager.py:82 +#. TODO(termie): cache? +#: ../nova/virt/libvirt_conn.py:665 #, python-format -msgid "check_instance_lock: executing: |%s|" +msgid "instance %s: starting toXML method" msgstr "" -#: nova/compute/manager.py:86 +#: ../nova/virt/libvirt_conn.py:732 #, python-format -msgid "check_instance_lock: not executing |%s|" +msgid "instance %s: finished toXML method" msgstr "" -#: nova/compute/manager.py:157 -msgid "Instance has already been created" +#: ../nova/virt/libvirt_conn.py:751 +msgid "diagnostics are not supported for libvirt" msgstr "" -#: nova/compute/manager.py:158 +#: ../nova/virt/libvirt_conn.py:1225 #, python-format -msgid "instance %s: starting..." +msgid "Attempted to unfilter instance %s which is not filtered" msgstr "" -#: nova/compute/manager.py:197 +#: ../nova/api/ec2/metadatarequesthandler.py:76 #, python-format -msgid "instance %s: Failed to spawn" +msgid "Failed to get metadata for ip: %s" +msgstr "" + +#: ../nova/auth/fakeldap.py:33 +msgid "Attempted to instantiate singleton" msgstr "" -#: nova/compute/manager.py:211 nova/tests/test_cloud.py:228 +#: ../nova/network/api.py:39 #, python-format -msgid "Terminating instance %s" +msgid "Quota exceeeded for %s, tried to allocate address" msgstr "" -#: nova/compute/manager.py:217 +#: ../nova/network/api.py:42 +msgid "Address quota exceeded. 
You cannot allocate any more addresses" +msgstr "" + +#: ../nova/tests/test_volume.py:162 #, python-format -msgid "Disassociating address %s" +msgid "Target %s allocated" msgstr "" -#: nova/compute/manager.py:230 +#: ../nova/virt/images.py:70 #, python-format -msgid "Deallocating address %s" +msgid "Finished retreving %(url)s -- placed in %(path)s" +msgstr "" + +#: ../nova/scheduler/driver.py:66 +msgid "Must implement a fallback schedule" msgstr "" -#: nova/compute/manager.py:243 +#: ../nova/console/manager.py:70 +msgid "Adding console" +msgstr "" + +#: ../nova/console/manager.py:90 #, python-format -msgid "trying to destroy already destroyed instance: %s" +msgid "Tried to remove non-existant console %(console_id)s." +msgstr "" + +#: ../nova/api/direct.py:149 +msgid "not available" msgstr "" -#: nova/compute/manager.py:257 +#: ../nova/api/ec2/cloud.py:62 #, python-format -msgid "Rebooting instance %s" +msgid "The key_pair %s already exists" msgstr "" -#: nova/compute/manager.py:260 +#. TODO(vish): Do this with M2Crypto instead +#: ../nova/api/ec2/cloud.py:118 #, python-format -msgid "trying to reboot a non-running instance: %s (state: %s excepted: %s)" +msgid "Generating root CA: %s" msgstr "" -#: nova/compute/manager.py:286 +#: ../nova/api/ec2/cloud.py:303 #, python-format -msgid "instance %s: snapshotting" +msgid "Create key pair %s" msgstr "" -#: nova/compute/manager.py:289 +#: ../nova/api/ec2/cloud.py:311 #, python-format -msgid "" -"trying to snapshot a non-running instance: %s (state: %s excepted: %s)" +msgid "Delete key pair %s" msgstr "" -#: nova/compute/manager.py:301 +#: ../nova/api/ec2/cloud.py:386 #, python-format -msgid "instance %s: rescuing" +msgid "%s is not a valid ipProtocol" +msgstr "" + +#: ../nova/api/ec2/cloud.py:390 +msgid "Invalid port range" msgstr "" -#: nova/compute/manager.py:316 +#: ../nova/api/ec2/cloud.py:421 #, python-format -msgid "instance %s: unrescuing" +msgid "Revoke security group ingress %s" msgstr "" -#: 
nova/compute/manager.py:335 +#: ../nova/api/ec2/cloud.py:430 ../nova/api/ec2/cloud.py:459 +msgid "Not enough parameters to build a valid rule." +msgstr "" + +#: ../nova/api/ec2/cloud.py:443 +msgid "No rule for the specified parameters." +msgstr "" + +#: ../nova/api/ec2/cloud.py:450 #, python-format -msgid "instance %s: pausing" +msgid "Authorize security group ingress %s" msgstr "" -#: nova/compute/manager.py:352 +#: ../nova/api/ec2/cloud.py:464 #, python-format -msgid "instance %s: unpausing" +msgid "This rule already exists in group %s" msgstr "" -#: nova/compute/manager.py:369 +#: ../nova/api/ec2/cloud.py:492 #, python-format -msgid "instance %s: retrieving diagnostics" +msgid "Create Security Group %s" msgstr "" -#: nova/compute/manager.py:382 +#: ../nova/api/ec2/cloud.py:495 #, python-format -msgid "instance %s: suspending" +msgid "group %s already exists" msgstr "" -#: nova/compute/manager.py:401 +#: ../nova/api/ec2/cloud.py:507 #, python-format -msgid "instance %s: resuming" +msgid "Delete security group %s" msgstr "" -#: nova/compute/manager.py:420 +#: ../nova/api/ec2/cloud.py:584 #, python-format -msgid "instance %s: locking" +msgid "Create volume of %s GB" msgstr "" -#: nova/compute/manager.py:432 +#: ../nova/api/ec2/cloud.py:612 #, python-format -msgid "instance %s: unlocking" +msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" msgstr "" -#: nova/compute/manager.py:442 +#: ../nova/api/ec2/cloud.py:629 #, python-format -msgid "instance %s: getting locked state" +msgid "Detach volume %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:761 +msgid "Allocate address" msgstr "" -#: nova/compute/manager.py:462 +#: ../nova/api/ec2/cloud.py:766 #, python-format -msgid "instance %s: attaching volume %s to %s" +msgid "Release address %s" msgstr "" -#: nova/compute/manager.py:478 +#: ../nova/api/ec2/cloud.py:771 #, python-format -msgid "instance %s: attach failed %s, removing" +msgid "Associate address %(public_ip)s to instance %(instance_id)s" 
msgstr "" -#: nova/compute/manager.py:493 +#: ../nova/api/ec2/cloud.py:780 #, python-format -msgid "Detach volume %s from mountpoint %s on instance %s" +msgid "Disassociate address %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:807 +msgid "Going to start terminating instances" msgstr "" -#: nova/compute/manager.py:497 +#: ../nova/api/ec2/cloud.py:815 #, python-format -msgid "Detaching volume from unknown instance %s" +msgid "Reboot instance %r" msgstr "" -#: nova/compute/monitor.py:259 +#: ../nova/api/ec2/cloud.py:867 #, python-format -msgid "updating %s..." +msgid "De-registering image %s" msgstr "" -#: nova/compute/monitor.py:289 -msgid "unexpected error during update" +#: ../nova/api/ec2/cloud.py:875 +#, python-format +msgid "Registered image %(image_location)s with id %(image_id)s" msgstr "" -#: nova/compute/monitor.py:355 +#: ../nova/api/ec2/cloud.py:882 ../nova/api/ec2/cloud.py:900 #, python-format -msgid "Cannot get blockstats for \"%s\" on \"%s\"" +msgid "attribute not supported: %s" msgstr "" -#: nova/compute/monitor.py:377 +#: ../nova/api/ec2/cloud.py:890 #, python-format -msgid "Cannot get ifstats for \"%s\" on \"%s\"" +msgid "invalid id: %s" msgstr "" -#: nova/compute/monitor.py:412 -msgid "unexpected exception getting connection" +#: ../nova/api/ec2/cloud.py:903 +msgid "user or group not specified" msgstr "" -#: nova/compute/monitor.py:427 +#: ../nova/api/ec2/cloud.py:905 +msgid "only group \"all\" is supported" +msgstr "" + +#: ../nova/api/ec2/cloud.py:907 +msgid "operation_type must be add or remove" +msgstr "" + +#: ../nova/api/ec2/cloud.py:908 #, python-format -msgid "Found instance: %s" +msgid "Updating image %s publicity" msgstr "" -#: nova/db/sqlalchemy/api.py:43 -msgid "Use of empty request context is deprecated" +#: ../bin/nova-api.py:52 +#, python-format +msgid "Using paste.deploy config at: %s" msgstr "" -#: nova/db/sqlalchemy/api.py:132 +#: ../bin/nova-api.py:57 #, python-format -msgid "No service for id %s" +msgid "No paste configuration 
for app: %s" msgstr "" -#: nova/db/sqlalchemy/api.py:229 +#: ../bin/nova-api.py:59 #, python-format -msgid "No service for %s, %s" +msgid "" +"App Config: %(api)s\n" +"%(config)r" msgstr "" -#: nova/db/sqlalchemy/api.py:574 +#: ../bin/nova-api.py:64 #, python-format -msgid "No floating ip for address %s" +msgid "Running %s API" msgstr "" -#: nova/db/sqlalchemy/api.py:668 +#: ../bin/nova-api.py:69 #, python-format -msgid "No instance for id %s" +msgid "No known API applications configured in %s." msgstr "" -#: nova/db/sqlalchemy/api.py:758 nova/virt/libvirt_conn.py:598 -#: nova/virt/xenapi/volumeops.py:48 nova/virt/xenapi/volumeops.py:103 +#: ../bin/nova-api.py:83 #, python-format -msgid "Instance %s not found" +msgid "Starting nova-api node (version %s)" msgstr "" -#: nova/db/sqlalchemy/api.py:891 +#: ../bin/nova-api.py:89 #, python-format -msgid "no keypair for user %s, name %s" +msgid "No paste configuration found for: %s" msgstr "" -#: nova/db/sqlalchemy/api.py:1006 nova/db/sqlalchemy/api.py:1064 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:84 #, python-format -msgid "No network for id %s" +msgid "Argument %(key)s value %(value)s is too short." msgstr "" -#: nova/db/sqlalchemy/api.py:1036 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:89 #, python-format -msgid "No network for bridge %s" +msgid "Argument %(key)s value %(value)s contains invalid characters." msgstr "" -#: nova/db/sqlalchemy/api.py:1050 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:94 #, python-format -msgid "No network for instance %s" +msgid "Argument %(key)s value %(value)s starts with a hyphen." msgstr "" -#: nova/db/sqlalchemy/api.py:1180 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:102 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:130 #, python-format -msgid "Token %s does not exist" +msgid "Argument %s is required." 
msgstr "" -#: nova/db/sqlalchemy/api.py:1205 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:117 #, python-format -msgid "No quota for project_id %s" +msgid "" +"Argument %(key)s may not take value %(value)s. Valid values are ['true', " +"'false']." msgstr "" -#: nova/db/sqlalchemy/api.py:1356 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:163 #, python-format -msgid "No volume for id %s" +msgid "" +"Created VDI %(vdi_ref)s (%(label)s, %(size)s, %(read_only)s) on %(sr_ref)s." msgstr "" -#: nova/db/sqlalchemy/api.py:1401 +#: ../nova/virt/xenapi/vmops.py:67 #, python-format -msgid "Volume %s not found" +msgid "Attempted to create non-unique name %s" msgstr "" -#: nova/db/sqlalchemy/api.py:1413 +#: ../nova/virt/xenapi/vmops.py:73 #, python-format -msgid "No export device found for volume %s" +msgid "instance %(name)s: not enough free memory" msgstr "" -#: nova/db/sqlalchemy/api.py:1426 +#: ../nova/virt/xenapi/vmops.py:148 #, python-format -msgid "No target id found for volume %s" +msgid "Starting VM %s..." msgstr "" -#: nova/db/sqlalchemy/api.py:1471 +#: ../nova/virt/xenapi/vmops.py:151 #, python-format -msgid "No security group with id %s" +msgid "Spawning VM %(instance_name)s created %(vm_ref)s." 
msgstr "" -#: nova/db/sqlalchemy/api.py:1488 +#: ../nova/virt/xenapi/vmops.py:162 #, python-format -msgid "No security group named %s for project: %s" +msgid "Invalid value for onset_files: '%s'" msgstr "" -#: nova/db/sqlalchemy/api.py:1576 +#: ../nova/virt/xenapi/vmops.py:167 #, python-format -msgid "No secuity group rule with id %s" +msgid "Injecting file path: '%s'" msgstr "" -#: nova/db/sqlalchemy/api.py:1650 +#: ../nova/virt/xenapi/vmops.py:180 #, python-format -msgid "No user for id %s" +msgid "Instance %s: booted" msgstr "" -#: nova/db/sqlalchemy/api.py:1666 +#: ../nova/virt/xenapi/vmops.py:232 #, python-format -msgid "No user for access key %s" +msgid "Instance not present %s" msgstr "" -#: nova/db/sqlalchemy/api.py:1728 +#. TODO(sirp): Add quiesce and VSS locking support when Windows support +#. is added +#: ../nova/virt/xenapi/vmops.py:261 #, python-format -msgid "No project with id %s" +msgid "Starting snapshot for VM %s" msgstr "" -#: nova/image/glance.py:78 +#: ../nova/virt/xenapi/vmops.py:269 #, python-format -msgid "Parallax returned HTTP error %d from request for /images" +msgid "Unable to Snapshot %(vm_ref)s: %(exc)s" msgstr "" -#: nova/image/glance.py:97 +#: ../nova/virt/xenapi/vmops.py:280 #, python-format -msgid "Parallax returned HTTP error %d from request for /images/detail" +msgid "Finished snapshot and upload for VM %s" msgstr "" -#: nova/image/s3.py:82 +#: ../nova/virt/xenapi/vmops.py:356 #, python-format -msgid "Image %s could not be found" +msgid "VM %(vm)s already halted, skipping shutdown..." +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:389 +msgid "Removing kernel/ramdisk files" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:399 +msgid "kernel/ramdisk files removed" msgstr "" -#: nova/network/api.py:39 +#: ../nova/virt/xenapi/vmops.py:561 #, python-format -msgid "Quota exceeeded for %s, tried to allocate address" +msgid "" +"TIMEOUT: The call to %(method)s timed out. 
VM id=%(instance_id)s; " +"args=%(strargs)s" msgstr "" -#: nova/network/api.py:42 -msgid "Address quota exceeded. You cannot allocate any more addresses" +#: ../nova/virt/xenapi/vmops.py:564 +#, python-format +msgid "" +"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. VM " +"id=%(instance_id)s; args=%(strargs)s" msgstr "" -#: nova/network/linux_net.py:176 +#: ../nova/virt/xenapi/vmops.py:569 #, python-format -msgid "Starting VLAN inteface %s" +msgid "" +"The call to %(method)s returned an error: %(e)s. VM id=%(instance_id)s; " +"args=%(strargs)s" msgstr "" -#: nova/network/linux_net.py:186 +#: ../nova/virt/xenapi/vmops.py:760 #, python-format -msgid "Starting Bridge interface for %s" +msgid "OpenSSL error: %s" msgstr "" -#: nova/network/linux_net.py:254 +#: ../nova/tests/test_compute.py:148 #, python-format -msgid "Hupping dnsmasq threw %s" +msgid "Running instances: %s" msgstr "" -#: nova/network/linux_net.py:256 +#: ../nova/tests/test_compute.py:154 #, python-format -msgid "Pid %d is stale, relaunching dnsmasq" +msgid "After terminating instances: %s" +msgstr "" + +#: ../nova/cloudpipe/pipelib.py:45 +msgid "Template for script to run on cloudpipe instance boot" msgstr "" -#: nova/network/linux_net.py:334 +#: ../nova/cloudpipe/pipelib.py:48 +msgid "Network to push into openvpn config" +msgstr "" + +#: ../nova/cloudpipe/pipelib.py:51 +msgid "Netmask to push into openvpn config" +msgstr "" + +#: ../nova/cloudpipe/pipelib.py:97 #, python-format -msgid "Killing dnsmasq threw %s" +msgid "Launching VPN for %s" msgstr "" -#: nova/network/manager.py:135 -msgid "setting network host" +#: ../nova/db/sqlalchemy/migration.py:35 +msgid "python-migrate is not installed. Exiting." msgstr "" -#: nova/network/manager.py:190 +#: ../nova/image/s3.py:99 #, python-format -msgid "Leasing IP %s" +msgid "Image %s could not be found" +msgstr "" + +#: ../nova/api/ec2/__init__.py:121 +msgid "Too many failed authentications." 
msgstr "" -#: nova/network/manager.py:194 +#: ../nova/api/ec2/__init__.py:131 #, python-format -msgid "IP %s leased that isn't associated" +msgid "" +"Access key %(access_key)s has had %(failures)d failed authentications and " +"will be locked out for %(lock_mins)d minutes." msgstr "" -#: nova/network/manager.py:197 +#: ../nova/api/ec2/__init__.py:169 ../nova/objectstore/handler.py:140 #, python-format -msgid "IP %s leased to bad mac %s vs %s" +msgid "Authentication Failure: %s" msgstr "" -#: nova/network/manager.py:205 +#: ../nova/api/ec2/__init__.py:182 #, python-format -msgid "IP %s leased that was already deallocated" +msgid "Authenticated Request For %(uname)s:%(pname)s)" msgstr "" -#: nova/network/manager.py:214 +#: ../nova/api/ec2/__init__.py:207 #, python-format -msgid "IP %s released that isn't associated" +msgid "action: %s" msgstr "" -#: nova/network/manager.py:217 +#: ../nova/api/ec2/__init__.py:209 #, python-format -msgid "IP %s released from bad mac %s vs %s" +msgid "arg: %(key)s\t\tval: %(value)s" msgstr "" -#: nova/network/manager.py:220 +#: ../nova/api/ec2/__init__.py:281 #, python-format -msgid "IP %s released that was not leased" +msgid "" +"Unauthorized request for controller=%(controller)s and action=%(action)s" msgstr "" -#: nova/network/manager.py:442 +#: ../nova/api/ec2/__init__.py:314 #, python-format -msgid "Dissassociated %s stale fixed ip(s)" +msgid "InstanceNotFound raised: %s" msgstr "" -#: nova/objectstore/handler.py:106 +#: ../nova/api/ec2/__init__.py:320 #, python-format -msgid "Unknown S3 value type %r" +msgid "VolumeNotFound raised: %s" msgstr "" -#: nova/objectstore/handler.py:137 -msgid "Authenticated request" +#: ../nova/api/ec2/__init__.py:326 +#, python-format +msgid "NotFound raised: %s" msgstr "" -#: nova/objectstore/handler.py:182 -msgid "List of buckets requested" +#: ../nova/api/ec2/__init__.py:329 +#, python-format +msgid "ApiError raised: %s" msgstr "" -#: nova/objectstore/handler.py:209 +#: 
../nova/api/ec2/__init__.py:338 #, python-format -msgid "List keys for bucket %s" +msgid "Unexpected error raised: %s" msgstr "" -#: nova/objectstore/handler.py:217 +#: ../nova/api/ec2/__init__.py:343 +msgid "An unknown error has occurred. Please try your request again." +msgstr "" + +#: ../nova/auth/dbdriver.py:84 #, python-format -msgid "Unauthorized attempt to access bucket %s" +msgid "User %s already exists" msgstr "" -#: nova/objectstore/handler.py:235 +#: ../nova/auth/dbdriver.py:106 ../nova/auth/ldapdriver.py:232 #, python-format -msgid "Creating bucket %s" +msgid "Project can't be created because manager %s doesn't exist" msgstr "" -#: nova/objectstore/handler.py:245 +#: ../nova/auth/dbdriver.py:122 ../nova/auth/ldapdriver.py:243 #, python-format -msgid "Deleting bucket %s" +msgid "Project can't be created because user %s doesn't exist" msgstr "" -#: nova/objectstore/handler.py:249 +#: ../nova/auth/dbdriver.py:135 ../nova/auth/ldapdriver.py:229 #, python-format -msgid "Unauthorized attempt to delete bucket %s" +msgid "Project can't be created because project %s already exists" msgstr "" -#: nova/objectstore/handler.py:271 +#: ../nova/auth/dbdriver.py:157 ../nova/auth/ldapdriver.py:268 #, python-format -msgid "Getting object: %s / %s" +msgid "Project can't be modified because manager %s doesn't exist" msgstr "" -#: nova/objectstore/handler.py:274 +#: ../nova/auth/dbdriver.py:245 #, python-format -msgid "Unauthorized attempt to get object %s from bucket %s" +msgid "User \"%s\" not found" msgstr "" -#: nova/objectstore/handler.py:292 +#: ../nova/auth/dbdriver.py:248 #, python-format -msgid "Putting object: %s / %s" +msgid "Project \"%s\" not found" +msgstr "" + +#: ../nova/virt/xenapi_conn.py:129 +msgid "" +"Must specify xenapi_connection_url, xenapi_connection_username (optionally), " +"and xenapi_connection_password to use connection_type=xenapi" msgstr "" -#: nova/objectstore/handler.py:295 +#: ../nova/virt/xenapi_conn.py:311 #, python-format -msgid 
"Unauthorized attempt to upload object %s to bucket %s" +msgid "Task [%(name)s] %(task)s status: success %(result)s" msgstr "" -#: nova/objectstore/handler.py:314 +#: ../nova/virt/xenapi_conn.py:317 #, python-format -msgid "Deleting object: %s / %s" +msgid "Task [%(name)s] %(task)s status: %(status)s %(error_info)s" msgstr "" -#: nova/objectstore/handler.py:393 +#: ../nova/virt/xenapi_conn.py:331 ../nova/virt/xenapi_conn.py:344 #, python-format -msgid "Not authorized to upload image: invalid directory %s" +msgid "Got exception: %s" msgstr "" -#: nova/objectstore/handler.py:401 +#: ../nova/compute/monitor.py:259 #, python-format -msgid "Not authorized to upload image: unauthorized bucket %s" +msgid "updating %s..." +msgstr "" + +#: ../nova/compute/monitor.py:289 +msgid "unexpected error during update" msgstr "" -#: nova/objectstore/handler.py:406 +#: ../nova/compute/monitor.py:356 #, python-format -msgid "Starting image upload: %s" +msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\"" msgstr "" -#: nova/objectstore/handler.py:420 +#: ../nova/compute/monitor.py:379 #, python-format -msgid "Not authorized to update attributes of image %s" +msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\"" +msgstr "" + +#: ../nova/compute/monitor.py:414 +msgid "unexpected exception getting connection" msgstr "" -#: nova/objectstore/handler.py:428 +#: ../nova/compute/monitor.py:429 #, python-format -msgid "Toggling publicity flag of image %s %r" +msgid "Found instance: %s" msgstr "" -#: nova/objectstore/handler.py:433 +#: ../nova/volume/san.py:67 #, python-format -msgid "Updating user fields on image %s" +msgid "Could not find iSCSI export for volume %s" msgstr "" -#: nova/objectstore/handler.py:447 +#: ../nova/api/ec2/apirequest.py:100 #, python-format -msgid "Unauthorized attempt to delete image %s" +msgid "" +"Unsupported API request: controller = %(controller)s, action = %(action)s" msgstr "" -#: nova/objectstore/handler.py:452 +#: 
../nova/api/openstack/__init__.py:55 #, python-format -msgid "Deleted image: %s" +msgid "Caught error: %s" msgstr "" -#: nova/scheduler/chance.py:37 nova/scheduler/simple.py:73 -#: nova/scheduler/simple.py:106 nova/scheduler/simple.py:118 -msgid "No hosts found" +#: ../nova/api/openstack/__init__.py:76 +msgid "Including admin operations in API." msgstr "" -#: nova/scheduler/driver.py:66 -msgid "Must implement a fallback schedule" +#: ../nova/console/xvp.py:99 +msgid "Rebuilding xvp conf" msgstr "" -#: nova/scheduler/manager.py:69 +#: ../nova/console/xvp.py:116 #, python-format -msgid "Casting to %s %s for %s" +msgid "Re-wrote %s" msgstr "" -#: nova/scheduler/simple.py:63 -msgid "All hosts have too many cores" +#: ../nova/console/xvp.py:121 +msgid "Stopping xvp" msgstr "" -#: nova/scheduler/simple.py:95 -msgid "All hosts have too many gigabytes" +#: ../nova/console/xvp.py:134 +msgid "Starting xvp" msgstr "" -#: nova/scheduler/simple.py:115 -msgid "All hosts have too many networks" +#: ../nova/console/xvp.py:141 +#, python-format +msgid "Error starting xvp: %s" msgstr "" -#: nova/tests/test_cloud.py:198 -msgid "Can't test instances without a real virtual env." +#: ../nova/console/xvp.py:144 +msgid "Restarting xvp" msgstr "" -#: nova/tests/test_cloud.py:210 -#, python-format -msgid "Need to watch instance %s until it's running..." +#: ../nova/console/xvp.py:146 +msgid "xvp not running..." msgstr "" -#: nova/tests/test_compute.py:104 -#, python-format -msgid "Running instances: %s" +#: ../bin/nova-manage.py:272 +msgid "" +"The above error may show that the database has not been created.\n" +"Please create a database using nova-manage sync db before running this " +"command." msgstr "" -#: nova/tests/test_compute.py:110 -#, python-format -msgid "After terminating instances: %s" +#: ../bin/nova-manage.py:426 +msgid "" +"No more networks available. 
If this is a new installation, you need\n" +"to call something like this:\n" +"\n" +" nova-manage network create 10.0.0.0/8 10 64\n" +"\n" +msgstr "" + +#: ../bin/nova-manage.py:431 +msgid "" +"The above error may show that the certificate db has not been created.\n" +"Please create a database by running a nova-api server on this host." +msgstr "" + +#: ../bin/nova-manage.py:447 ../bin/nova-manage.py:536 +msgid "network" +msgstr "" + +#: ../bin/nova-manage.py:448 +msgid "IP address" +msgstr "" + +#: ../bin/nova-manage.py:449 +msgid "MAC address" +msgstr "" + +#: ../bin/nova-manage.py:450 +msgid "hostname" +msgstr "" + +#: ../bin/nova-manage.py:451 +msgid "host" +msgstr "" + +#: ../bin/nova-manage.py:537 +msgid "netmask" +msgstr "" + +#: ../bin/nova-manage.py:538 +msgid "start address" msgstr "" -#: nova/tests/test_rpc.py:89 +#: ../nova/virt/disk.py:69 #, python-format -msgid "Nested received %s, %s" +msgid "Failed to load partition: %s" msgstr "" -#: nova/tests/test_rpc.py:94 +#: ../nova/virt/disk.py:91 #, python-format -msgid "Nested return %s" +msgid "Failed to mount filesystem: %s" msgstr "" -#: nova/tests/test_rpc.py:119 nova/tests/test_rpc.py:125 +#: ../nova/virt/disk.py:124 #, python-format -msgid "Received %s" +msgid "nbd device %s did not show up" msgstr "" -#: nova/tests/test_volume.py:162 +#: ../nova/virt/disk.py:128 #, python-format -msgid "Target %s allocated" +msgid "Could not attach image to loopback: %s" msgstr "" -#: nova/virt/connection.py:73 -msgid "Failed to open connection to the hypervisor" +#: ../nova/virt/disk.py:151 +msgid "No free nbd devices" msgstr "" -#: nova/virt/fake.py:210 +#: ../doc/ext/nova_todo.py:46 #, python-format -msgid "Instance %s Not Found" +msgid "%(filename)s, line %(line_info)d" msgstr "" -#: nova/virt/hyperv.py:118 +#. 
FIXME(chiradeep): implement this +#: ../nova/virt/hyperv.py:118 msgid "In init host" msgstr "" -#: nova/virt/hyperv.py:131 +#: ../nova/virt/hyperv.py:131 #, python-format msgid "Attempt to create duplicate vm %s" msgstr "" -#: nova/virt/hyperv.py:148 +#: ../nova/virt/hyperv.py:148 #, python-format msgid "Starting VM %s " msgstr "" -#: nova/virt/hyperv.py:150 +#: ../nova/virt/hyperv.py:150 #, python-format msgid "Started VM %s " msgstr "" -#: nova/virt/hyperv.py:152 +#: ../nova/virt/hyperv.py:152 #, python-format msgid "spawn vm failed: %s" msgstr "" -#: nova/virt/hyperv.py:169 +#: ../nova/virt/hyperv.py:169 #, python-format msgid "Failed to create VM %s" msgstr "" -#: nova/virt/hyperv.py:171 nova/virt/xenapi/vm_utils.py:125 -#, python-format -msgid "Created VM %s..." -msgstr "" - -#: nova/virt/hyperv.py:188 +#: ../nova/virt/hyperv.py:188 #, python-format msgid "Set memory for vm %s..." msgstr "" -#: nova/virt/hyperv.py:198 +#: ../nova/virt/hyperv.py:198 #, python-format msgid "Set vcpus for vm %s..." 
msgstr "" -#: nova/virt/hyperv.py:202 +#: ../nova/virt/hyperv.py:202 #, python-format -msgid "Creating disk for %s by attaching disk file %s" +msgid "Creating disk for %(vm_name)s by attaching disk file %(vhdfile)s" msgstr "" -#: nova/virt/hyperv.py:227 +#: ../nova/virt/hyperv.py:227 #, python-format msgid "Failed to add diskdrive to VM %s" msgstr "" -#: nova/virt/hyperv.py:230 +#: ../nova/virt/hyperv.py:230 #, python-format msgid "New disk drive path is %s" msgstr "" -#: nova/virt/hyperv.py:247 +#: ../nova/virt/hyperv.py:247 #, python-format msgid "Failed to add vhd file to VM %s" msgstr "" -#: nova/virt/hyperv.py:249 +#: ../nova/virt/hyperv.py:249 #, python-format msgid "Created disk for %s" msgstr "" -#: nova/virt/hyperv.py:253 +#: ../nova/virt/hyperv.py:253 #, python-format msgid "Creating nic for %s " msgstr "" -#: nova/virt/hyperv.py:272 +#: ../nova/virt/hyperv.py:272 msgid "Failed creating a port on the external vswitch" msgstr "" -#: nova/virt/hyperv.py:273 +#: ../nova/virt/hyperv.py:273 #, python-format msgid "Failed creating port for %s" msgstr "" -#: nova/virt/hyperv.py:275 +#: ../nova/virt/hyperv.py:276 #, python-format -msgid "Created switch port %s on switch %s" +msgid "Created switch port %(vm_name)s on switch %(ext_path)s" msgstr "" -#: nova/virt/hyperv.py:285 +#: ../nova/virt/hyperv.py:286 #, python-format msgid "Failed to add nic to VM %s" msgstr "" -#: nova/virt/hyperv.py:287 +#: ../nova/virt/hyperv.py:288 #, python-format msgid "Created nic for %s " msgstr "" -#: nova/virt/hyperv.py:320 +#: ../nova/virt/hyperv.py:321 #, python-format msgid "WMI job failed: %s" msgstr "" -#: nova/virt/hyperv.py:322 +#: ../nova/virt/hyperv.py:325 #, python-format -msgid "WMI job succeeded: %s, Elapsed=%s " +msgid "WMI job succeeded: %(desc)s, Elapsed=%(elap)s " msgstr "" -#: nova/virt/hyperv.py:358 +#: ../nova/virt/hyperv.py:361 #, python-format msgid "Got request to destroy vm %s" msgstr "" -#: nova/virt/hyperv.py:383 +#: ../nova/virt/hyperv.py:386 #, 
python-format msgid "Failed to destroy vm %s" msgstr "" -#: nova/virt/hyperv.py:389 +#: ../nova/virt/hyperv.py:393 #, python-format -msgid "Del: disk %s vm %s" +msgid "Del: disk %(vhdfile)s vm %(instance_name)s" msgstr "" -#: nova/virt/hyperv.py:405 +#: ../nova/virt/hyperv.py:415 #, python-format msgid "" -"Got Info for vm %s: state=%s, mem=%s, num_cpu=%s, " -"cpu_time=%s" +"Got Info for vm %(instance_id)s: state=%(state)s, mem=%(memusage)s, " +"num_cpu=%(numprocs)s, cpu_time=%(uptime)s" msgstr "" -#: nova/virt/hyperv.py:424 nova/virt/xenapi/vm_utils.py:301 +#: ../nova/virt/hyperv.py:451 #, python-format -msgid "duplicate name found: %s" +msgid "Successfully changed vm state of %(vm_name)s to %(req_state)s" msgstr "" -#: nova/virt/hyperv.py:444 +#: ../nova/virt/hyperv.py:454 #, python-format -msgid "Successfully changed vm state of %s to %s" +msgid "Failed to change vm state of %(vm_name)s to %(req_state)s" msgstr "" -#: nova/virt/hyperv.py:447 nova/virt/hyperv.py:449 +#: ../nova/compute/api.py:71 #, python-format -msgid "Failed to change vm state of %s to %s" +msgid "Instance %d was not found in get_network_topic" msgstr "" -#: nova/virt/images.py:70 +#: ../nova/compute/api.py:77 #, python-format -msgid "Finished retreving %s -- placed in %s" +msgid "Instance %d has no host" msgstr "" -#: nova/virt/libvirt_conn.py:144 +#: ../nova/compute/api.py:97 #, python-format -msgid "Connecting to libvirt: %s" +msgid "Quota exceeeded for %(pid)s, tried to run %(min_count)s instances" msgstr "" -#: nova/virt/libvirt_conn.py:157 -msgid "Connection to libvirt broke" +#: ../nova/compute/api.py:99 +#, python-format +msgid "" +"Instance quota exceeded. You can only run %s more instances of this type." msgstr "" -#: nova/virt/libvirt_conn.py:229 +#: ../nova/compute/api.py:112 +msgid "Creating a raw instance" +msgstr "" + +#: ../nova/compute/api.py:160 #, python-format -msgid "instance %s: deleting instance files %s" +msgid "Going to run %s instances..." 
msgstr "" -#: nova/virt/libvirt_conn.py:271 +#: ../nova/compute/api.py:187 #, python-format -msgid "No disk at %s" +msgid "Casting to scheduler for %(pid)s/%(uid)s's instance %(instance_id)s" msgstr "" -#: nova/virt/libvirt_conn.py:278 -msgid "Instance snapshotting is not supported for libvirtat this time" +#: ../nova/compute/api.py:292 +#, python-format +msgid "Going to try to terminate %s" msgstr "" -#: nova/virt/libvirt_conn.py:294 +#: ../nova/compute/api.py:296 #, python-format -msgid "instance %s: rebooted" +msgid "Instance %d was not found during terminate" msgstr "" -#: nova/virt/libvirt_conn.py:297 +#: ../nova/compute/api.py:301 #, python-format -msgid "_wait_for_reboot failed: %s" +msgid "Instance %d is already being terminated" msgstr "" -#: nova/virt/libvirt_conn.py:340 +#: ../nova/compute/api.py:481 #, python-format -msgid "instance %s: rescued" +msgid "Invalid device specified: %s. Example device: /dev/vdb" +msgstr "" + +#: ../nova/compute/api.py:496 +msgid "Volume isn't attached to anything!" msgstr "" -#: nova/virt/libvirt_conn.py:343 +#: ../nova/rpc.py:98 #, python-format -msgid "_wait_for_rescue failed: %s" +msgid "" +"AMQP server on %(fl_host)s:%(fl_port)d is unreachable. Trying again in " +"%(fl_intv)d seconds." msgstr "" -#: nova/virt/libvirt_conn.py:370 +#: ../nova/rpc.py:103 #, python-format -msgid "instance %s: is running" +msgid "Unable to connect to AMQP server after %d tries. Shutting down." msgstr "" -#: nova/virt/libvirt_conn.py:381 +#: ../nova/rpc.py:122 +msgid "Reconnected to queue" +msgstr "" + +#: ../nova/rpc.py:129 +msgid "Failed to fetch message from queue" +msgstr "" + +#: ../nova/rpc.py:159 #, python-format -msgid "instance %s: booted" +msgid "Initing the Adapter Consumer for %s" msgstr "" -#: nova/virt/libvirt_conn.py:384 nova/virt/xenapi/vmops.py:116 +#: ../nova/rpc.py:178 #, python-format -msgid "instance %s: failed to boot" +msgid "received %s" msgstr "" -#: nova/virt/libvirt_conn.py:395 +#. 
NOTE(vish): we may not want to ack here, but that means that bad +#. messages stay in the queue indefinitely, so for now +#. we just log the message and send an error string +#. back to the caller +#: ../nova/rpc.py:191 #, python-format -msgid "virsh said: %r" +msgid "no method for message: %s" +msgstr "keine Methode für diese Nachricht gefunden: %s" + +#: ../nova/rpc.py:192 +#, python-format +msgid "No method for message: %s" +msgstr "keine Methode für diese Nachricht gefunden: %s" + +#: ../nova/rpc.py:253 +#, python-format +msgid "Returning exception %s to caller" msgstr "" -#: nova/virt/libvirt_conn.py:399 -msgid "cool, it's a device" +#: ../nova/rpc.py:294 +#, python-format +msgid "unpacked context: %s" msgstr "" -#: nova/virt/libvirt_conn.py:407 +#: ../nova/rpc.py:313 +msgid "Making asynchronous call..." +msgstr "führe asynchronen Aufruf durch..." + +#: ../nova/rpc.py:316 #, python-format -msgid "data: %r, fpath: %r" +msgid "MSG_ID is %s" +msgstr "MSG_ID ist %s" + +#: ../nova/rpc.py:354 +msgid "Making asynchronous cast..." msgstr "" -#: nova/virt/libvirt_conn.py:415 +#: ../nova/rpc.py:364 #, python-format -msgid "Contents of file %s: %r" +msgid "response %s" msgstr "" -#: nova/virt/libvirt_conn.py:449 +#: ../nova/rpc.py:373 #, python-format -msgid "instance %s: Creating image" +msgid "topic is %s" +msgstr "Betreff ist %s" + +#: ../nova/rpc.py:374 +#, python-format +msgid "message %s" +msgstr "Nachricht %s" + +#: ../nova/volume/driver.py:78 +#, python-format +msgid "Recovering from a failed execute. Try number %s" msgstr "" -#: nova/virt/libvirt_conn.py:505 +#: ../nova/volume/driver.py:87 #, python-format -msgid "instance %s: injecting key into image %s" +msgid "volume group %s doesn't exist" msgstr "" -#: nova/virt/libvirt_conn.py:508 +#: ../nova/volume/driver.py:220 #, python-format -msgid "instance %s: injecting net into image %s" +msgid "FAKE AOE: %s" +msgstr "" + +#: ../nova/volume/driver.py:233 +msgid "Skipping ensure_export. 
No iscsi_target " +msgstr "" + +#: ../nova/volume/driver.py:279 ../nova/volume/driver.py:288 +msgid "Skipping remove_export. No iscsi_target " msgstr "" -#: nova/virt/libvirt_conn.py:516 +#: ../nova/volume/driver.py:347 #, python-format -msgid "instance %s: ignoring error injecting data into image %s (%s)" +msgid "FAKE ISCSI: %s" msgstr "" -#: nova/virt/libvirt_conn.py:544 nova/virt/libvirt_conn.py:547 +#: ../nova/volume/driver.py:359 #, python-format -msgid "instance %s: starting toXML method" +msgid "rbd has no pool %s" msgstr "" -#: nova/virt/libvirt_conn.py:589 +#: ../nova/volume/driver.py:414 #, python-format -msgid "instance %s: finished toXML method" +msgid "Sheepdog is not working: %s" msgstr "" -#: nova/virt/xenapi_conn.py:113 +#: ../nova/volume/driver.py:416 +msgid "Sheepdog is not working" +msgstr "" + +#: ../nova/wsgi.py:68 +#, python-format +msgid "Starting %(arg0)s on %(host)s:%(port)s" +msgstr "" + +#: ../nova/wsgi.py:147 +msgid "You must implement __call__" +msgstr "" + +#: ../bin/nova-instancemonitor.py:55 +msgid "Starting instance monitor" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:58 +msgid "leasing ip" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:73 +msgid "Adopted old lease or got a change of mac/hostname" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:80 +msgid "releasing ip" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:123 +#, python-format msgid "" -"Must specify xenapi_connection_url, xenapi_connection_username (optionally), " -"and xenapi_connection_password to use connection_type=xenapi" +"Called %(action)s for mac %(mac)s with ip %(ip)s and hostname %(hostname)s " +"on interface %(interface)s" msgstr "" -#: nova/virt/xenapi_conn.py:263 +#: ../nova/virt/fake.py:239 #, python-format -msgid "Task [%s] %s status: success %s" +msgid "Instance %s Not Found" msgstr "" -#: nova/virt/xenapi_conn.py:271 +#: ../nova/network/manager.py:153 #, python-format -msgid "Task [%s] %s status: %s %s" +msgid "Dissassociated %s stale fixed ip(s)" msgstr "" -#: 
nova/virt/xenapi_conn.py:287 nova/virt/xenapi_conn.py:300 +#: ../nova/network/manager.py:157 +msgid "setting network host" +msgstr "" + +#: ../nova/network/manager.py:212 #, python-format -msgid "Got exception: %s" +msgid "Leasing IP %s" msgstr "" -#: nova/virt/xenapi/fake.py:72 +#: ../nova/network/manager.py:216 #, python-format -msgid "%s: _db_content => %s" +msgid "IP %s leased that isn't associated" msgstr "" -#: nova/virt/xenapi/fake.py:247 nova/virt/xenapi/fake.py:338 -#: nova/virt/xenapi/fake.py:356 nova/virt/xenapi/fake.py:404 -msgid "Raising NotImplemented" +#: ../nova/network/manager.py:220 +#, python-format +msgid "IP %(address)s leased to bad mac %(inst_addr)s vs %(mac)s" msgstr "" -#: nova/virt/xenapi/fake.py:249 +#: ../nova/network/manager.py:228 #, python-format -msgid "xenapi.fake does not have an implementation for %s" +msgid "IP %s leased that was already deallocated" msgstr "" -#: nova/virt/xenapi/fake.py:283 +#: ../nova/network/manager.py:233 #, python-format -msgid "Calling %s %s" +msgid "Releasing IP %s" msgstr "" -#: nova/virt/xenapi/fake.py:288 +#: ../nova/network/manager.py:237 #, python-format -msgid "Calling getter %s" +msgid "IP %s released that isn't associated" msgstr "" -#: nova/virt/xenapi/fake.py:340 +#: ../nova/network/manager.py:241 #, python-format +msgid "IP %(address)s released from bad mac %(inst_addr)s vs %(mac)s" +msgstr "" + +#: ../nova/network/manager.py:244 +#, python-format +msgid "IP %s released that was not leased" +msgstr "" + +#: ../nova/network/manager.py:519 msgid "" -"xenapi.fake does not have an implementation for %s or it has been called " -"with the wrong number of arguments" +"The sum between the number of networks and the vlan start cannot be greater " +"than 4094" msgstr "" -#: nova/virt/xenapi/network_utils.py:40 +#: ../nova/virt/xenapi/volume_utils.py:57 #, python-format -msgid "Found non-unique network for bridge %s" +msgid "Introducing %s..." 
msgstr "" -#: nova/virt/xenapi/network_utils.py:43 +#: ../nova/virt/xenapi/volume_utils.py:74 #, python-format -msgid "Found no network for bridge %s" +msgid "Introduced %(label)s as %(sr_ref)s." msgstr "" -#: nova/virt/xenapi/vm_utils.py:127 +#: ../nova/virt/xenapi/volume_utils.py:78 +msgid "Unable to create Storage Repository" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:90 #, python-format -msgid "Created VM %s as %s." +msgid "Unable to find SR from VBD %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:147 +#: ../nova/virt/xenapi/volume_utils.py:96 #, python-format -msgid "Creating VBD for VM %s, VDI %s ... " +msgid "Forgetting SR %s ... " msgstr "" -#: nova/virt/xenapi/vm_utils.py:149 +#: ../nova/virt/xenapi/volume_utils.py:101 #, python-format -msgid "Created VBD %s for VM %s, VDI %s." +msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:165 +#: ../nova/virt/xenapi/volume_utils.py:107 #, python-format -msgid "VBD not found in instance %s" +msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:175 +#: ../nova/virt/xenapi/volume_utils.py:111 #, python-format -msgid "Unable to unplug VBD %s" +msgid "Forgetting SR %s done." msgstr "" -#: nova/virt/xenapi/vm_utils.py:187 +#: ../nova/virt/xenapi/volume_utils.py:113 #, python-format -msgid "Unable to destroy VBD %s" +msgid "Ignoring exception %(exc)s when forgetting SR %(sr_ref)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:202 +#: ../nova/virt/xenapi/volume_utils.py:123 #, python-format -msgid "Creating VIF for VM %s, network %s." +msgid "Unable to introduce VDI on SR %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:205 +#: ../nova/virt/xenapi/volume_utils.py:128 #, python-format -msgid "Created VIF %s for VM %s, network %s." 
+msgid "Unable to get record of VDI %s on" msgstr "" -#: nova/virt/xenapi/vm_utils.py:216 +#: ../nova/virt/xenapi/volume_utils.py:146 #, python-format -msgid "Snapshotting VM %s with label '%s'..." +msgid "Unable to introduce VDI for SR %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:229 +#: ../nova/virt/xenapi/volume_utils.py:175 #, python-format -msgid "Created snapshot %s from VM %s." +msgid "Unable to obtain target information %(device_path)s, %(mountpoint)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:243 +#: ../nova/virt/xenapi/volume_utils.py:197 #, python-format -msgid "Asking xapi to upload %s as '%s'" +msgid "Mountpoint cannot be translated: %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:261 +#: ../nova/objectstore/image.py:262 #, python-format -msgid "Asking xapi to fetch %s as %s" +msgid "Failed to decrypt private key: %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:279 +#: ../nova/objectstore/image.py:269 #, python-format -msgid "Looking up vdi %s for PV kernel" +msgid "Failed to decrypt initialization vector: %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:290 +#: ../nova/objectstore/image.py:277 #, python-format -msgid "PV Kernel in VDI:%d" +msgid "Failed to decrypt image file %(image_file)s: %(err)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:318 +#: ../nova/objectstore/handler.py:106 #, python-format -msgid "VDI %s is still available" +msgid "Unknown S3 value type %r" +msgstr "" + +#: ../nova/objectstore/handler.py:137 +msgid "Authenticated request" +msgstr "" + +#: ../nova/objectstore/handler.py:182 +msgid "List of buckets requested" msgstr "" -#: nova/virt/xenapi/vm_utils.py:331 +#: ../nova/objectstore/handler.py:209 #, python-format -msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgid "List keys for bucket %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:333 +#: ../nova/objectstore/handler.py:217 #, python-format -msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgid "Unauthorized attempt to access bucket %s" msgstr "" -#: 
nova/virt/xenapi/vm_utils.py:390 +#: ../nova/objectstore/handler.py:235 #, python-format -msgid "VHD %s has parent %s" +msgid "Creating bucket %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:407 +#: ../nova/objectstore/handler.py:245 #, python-format -msgid "Re-scanning SR %s" +msgid "Deleting bucket %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:431 +#: ../nova/objectstore/handler.py:249 #, python-format -msgid "Parent %s doesn't match original parent %s, waiting for coalesce..." +msgid "Unauthorized attempt to delete bucket %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:448 +#: ../nova/objectstore/handler.py:273 #, python-format -msgid "No VDIs found for VM %s" +msgid "Getting object: %(bname)s / %(nm)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:452 +#: ../nova/objectstore/handler.py:276 #, python-format -msgid "Unexpected number of VDIs (%s) found for VM %s" +msgid "Unauthorized attempt to get object %(nm)s from bucket %(bname)s" msgstr "" -#: nova/virt/xenapi/vmops.py:62 +#: ../nova/objectstore/handler.py:296 #, python-format -msgid "Attempted to create non-unique name %s" +msgid "Putting object: %(bname)s / %(nm)s" msgstr "" -#: nova/virt/xenapi/vmops.py:99 +#: ../nova/objectstore/handler.py:299 #, python-format -msgid "Starting VM %s..." +msgid "Unauthorized attempt to upload object %(nm)s to bucket %(bname)s" msgstr "" -#: nova/virt/xenapi/vmops.py:101 +#: ../nova/objectstore/handler.py:318 #, python-format -msgid "Spawning VM %s created %s." 
+msgid "Deleting object: %(bname)s / %(nm)s" msgstr "" -#: nova/virt/xenapi/vmops.py:112 +#: ../nova/objectstore/handler.py:322 #, python-format -msgid "Instance %s: booted" +msgid "Unauthorized attempt to delete object %(nm)s from bucket %(bname)s" msgstr "" -#: nova/virt/xenapi/vmops.py:137 +#: ../nova/objectstore/handler.py:396 #, python-format -msgid "Instance not present %s" +msgid "Not authorized to upload image: invalid directory %s" msgstr "" -#: nova/virt/xenapi/vmops.py:166 +#: ../nova/objectstore/handler.py:404 #, python-format -msgid "Starting snapshot for VM %s" +msgid "Not authorized to upload image: unauthorized bucket %s" msgstr "" -#: nova/virt/xenapi/vmops.py:174 +#: ../nova/objectstore/handler.py:409 #, python-format -msgid "Unable to Snapshot %s: %s" +msgid "Starting image upload: %s" msgstr "" -#: nova/virt/xenapi/vmops.py:184 +#: ../nova/objectstore/handler.py:423 #, python-format -msgid "Finished snapshot and upload for VM %s" +msgid "Not authorized to update attributes of image %s" msgstr "" -#: nova/virt/xenapi/vmops.py:252 +#: ../nova/objectstore/handler.py:431 #, python-format -msgid "suspend: instance not present %s" +msgid "Toggling publicity flag of image %(image_id)s %(newstatus)r" msgstr "" -#: nova/virt/xenapi/vmops.py:262 +#. other attributes imply update +#: ../nova/objectstore/handler.py:436 #, python-format -msgid "resume: instance not present %s" +msgid "Updating user fields on image %s" msgstr "" -#: nova/virt/xenapi/vmops.py:271 +#: ../nova/objectstore/handler.py:450 #, python-format -msgid "Instance not found %s" +msgid "Unauthorized attempt to delete image %s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:57 +#: ../nova/objectstore/handler.py:455 #, python-format -msgid "Introducing %s..." +msgid "Deleted image: %s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:74 +#: ../nova/auth/manager.py:259 #, python-format -msgid "Introduced %s as %s." 
+msgid "Looking up user: %r" msgstr "" -#: nova/virt/xenapi/volume_utils.py:78 -msgid "Unable to create Storage Repository" +#: ../nova/auth/manager.py:263 +#, python-format +msgid "Failed authorization for access key %s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:90 +#: ../nova/auth/manager.py:264 #, python-format -msgid "Unable to find SR from VBD %s" +msgid "No user found for access key %s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:96 +#: ../nova/auth/manager.py:270 #, python-format -msgid "Forgetting SR %s ... " +msgid "Using project name = user name (%s)" msgstr "" -#: nova/virt/xenapi/volume_utils.py:101 +#: ../nova/auth/manager.py:277 #, python-format -msgid "Ignoring exception %s when getting PBDs for %s" +msgid "failed authorization: no project named %(pjid)s (user=%(uname)s)" msgstr "" -#: nova/virt/xenapi/volume_utils.py:107 +#: ../nova/auth/manager.py:279 #, python-format -msgid "Ignoring exception %s when unplugging PBD %s" +msgid "No project called %s could be found" msgstr "" -#: nova/virt/xenapi/volume_utils.py:111 +#: ../nova/auth/manager.py:287 #, python-format -msgid "Forgetting SR %s done." 
+msgid "" +"Failed authorization: user %(uname)s not admin and not member of project " +"%(pjname)s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:113 +#: ../nova/auth/manager.py:289 #, python-format -msgid "Ignoring exception %s when forgetting SR %s" +msgid "User %(uid)s is not a member of project %(pjid)s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:123 +#: ../nova/auth/manager.py:298 ../nova/auth/manager.py:309 #, python-format -msgid "Unable to introduce VDI on SR %s" +msgid "Invalid signature for user %s" +msgstr "" + +#: ../nova/auth/manager.py:299 ../nova/auth/manager.py:310 +msgid "Signature does not match" +msgstr "" + +#: ../nova/auth/manager.py:380 +msgid "Must specify project" msgstr "" -#: nova/virt/xenapi/volume_utils.py:128 +#: ../nova/auth/manager.py:414 #, python-format -msgid "Unable to get record of VDI %s on" +msgid "The %s role can not be found" msgstr "" -#: nova/virt/xenapi/volume_utils.py:146 +#: ../nova/auth/manager.py:416 #, python-format -msgid "Unable to introduce VDI for SR %s" +msgid "The %s role is global only" msgstr "" -#: nova/virt/xenapi/volume_utils.py:175 +#: ../nova/auth/manager.py:420 #, python-format -msgid "Unable to obtain target information %s, %s" +msgid "Adding role %(role)s to user %(uid)s in project %(pid)s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:197 +#: ../nova/auth/manager.py:423 #, python-format -msgid "Mountpoint cannot be translated: %s" +msgid "Adding sitewide role %(role)s to user %(uid)s" msgstr "" -#: nova/virt/xenapi/volumeops.py:51 +#: ../nova/auth/manager.py:448 #, python-format -msgid "Attach_volume: %s, %s, %s" +msgid "Removing role %(role)s from user %(uid)s on project %(pid)s" msgstr "" -#: nova/virt/xenapi/volumeops.py:69 +#: ../nova/auth/manager.py:451 #, python-format -msgid "Unable to create VDI on SR %s for instance %s" +msgid "Removing sitewide role %(role)s from user %(uid)s" msgstr "" -#: nova/virt/xenapi/volumeops.py:81 +#: ../nova/auth/manager.py:515 #, python-format -msgid 
"Unable to use SR %s for instance %s" +msgid "Created project %(name)s with manager %(manager_user)s" msgstr "" -#: nova/virt/xenapi/volumeops.py:93 +#: ../nova/auth/manager.py:533 #, python-format -msgid "Unable to attach volume to instance %s" +msgid "modifying project %s" msgstr "" -#: nova/virt/xenapi/volumeops.py:95 +#: ../nova/auth/manager.py:545 #, python-format -msgid "Mountpoint %s attached to instance %s" +msgid "Adding user %(uid)s to project %(pid)s" msgstr "" -#: nova/virt/xenapi/volumeops.py:106 +#: ../nova/auth/manager.py:566 #, python-format -msgid "Detach_volume: %s, %s" +msgid "Remove user %(uid)s from project %(pid)s" msgstr "" -#: nova/virt/xenapi/volumeops.py:113 +#: ../nova/auth/manager.py:592 #, python-format -msgid "Unable to locate volume %s" +msgid "Deleting project %s" msgstr "" -#: nova/virt/xenapi/volumeops.py:121 +#: ../nova/auth/manager.py:650 #, python-format -msgid "Unable to detach volume %s" +msgid "Created user %(rvname)s (admin: %(rvadmin)r)" msgstr "" -#: nova/virt/xenapi/volumeops.py:128 +#: ../nova/auth/manager.py:659 #, python-format -msgid "Mountpoint %s detached from instance %s" +msgid "Deleting user %s" msgstr "" -#: nova/volume/api.py:44 +#: ../nova/auth/manager.py:669 #, python-format -msgid "Quota exceeeded for %s, tried to create %sG volume" +msgid "Access Key change for user %s" msgstr "" -#: nova/volume/api.py:46 +#: ../nova/auth/manager.py:671 #, python-format -msgid "Volume quota exceeded. 
You cannot create a volume of size %s" +msgid "Secret Key change for user %s" msgstr "" -#: nova/volume/api.py:70 nova/volume/api.py:95 -msgid "Volume status must be available" +#: ../nova/auth/manager.py:673 +#, python-format +msgid "Admin status set to %(admin)r for user %(uid)s" msgstr "" -#: nova/volume/api.py:97 -msgid "Volume is already attached" +#: ../nova/auth/manager.py:722 +#, python-format +msgid "No vpn data for project %s" msgstr "" -#: nova/volume/api.py:103 -msgid "Volume is already detached" +#: ../nova/service.py:161 +#, python-format +msgid "Starting %(topic)s node (version %(vcs_string)s)" +msgstr "" + +#: ../nova/service.py:174 +msgid "Service killed that has no database entry" msgstr "" -#: nova/volume/driver.py:76 +#: ../nova/service.py:195 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: ../nova/service.py:207 +msgid "Recovered model server connection!" +msgstr "" + +#: ../nova/service.py:213 +msgid "model server went away" +msgstr "" + +#: ../nova/auth/ldapdriver.py:174 #, python-format -msgid "Recovering from a failed execute. 
Try number %s" +msgid "LDAP user %s already exists" msgstr "" -#: nova/volume/driver.py:85 +#: ../nova/auth/ldapdriver.py:205 #, python-format -msgid "volume group %s doesn't exist" +msgid "LDAP object for %s doesn't exist" msgstr "" -#: nova/volume/driver.py:210 +#: ../nova/auth/ldapdriver.py:348 #, python-format -msgid "FAKE AOE: %s" +msgid "User %s doesn't exist" msgstr "" -#: nova/volume/driver.py:315 +#: ../nova/auth/ldapdriver.py:472 #, python-format -msgid "FAKE ISCSI: %s" +msgid "Group can't be created because group %s already exists" msgstr "" -#: nova/volume/manager.py:85 +#: ../nova/auth/ldapdriver.py:478 #, python-format -msgid "Re-exporting %s volumes" +msgid "Group can't be created because user %s doesn't exist" msgstr "" -#: nova/volume/manager.py:93 +#: ../nova/auth/ldapdriver.py:495 #, python-format -msgid "volume %s: creating" -msgstr "Volume %s: wird erstellt" +msgid "User %s can't be searched in group because the user doesn't exist" +msgstr "" -#: nova/volume/manager.py:102 +#: ../nova/auth/ldapdriver.py:507 #, python-format -msgid "volume %s: creating lv of size %sG" -msgstr "Volume %s: erstelle LV mit %sG" +msgid "User %s can't be added to the group because the user doesn't exist" +msgstr "" -#: nova/volume/manager.py:106 +#: ../nova/auth/ldapdriver.py:510 ../nova/auth/ldapdriver.py:521 #, python-format -msgid "volume %s: creating export" -msgstr "Volume %s: erstelle Export" +msgid "The group at dn %s doesn't exist" +msgstr "" -#: nova/volume/manager.py:113 +#: ../nova/auth/ldapdriver.py:513 #, python-format -msgid "volume %s: created successfully" -msgstr "Volume %s: erfolgreich erstellt" +msgid "User %(uid)s is already a member of the group %(group_dn)s" +msgstr "" -#: nova/volume/manager.py:121 -msgid "Volume is still attached" +#: ../nova/auth/ldapdriver.py:524 +#, python-format +msgid "" +"User %s can't be removed from the group because the user doesn't exist" msgstr "" -#: nova/volume/manager.py:123 -msgid "Volume is not local to this 
node" +#: ../nova/auth/ldapdriver.py:528 +#, python-format +msgid "User %s is not a member of the group" msgstr "" -#: nova/volume/manager.py:124 +#: ../nova/auth/ldapdriver.py:542 #, python-format -msgid "volume %s: removing export" -msgstr "Volume %s: entferne Export" +msgid "" +"Attempted to remove the last member of a group. Deleting the group at %s " +"instead." +msgstr "" -#: nova/volume/manager.py:126 +#: ../nova/auth/ldapdriver.py:549 #, python-format -msgid "volume %s: deleting" -msgstr "Volume %s: wird entfernt" +msgid "User %s can't be removed from all because the user doesn't exist" +msgstr "" -#: nova/volume/manager.py:129 +#: ../nova/auth/ldapdriver.py:564 #, python-format -msgid "volume %s: deleted successfully" -msgstr "Volume %s: erfolgreich entfernt" +msgid "Group at dn %s doesn't exist" +msgstr "" + +#: ../nova/virt/xenapi/network_utils.py:40 +#, python-format +msgid "Found non-unique network for bridge %s" +msgstr "" + +#: ../nova/virt/xenapi/network_utils.py:43 +#, python-format +msgid "Found no network for bridge %s" +msgstr "" + +#: ../nova/api/ec2/admin.py:97 +#, python-format +msgid "Creating new user: %s" +msgstr "" + +#: ../nova/api/ec2/admin.py:105 +#, python-format +msgid "Deleting user: %s" +msgstr "" + +#: ../nova/api/ec2/admin.py:127 +#, python-format +msgid "Adding role %(role)s to user %(user)s for project %(project)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:131 +#, python-format +msgid "Adding sitewide role %(role)s to user %(user)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:137 +#, python-format +msgid "Removing role %(role)s from user %(user)s for project %(project)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:141 +#, python-format +msgid "Removing sitewide role %(role)s from user %(user)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:146 ../nova/api/ec2/admin.py:223 +msgid "operation must be add or remove" +msgstr "" + +#: ../nova/api/ec2/admin.py:159 +#, python-format +msgid "Getting x509 for user: %(name)s on project: 
%(project)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:177 +#, python-format +msgid "Create project %(name)s managed by %(manager_user)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:190 +#, python-format +msgid "Modify project: %(name)s managed by %(manager_user)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:200 +#, python-format +msgid "Delete project: %s" +msgstr "" + +#: ../nova/api/ec2/admin.py:214 +#, python-format +msgid "Adding user %(user)s to project %(project)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:218 +#, python-format +msgid "Removing user %(user)s from project %(project)s" +msgstr "" + +#, python-format +#~ msgid "" +#~ "%s\n" +#~ "Command: %s\n" +#~ "Exit code: %s\n" +#~ "Stdout: %r\n" +#~ "Stderr: %r" +#~ msgstr "" +#~ "%s\n" +#~ "Kommando: %s\n" +#~ "Exit Code: %s\n" +#~ "Stdout: %r\n" +#~ "Stderr: %r" + +#, python-format +#~ msgid "(%s) publish (key: %s) %s" +#~ msgstr "(%s) öffentlich (Schlüssel: %s) %s" + +#, python-format +#~ msgid "Getting from %s: %s" +#~ msgstr "Beziehe von %s: %s" + +#, python-format +#~ msgid "AMQP server on %s:%d is unreachable. Trying again in %d seconds." +#~ msgstr "" +#~ "Der AMQP server %s:%d ist nicht erreichbar. Erneuter Versuch in %d Sekunden." + +#, python-format +#~ msgid "volume %s: creating lv of size %sG" +#~ msgstr "Volume %s: erstelle LV mit %sG" + +#, python-format +#~ msgid "Data store %s is unreachable. Trying again in %d seconds." +#~ msgstr "" +#~ "Datastore %s ist nicht erreichbar. Versuche es erneut in %d Sekunden." 
@@ -7,2171 +7,3351 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n" -"POT-Creation-Date: 2011-01-10 11:25-0800\n" -"PO-Revision-Date: 2011-01-18 14:56+0000\n" -"Last-Translator: Javier Turégano <Unknown>\n" +"POT-Creation-Date: 2011-02-21 10:03-0500\n" +"PO-Revision-Date: 2011-03-17 15:54+0000\n" +"Last-Translator: Erick Huezo <erickhuezo@gmail.com>\n" "Language-Team: Spanish <es@li.org>\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Launchpad-Export-Date: 2011-02-05 05:36+0000\n" -"X-Generator: Launchpad (build 12177)\n" +"X-Launchpad-Export-Date: 2011-03-19 06:19+0000\n" +"X-Generator: Launchpad (build 12559)\n" -#: nova/crypto.py:46 +#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55 +#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110 +#: ../nova/scheduler/simple.py:122 +msgid "No hosts found" +msgstr "No se han encontrado hosts" + +#: ../nova/exception.py:33 +msgid "Unexpected error while running command." +msgstr "Sucedió un error inesperado mientras el comando se ejecutaba." + +#: ../nova/exception.py:36 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" + +#: ../nova/exception.py:107 +msgid "DB exception wrapped" +msgstr "" + +#. exc_type, exc_value, exc_traceback = sys.exc_info() +#: ../nova/exception.py:120 +msgid "Uncaught exception" +msgstr "Excepción no controlada" + +#: ../nova/volume/api.py:45 +#, python-format +msgid "Quota exceeeded for %(pid)s, tried to create %(size)sG volume" +msgstr "" + +#: ../nova/volume/api.py:47 +#, python-format +msgid "Volume quota exceeded. You cannot create a volume of size %sG" +msgstr "Cuota excedida. 
No puedes crear un volumen con tamaño %sG" + +#: ../nova/volume/api.py:71 ../nova/volume/api.py:96 +msgid "Volume status must be available" +msgstr "El estado del volumen debe estar disponible" + +#: ../nova/volume/api.py:98 +msgid "Volume is already attached" +msgstr "El volumen ya está asociado previamente" + +#: ../nova/volume/api.py:104 +msgid "Volume is already detached" +msgstr "El volumen ya ha sido desasociado previamente" + +#: ../nova/api/openstack/servers.py:72 +msgid "Failed to read private ip" +msgstr "Fallo lectura de IP Privada" + +#: ../nova/api/openstack/servers.py:79 +msgid "Failed to read public ip(s)" +msgstr "Fallo lectura de IP(s) Publicas" + +#: ../nova/api/openstack/servers.py:152 +#, python-format +msgid "%(param)s property not found for image %(_image_id)s" +msgstr "%(param)s propiedad no encontrada para la imagen %(_image_id)s" + +#: ../nova/api/openstack/servers.py:168 +msgid "No keypairs defined" +msgstr "No se definio una Keypairs" + +#: ../nova/api/openstack/servers.py:238 +#, python-format +msgid "Compute.api::lock %s" +msgstr "Compute.api::lock %s" + +#: ../nova/api/openstack/servers.py:253 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "Compute.api::unlock %s" + +#: ../nova/api/openstack/servers.py:267 +#, python-format +msgid "Compute.api::get_lock %s" +msgstr "Compute.api::get_lock %s" + +#: ../nova/api/openstack/servers.py:281 +#, python-format +msgid "Compute.api::reset_network %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:292 +#, python-format +msgid "Compute.api::pause %s" +msgstr "Compute.api::pause %s" + +#: ../nova/api/openstack/servers.py:303 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "Compute.api::unpause %s" + +#: ../nova/api/openstack/servers.py:314 +#, python-format +msgid "compute.api::suspend %s" +msgstr "compute.api::suspend %s" + +#: ../nova/api/openstack/servers.py:325 +#, python-format +msgid "compute.api::resume %s" +msgstr "compute.api::resume %s" + +#: ../nova/twistd.py:157 
+msgid "Wrong number of arguments." +msgstr "Numero de argumentos incorrectos" + +#: ../nova/twistd.py:209 +#, python-format +msgid "pidfile %s does not exist. Daemon not running?\n" +msgstr "el pidfile %s no existe. ¿No estará el demonio parado?\n" + +#: ../nova/twistd.py:221 +msgid "No such process" +msgstr "No se encontró proceso" + +#: ../nova/twistd.py:230 ../nova/service.py:224 +#, python-format +msgid "Serving %s" +msgstr "Sirviendo %s" + +#: ../nova/twistd.py:262 ../nova/service.py:225 +msgid "Full set of FLAGS:" +msgstr "Conjunto completo de opciones:" + +#: ../nova/twistd.py:266 +#, python-format +msgid "Starting %s" +msgstr "Comenzando %s" + +#: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101 +#: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741 +#: ../nova/api/ec2/__init__.py:317 +#, python-format +msgid "Instance %s not found" +msgstr "La instancia %s no se ha encontrado" + +#. NOTE: No Resource Pool concept so far +#: ../nova/virt/xenapi/volumeops.py:51 +#, python-format +msgid "Attach_volume: %(instance_name)s, %(device_path)s, %(mountpoint)s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:69 +#, python-format +msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:80 +#, python-format +msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:91 +#, python-format +msgid "Unable to attach volume to instance %s" +msgstr "Imposible adjuntar volumen a la instancia %s" + +#: ../nova/virt/xenapi/volumeops.py:93 +#, python-format +msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" +msgstr "" + +#. 
Detach VBD from VM +#: ../nova/virt/xenapi/volumeops.py:104 +#, python-format +msgid "Detach_volume: %(instance_name)s, %(mountpoint)s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:112 +#, python-format +msgid "Unable to locate volume %s" +msgstr "Imposible encontrar volumen %s" + +#: ../nova/virt/xenapi/volumeops.py:120 +#, python-format +msgid "Unable to detach volume %s" +msgstr "Imposible desasociar volumen %s" + +#: ../nova/virt/xenapi/volumeops.py:127 +#, python-format +msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" +msgstr "" + +#: ../nova/compute/instance_types.py:41 +#, python-format +msgid "Unknown instance type: %s" +msgstr "Tipo de instancia desconocido: %s" + +#: ../nova/crypto.py:46 msgid "Filename of root CA" msgstr "Nombre de fichero de la CA raíz" -#: nova/crypto.py:49 +#: ../nova/crypto.py:49 msgid "Filename of private key" msgstr "Nombre de fichero de la clave privada" -#: nova/crypto.py:51 +#: ../nova/crypto.py:51 msgid "Filename of root Certificate Revokation List" msgstr "Nombre de fichero de la lista de certificados de revocación raíz" -#: nova/crypto.py:53 +#: ../nova/crypto.py:53 msgid "Where we keep our keys" msgstr "Donde guardamos nuestras claves" -#: nova/crypto.py:55 +#: ../nova/crypto.py:55 msgid "Where we keep our root CA" msgstr "Dónde guardamos nuestra CA raíz" -#: nova/crypto.py:57 +#: ../nova/crypto.py:57 msgid "Should we use a CA for each project?" msgstr "¿Deberíamos usar una CA para cada proyecto?" 
-#: nova/crypto.py:61 +#: ../nova/crypto.py:61 #, python-format msgid "Subject for certificate for users, %s for project, user, timestamp" msgstr "" "Sujeto (Subject) para el certificado de usuarios, %s para el proyecto, " "usuario, marca de tiempo" -#: nova/crypto.py:66 +#: ../nova/crypto.py:66 #, python-format msgid "Subject for certificate for projects, %s for project, timestamp" msgstr "" "Sujeto (Subject) para el certificado del proyecto, %s para el proyecto, " "marca de tiempo" -#: nova/crypto.py:71 +#: ../nova/crypto.py:71 #, python-format msgid "Subject for certificate for vpns, %s for project, timestamp" msgstr "" "Sujeto (Subject) para el certificado para vpns, %s para el proyecto, marca " "de tiempo" -#: nova/crypto.py:258 +#: ../nova/crypto.py:258 #, python-format msgid "Flags path: %s" msgstr "" -#: nova/exception.py:33 -msgid "Unexpected error while running command." -msgstr "Sucedió un error inesperado mientras el comando se ejecutaba." +#: ../nova/scheduler/manager.py:69 +#, python-format +msgid "Casting to %(topic)s %(host)s for %(method)s" +msgstr "" -#: nova/exception.py:36 +#: ../nova/compute/manager.py:78 +#, python-format +msgid "check_instance_lock: decorating: |%s|" +msgstr "check_instance_lock: decorating: |%s|" + +#: ../nova/compute/manager.py:80 #, python-format msgid "" -"%s\n" -"Command: %s\n" -"Exit code: %s\n" -"Stdout: %r\n" -"Stderr: %r" -msgstr "" -"%s\n" -"Comando: %s\n" -"Código de salida: %s\n" -"Stdout: %s\n" -"Stderr: %r" - -#: nova/exception.py:86 -msgid "Uncaught exception" -msgstr "Excepción no controlada" +"check_instance_lock: arguments: |%(self)s| |%(context)s| |%(instance_id)s|" +msgstr "" -#: nova/fakerabbit.py:48 +#: ../nova/compute/manager.py:84 #, python-format -msgid "(%s) publish (key: %s) %s" -msgstr "(%s) públicar (clave: %s) %s" +msgid "check_instance_lock: locked: |%s|" +msgstr "check_instance_lock: locked: |%s|" -#: nova/fakerabbit.py:53 +#: ../nova/compute/manager.py:86 #, python-format -msgid "Publishing to 
route %s" -msgstr "Publicando la ruta %s" +msgid "check_instance_lock: admin: |%s|" +msgstr "check_instance_lock: admin: |%s|" -#: nova/fakerabbit.py:83 +#: ../nova/compute/manager.py:91 #, python-format -msgid "Declaring queue %s" -msgstr "Declarando cola %s" +msgid "check_instance_lock: executing: |%s|" +msgstr "check_instance_lock: ejecutando: |%s|" -#: nova/fakerabbit.py:89 +#: ../nova/compute/manager.py:95 #, python-format -msgid "Declaring exchange %s" -msgstr "Declarando intercambio %s" +msgid "check_instance_lock: not executing |%s|" +msgstr "check_instance_lock: no ejecutando |%s|" + +#: ../nova/compute/manager.py:179 +msgid "Instance has already been created" +msgstr "La instancia ha sido creada previamente" + +#: ../nova/compute/manager.py:180 +#, python-format +msgid "instance %s: starting..." +msgstr "instancia %s: iniciando..." + +#. pylint: disable=W0702 +#: ../nova/compute/manager.py:219 +#, python-format +msgid "instance %s: Failed to spawn" +msgstr "Instancia %s: no se pudo iniciar" + +#: ../nova/compute/manager.py:233 ../nova/tests/test_cloud.py:286 +#, python-format +msgid "Terminating instance %s" +msgstr "Finalizando la instancia %s" + +#: ../nova/compute/manager.py:255 +#, python-format +msgid "Deallocating address %s" +msgstr "Desasociando la dirección %s" -#: nova/fakerabbit.py:95 +#: ../nova/compute/manager.py:268 #, python-format -msgid "Binding %s to %s with key %s" -msgstr "Asociando %s a %s con clave %s" +msgid "trying to destroy already destroyed instance: %s" +msgstr "intentando finalizar una instancia que ya había sido finalizada: %s" -#: nova/fakerabbit.py:120 +#: ../nova/compute/manager.py:282 #, python-format -msgid "Getting from %s: %s" -msgstr "Obteniendo desde %s: %s" +msgid "Rebooting instance %s" +msgstr "Reiniciando instancia %s" -#: nova/rpc.py:92 +#: ../nova/compute/manager.py:287 #, python-format -msgid "AMQP server on %s:%d is unreachable. Trying again in %d seconds." 
+msgid "" +"trying to reboot a non-running instance: %(instance_id)s (state: %(state)s " +"expected: %(running)s)" msgstr "" -"El servidor AMQP en %s:%d no se puede alcanzar. Se reintentará en %d " -"segundos." -#: nova/rpc.py:99 +#: ../nova/compute/manager.py:311 #, python-format -msgid "Unable to connect to AMQP server after %d tries. Shutting down." +msgid "instance %s: snapshotting" +msgstr "instancia %s: creando snapshot" + +#: ../nova/compute/manager.py:316 +#, python-format +msgid "" +"trying to snapshot a non-running instance: %(instance_id)s (state: %(state)s " +"expected: %(running)s)" msgstr "" -"Imposible conectar al servidor AMQP después de %d intentos. Apagando." -#: nova/rpc.py:118 -msgid "Reconnected to queue" -msgstr "Reconectado a la cola" +#: ../nova/compute/manager.py:332 +#, python-format +msgid "" +"trying to reset the password on a non-running instance: %(instance_id)s " +"(state: %(instance_state)s expected: %(expected_state)s)" +msgstr "" -#: nova/rpc.py:125 -msgid "Failed to fetch message from queue" -msgstr "Fallo al obtener el mensaje de la cola" +#: ../nova/compute/manager.py:335 +#, python-format +msgid "instance %s: setting admin password" +msgstr "" -#: nova/rpc.py:155 +#: ../nova/compute/manager.py:353 #, python-format -msgid "Initing the Adapter Consumer for %s" +msgid "" +"trying to inject a file into a non-running instance: %(instance_id)s (state: " +"%(instance_state)s expected: %(expected_state)s)" msgstr "" -#: nova/rpc.py:170 +#: ../nova/compute/manager.py:362 #, python-format -msgid "received %s" -msgstr "recibido %s" +msgid "instance %(nm)s: injecting file to %(plain_path)s" +msgstr "" -#: nova/rpc.py:183 +#: ../nova/compute/manager.py:372 #, python-format -msgid "no method for message: %s" -msgstr "no hay método para el mensaje: %s" +msgid "instance %s: rescuing" +msgstr "instancia %s: rescatando" -#: nova/rpc.py:184 +#: ../nova/compute/manager.py:387 #, python-format -msgid "No method for message: %s" -msgstr "No hay 
método para el mensaje: %s" +msgid "instance %s: unrescuing" +msgstr "" -#: nova/rpc.py:245 +#: ../nova/compute/manager.py:406 #, python-format -msgid "Returning exception %s to caller" +msgid "instance %s: pausing" +msgstr "instancia %s: pausando" + +#: ../nova/compute/manager.py:423 +#, python-format +msgid "instance %s: unpausing" +msgstr "instnacia %s: continuando tras pausa" + +#: ../nova/compute/manager.py:440 +#, python-format +msgid "instance %s: retrieving diagnostics" +msgstr "instancia %s: obteniendo los diagnosticos" + +#: ../nova/compute/manager.py:453 +#, python-format +msgid "instance %s: suspending" msgstr "" -#: nova/rpc.py:286 +#: ../nova/compute/manager.py:472 #, python-format -msgid "unpacked context: %s" -msgstr "contenido desempaquetado: %s" +msgid "instance %s: resuming" +msgstr "instancia %s: continuando" -#: nova/rpc.py:305 -msgid "Making asynchronous call..." -msgstr "Haciendo una llamada asíncrona..." +#: ../nova/compute/manager.py:491 +#, python-format +msgid "instance %s: locking" +msgstr "instancia %s: bloqueando" -#: nova/rpc.py:308 +#: ../nova/compute/manager.py:503 #, python-format -msgid "MSG_ID is %s" -msgstr "MSG_ID es %s" +msgid "instance %s: unlocking" +msgstr "instancia %s: desbloqueando" -#: nova/rpc.py:356 +#: ../nova/compute/manager.py:513 #, python-format -msgid "response %s" -msgstr "respuesta %s" +msgid "instance %s: getting locked state" +msgstr "instancia %s: pasando a estado bloqueado" -#: nova/rpc.py:365 +#: ../nova/compute/manager.py:526 #, python-format -msgid "topic is %s" +msgid "instance %s: reset network" +msgstr "instancia %s: reiniciar redes" + +#: ../nova/compute/manager.py:535 ../nova/api/ec2/cloud.py:515 +#, python-format +msgid "Get console output for instance %s" +msgstr "Obtener salida de la consola para la instancia %s" + +#: ../nova/compute/manager.py:543 +#, python-format +msgid "instance %s: getting ajax console" +msgstr "instancia %s: obteniendo consola ajax" + +#: ../nova/compute/manager.py:553 
+#, python-format +msgid "" +"instance %(instance_id)s: attaching volume %(volume_id)s to %(mountpoint)s" msgstr "" +"instancia %(instance_id)s: adjuntando volumen %(volume_id)s a %(mountpoint)s" -#: nova/rpc.py:366 +#. pylint: disable=W0702 +#. NOTE(vish): The inline callback eats the exception info so we +#. log the traceback here and reraise the same +#. ecxception below. +#: ../nova/compute/manager.py:569 #, python-format -msgid "message %s" -msgstr "mensaje %s" +msgid "instance %(instance_id)s: attach failed %(mountpoint)s, removing" +msgstr "" +"instancia %(instance_id)s: adjuntar fallo %(mountpoint)s, removiendo" -#: nova/service.py:157 +#: ../nova/compute/manager.py:585 #, python-format -msgid "Starting %s node" -msgstr "Inciando nodo %s" +msgid "" +"Detach volume %(volume_id)s from mountpoint %(mp)s on instance " +"%(instance_id)s" +msgstr "" +"Quitar el volumen %(volume_id)s del punto de montaje %(mp)s en la instancia " +"%(instance_id)s" -#: nova/service.py:169 -msgid "Service killed that has no database entry" -msgstr "Se detuvo un servicio sin entrada en la base de datos" +#: ../nova/compute/manager.py:588 +#, python-format +msgid "Detaching volume from unknown instance %s" +msgstr "Desvinculando volumen de instancia desconocida %s" -#: nova/service.py:190 -msgid "The service database object disappeared, Recreating it." -msgstr "El servicio objeto de base de datos ha desaparecido, recreándolo." +#: ../nova/scheduler/simple.py:53 +#, python-format +msgid "Host %s is not alive" +msgstr "Host %s no responde" -#: nova/service.py:202 -msgid "Recovered model server connection!" -msgstr "Recuperada la conexión al servidor de modelos." 
+#: ../nova/scheduler/simple.py:65 +msgid "All hosts have too many cores" +msgstr "Todos los hosts tienen demasiados cores" -#: nova/service.py:208 -msgid "model server went away" -msgstr "el servidor de modelos se ha ido" +#: ../nova/scheduler/simple.py:87 +#, python-format +msgid "Host %s not available" +msgstr "Host %s no disponible" -#: nova/service.py:217 nova/db/sqlalchemy/__init__.py:43 +#: ../nova/scheduler/simple.py:99 +msgid "All hosts have too many gigabytes" +msgstr "Todos los hosts tienen demasiados gigabytes" + +#: ../nova/scheduler/simple.py:119 +msgid "All hosts have too many networks" +msgstr "Todos los hosts tienen demasiadas redes" + +#: ../nova/volume/manager.py:85 #, python-format -msgid "Data store %s is unreachable. Trying again in %d seconds." +msgid "Re-exporting %s volumes" +msgstr "Exportando de nuevo los volumenes %s" + +#: ../nova/volume/manager.py:90 +#, python-format +msgid "volume %s: skipping export" msgstr "" -"El almacen de datos %s es inalcanzable. Reintentandolo en %d segundos." -#: nova/service.py:232 nova/twistd.py:232 +#: ../nova/volume/manager.py:96 #, python-format -msgid "Serving %s" -msgstr "Sirviendo %s" +msgid "volume %s: creating" +msgstr "volumen %s: creando" -#: nova/service.py:234 nova/twistd.py:264 -msgid "Full set of FLAGS:" -msgstr "Conjunto completo de opciones:" +#: ../nova/volume/manager.py:108 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "" -#: nova/twistd.py:211 +#: ../nova/volume/manager.py:112 #, python-format -msgid "pidfile %s does not exist. Daemon not running?\n" -msgstr "el pidfile %s no existe. 
¿No estará el demonio parado?\n" +msgid "volume %s: creating export" +msgstr "volumen %s: exportando" -#: nova/twistd.py:268 +#: ../nova/volume/manager.py:123 #, python-format -msgid "Starting %s" -msgstr "Comenzando %s" +msgid "volume %s: created successfully" +msgstr "volumen %s: creado satisfactoriamente" + +#: ../nova/volume/manager.py:131 +msgid "Volume is still attached" +msgstr "El volumen todavía está asociado" + +#: ../nova/volume/manager.py:133 +msgid "Volume is not local to this node" +msgstr "Volumen no local a este nodo" + +#: ../nova/volume/manager.py:136 +#, python-format +msgid "volume %s: removing export" +msgstr "volumen %s: eliminando exportación" + +#: ../nova/volume/manager.py:138 +#, python-format +msgid "volume %s: deleting" +msgstr "volumen %s: eliminando" -#: nova/utils.py:53 +#: ../nova/volume/manager.py:147 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "volumen %s: eliminado satisfactoriamente" + +#: ../nova/virt/xenapi/fake.py:74 +#, python-format +msgid "%(text)s: _db_content => %(content)s" +msgstr "" + +#: ../nova/virt/xenapi/fake.py:304 ../nova/virt/xenapi/fake.py:404 +#: ../nova/virt/xenapi/fake.py:422 ../nova/virt/xenapi/fake.py:478 +msgid "Raising NotImplemented" +msgstr "Lanzando NotImplemented" + +#: ../nova/virt/xenapi/fake.py:306 +#, python-format +msgid "xenapi.fake does not have an implementation for %s" +msgstr "xenapi.fake no tiene una implementación para %s" + +#: ../nova/virt/xenapi/fake.py:341 +#, python-format +msgid "Calling %(localname)s %(impl)s" +msgstr "" + +#: ../nova/virt/xenapi/fake.py:346 +#, python-format +msgid "Calling getter %s" +msgstr "Llanado al adquiridor %s" + +#: ../nova/virt/xenapi/fake.py:406 +#, python-format +msgid "" +"xenapi.fake does not have an implementation for %s or it has been called " +"with the wrong number of arguments" +msgstr "" +"xenapi.fake no tiene una implementación para %s o ha sido llamada con un " +"número incorrecto de argumentos" + +#: 
../nova/tests/test_cloud.py:256 +msgid "Can't test instances without a real virtual env." +msgstr "No puedo probar las imágenes sin un entorno real virtual" + +#: ../nova/tests/test_cloud.py:268 +#, python-format +msgid "Need to watch instance %s until it's running..." +msgstr "Hay que vigilar la instancia %s hasta que este en ejecución..." + +#: ../nova/virt/connection.py:73 +msgid "Failed to open connection to the hypervisor" +msgstr "Fallo al abrir conexión con el hypervisor" + +#: ../nova/network/linux_net.py:187 +#, python-format +msgid "Starting VLAN inteface %s" +msgstr "Iniciando interfaz VLAN %s" + +#: ../nova/network/linux_net.py:208 +#, python-format +msgid "Starting Bridge interface for %s" +msgstr "Iniciando interfaz puente para %s" + +#. pylint: disable=W0703 +#: ../nova/network/linux_net.py:314 +#, python-format +msgid "Hupping dnsmasq threw %s" +msgstr "Excepción al recargar la configuración de dnsmasq: %s" + +#: ../nova/network/linux_net.py:316 +#, python-format +msgid "Pid %d is stale, relaunching dnsmasq" +msgstr "El pid %d está pasado, relanzando dnsmasq" + +#. pylint: disable=W0703 +#: ../nova/network/linux_net.py:358 +#, python-format +msgid "killing radvd threw %s" +msgstr "" + +#: ../nova/network/linux_net.py:360 +#, python-format +msgid "Pid %d is stale, relaunching radvd" +msgstr "" + +#. pylint: disable=W0703 +#: ../nova/network/linux_net.py:449 +#, python-format +msgid "Killing dnsmasq threw %s" +msgstr "Al matar dnsmasq se lanzó %s" + +#: ../nova/utils.py:58 #, python-format msgid "Inner Exception: %s" msgstr "Excepción interna: %s" -#: nova/utils.py:54 +#: ../nova/utils.py:59 #, python-format msgid "Class %s cannot be found" msgstr "La clase %s no ha podido ser encontrada." 
-#: nova/utils.py:113 +#: ../nova/utils.py:118 #, python-format msgid "Fetching %s" msgstr "Obteniendo %s" -#: nova/utils.py:125 +#: ../nova/utils.py:130 #, python-format msgid "Running cmd (subprocess): %s" msgstr "Ejecutando cmd (subprocesos): %s" -#: nova/utils.py:138 +#: ../nova/utils.py:143 ../nova/utils.py:183 #, python-format msgid "Result was %s" msgstr "El resultado fue %s" -#: nova/utils.py:171 +#: ../nova/utils.py:159 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "" + +#: ../nova/utils.py:217 #, python-format msgid "debug in callback: %s" msgstr "Depuración de la devolución de llamada: %s" -#: nova/utils.py:176 +#: ../nova/utils.py:222 #, python-format msgid "Running %s" msgstr "Ejecutando %s" -#: nova/utils.py:207 +#: ../nova/utils.py:262 #, python-format -msgid "Couldn't get IP, using 127.0.0.1 %s" -msgstr "No puedo obtener IP, usando 127.0.0.1 %s" +msgid "Link Local address is not found.:%s" +msgstr "" + +#: ../nova/utils.py:265 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "" -#: nova/utils.py:289 +#: ../nova/utils.py:363 #, python-format msgid "Invalid backend: %s" msgstr "backend inválido: %s" -#: nova/utils.py:300 +#: ../nova/utils.py:374 #, python-format msgid "backend %s" msgstr "backend %s" -#: nova/api/ec2/__init__.py:133 -msgid "Too many failed authentications." -msgstr "Demasiados intentos de autenticacion fallidos." 
+#: ../nova/fakerabbit.py:49 +#, python-format +msgid "(%(nm)s) publish (key: %(routing_key)s) %(message)s" +msgstr "" + +#: ../nova/fakerabbit.py:54 +#, python-format +msgid "Publishing to route %s" +msgstr "Publicando la ruta %s" + +#: ../nova/fakerabbit.py:84 +#, python-format +msgid "Declaring queue %s" +msgstr "Declarando cola %s" + +#: ../nova/fakerabbit.py:90 +#, python-format +msgid "Declaring exchange %s" +msgstr "Declarando intercambio %s" + +#: ../nova/fakerabbit.py:96 +#, python-format +msgid "Binding %(queue)s to %(exchange)s with key %(routing_key)s" +msgstr "" + +#: ../nova/fakerabbit.py:121 +#, python-format +msgid "Getting from %(queue)s: %(message)s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:135 ../nova/virt/hyperv.py:171 +#, python-format +msgid "Created VM %s..." +msgstr "Creada VM %s..." + +#: ../nova/virt/xenapi/vm_utils.py:138 +#, python-format +msgid "Created VM %(instance_name)s as %(vm_ref)s." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:168 +#, python-format +msgid "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:171 +#, python-format +msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:187 +#, python-format +msgid "VBD not found in instance %s" +msgstr "VBD no encontrado en la instancia %s" + +#: ../nova/virt/xenapi/vm_utils.py:197 +#, python-format +msgid "Unable to unplug VBD %s" +msgstr "Imposible desconectar VBD %s" + +#: ../nova/virt/xenapi/vm_utils.py:209 +#, python-format +msgid "Unable to destroy VBD %s" +msgstr "Imposible destruir VBD %s" + +#: ../nova/virt/xenapi/vm_utils.py:224 +#, python-format +msgid "Creating VIF for VM %(vm_ref)s, network %(network_ref)s." +msgstr "" -#: nova/api/ec2/__init__.py:142 +#: ../nova/virt/xenapi/vm_utils.py:227 +#, python-format +msgid "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s." 
+msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:246 #, python-format msgid "" -"Access key %s has had %d failed authentications and will be locked out for " -"%d minutes." +"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s) on " +"%(sr_ref)s." msgstr "" -"La clave de acceso %s ha tenido %d fallos de autenticación y se bloqueará " -"por %d minutos." -#: nova/api/ec2/__init__.py:179 nova/objectstore/handler.py:140 +#. TODO(sirp): Add quiesce and VSS locking support when Windows support +#. is added +#: ../nova/virt/xenapi/vm_utils.py:258 #, python-format -msgid "Authentication Failure: %s" -msgstr "Fallo de autenticación: %s" +msgid "Snapshotting VM %(vm_ref)s with label '%(label)s'..." +msgstr "" -#: nova/api/ec2/__init__.py:190 +#: ../nova/virt/xenapi/vm_utils.py:272 #, python-format -msgid "Authenticated Request For %s:%s)" -msgstr "Solicitud de autenticación para %s:%s" +msgid "Created snapshot %(template_vm_ref)s from VM %(vm_ref)s." +msgstr "" -#: nova/api/ec2/__init__.py:227 +#: ../nova/virt/xenapi/vm_utils.py:286 #, python-format -msgid "action: %s" -msgstr "acción: %s" +msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s" +msgstr "" -#: nova/api/ec2/__init__.py:229 +#: ../nova/virt/xenapi/vm_utils.py:327 #, python-format -msgid "arg: %s\t\tval: %s" -msgstr "arg: %s \t \t val: %s" +msgid "Size for image %(image)s:%(virtual_size)d" +msgstr "" -#: nova/api/ec2/__init__.py:301 +#: ../nova/virt/xenapi/vm_utils.py:332 #, python-format -msgid "Unauthorized request for controller=%s and action=%s" -msgstr "Solicitud no autorizada para controller=%s y action=%s" +msgid "Glance image %s" +msgstr "" -#: nova/api/ec2/__init__.py:339 +#. we need to invoke a plugin for copying VDI's +#. 
content into proper path +#: ../nova/virt/xenapi/vm_utils.py:342 #, python-format -msgid "NotFound raised: %s" -msgstr "No encontrado: %s" +msgid "Copying VDI %s to /boot/guest on dom0" +msgstr "" -#: nova/api/ec2/__init__.py:342 +#: ../nova/virt/xenapi/vm_utils.py:352 #, python-format -msgid "ApiError raised: %s" -msgstr "Sucedió un ApiError: %s" +msgid "Kernel/Ramdisk VDI %s destroyed" +msgstr "" -#: nova/api/ec2/__init__.py:349 +#: ../nova/virt/xenapi/vm_utils.py:361 #, python-format -msgid "Unexpected error raised: %s" -msgstr "Sucedió un error inexperado: %s" +msgid "Asking xapi to fetch %(url)s as %(access)s" +msgstr "" -#: nova/api/ec2/__init__.py:354 -msgid "An unknown error has occurred. Please try your request again." +#: ../nova/virt/xenapi/vm_utils.py:386 ../nova/virt/xenapi/vm_utils.py:402 +#, python-format +msgid "Looking up vdi %s for PV kernel" +msgstr "Buscando vid %s para el kernel PV" + +#: ../nova/virt/xenapi/vm_utils.py:397 +#, python-format +msgid "PV Kernel in VDI:%s" msgstr "" -"Ha sucedido un error desconocido. Por favor repite el intento de nuevo." -#: nova/api/ec2/admin.py:84 +#: ../nova/virt/xenapi/vm_utils.py:405 #, python-format -msgid "Creating new user: %s" -msgstr "Creando nuevo usuario: %s" +msgid "Running pygrub against %s" +msgstr "" -#: nova/api/ec2/admin.py:92 +#: ../nova/virt/xenapi/vm_utils.py:411 #, python-format -msgid "Deleting user: %s" -msgstr "Eliminando usuario: %s" +msgid "Found Xen kernel %s" +msgstr "" -#: nova/api/ec2/admin.py:114 +#: ../nova/virt/xenapi/vm_utils.py:413 +msgid "No Xen kernel found. Booting HVM." 
+msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:425 ../nova/virt/hyperv.py:431 #, python-format -msgid "Adding role %s to user %s for project %s" -msgstr "Añadiendo rol %s al usuario %s para el proyecto %s" +msgid "duplicate name found: %s" +msgstr "se ha encontrado un nombre duplicado: %s" -#: nova/api/ec2/admin.py:117 nova/auth/manager.py:415 +#: ../nova/virt/xenapi/vm_utils.py:442 #, python-format -msgid "Adding sitewide role %s to user %s" -msgstr "Añadiendo rol global %s al usuario %s" +msgid "VDI %s is still available" +msgstr "VDI %s está todavía disponible" -#: nova/api/ec2/admin.py:122 +#: ../nova/virt/xenapi/vm_utils.py:463 #, python-format -msgid "Removing role %s from user %s for project %s" -msgstr "Eliminando rol %s del usuario %s para el proyecto %s" +msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgstr "(VM_UTILS) xenserver vm state -> |%s|" -#: nova/api/ec2/admin.py:125 nova/auth/manager.py:441 +#: ../nova/virt/xenapi/vm_utils.py:465 #, python-format -msgid "Removing sitewide role %s from user %s" -msgstr "Eliminando rol global %s del usuario %s" +msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgstr "(VM_UTILS) xenapi power_state -> |%s|" -#: nova/api/ec2/admin.py:129 nova/api/ec2/admin.py:192 -msgid "operation must be add or remove" -msgstr "la operación debe ser añadir o eliminar" +#: ../nova/virt/xenapi/vm_utils.py:525 +#, python-format +msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s" +msgstr "" -#: nova/api/ec2/admin.py:142 +#: ../nova/virt/xenapi/vm_utils.py:542 #, python-format -msgid "Getting x509 for user: %s on project: %s" -msgstr "Obteniendo x509 para el usuario: %s en el proyecto %s" +msgid "Re-scanning SR %s" +msgstr "Re-escaneando SR %s" -#: nova/api/ec2/admin.py:159 +#: ../nova/virt/xenapi/vm_utils.py:567 #, python-format -msgid "Create project %s managed by %s" -msgstr "Creación del proyecto %s gestionada por %s" +msgid "" +"VHD coalesce attempts exceeded (%(counter)d > %(max_attempts)d), giving up..." 
+msgstr "" -#: nova/api/ec2/admin.py:170 +#: ../nova/virt/xenapi/vm_utils.py:574 #, python-format -msgid "Delete project: %s" -msgstr "Borrar proyecto: %s" +msgid "" +"Parent %(parent_uuid)s doesn't match original parent " +"%(original_parent_uuid)s, waiting for coalesce..." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:590 +#, python-format +msgid "No VDIs found for VM %s" +msgstr "No se han encontrado VDI's para VM %s" + +#: ../nova/virt/xenapi/vm_utils.py:594 +#, python-format +msgid "Unexpected number of VDIs (%(num_vdis)s) found for VM %(vm_ref)s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:653 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:188 +#, python-format +msgid "Creating VBD for VDI %s ... " +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:655 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:190 +#, python-format +msgid "Creating VBD for VDI %s done." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:657 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:192 +#, python-format +msgid "Plugging VBD %s ... " +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:659 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:194 +#, python-format +msgid "Plugging VBD %s done." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:661 +#, python-format +msgid "VBD %(vbd)s plugged as %(orig_dev)s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:664 +#, python-format +msgid "VBD %(vbd)s plugged into wrong dev, remapping to %(dev)s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:668 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:197 +#, python-format +msgid "Destroying VBD for VDI %s ... " +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:671 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:200 +#, python-format +msgid "Destroying VBD for VDI %s done." 
+msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:683 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:211 +msgid "VBD.unplug successful first time." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:688 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:216 +msgid "VBD.unplug rejected: retrying..." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:692 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:220 +msgid "VBD.unplug successful eventually." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:695 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:223 +#, python-format +msgid "Ignoring XenAPI.Failure in VBD.unplug: %s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:704 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:66 +#, python-format +msgid "Ignoring XenAPI.Failure %s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:735 +#, python-format +msgid "" +"Writing partition table %(primary_first)d %(primary_last)d to %(dest)s..." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:747 +#, python-format +msgid "Writing partition table %s done." 
+msgstr "" + +#: ../nova/tests/test_rpc.py:89 +#, python-format +msgid "Nested received %(queue)s, %(value)s" +msgstr "" + +#: ../nova/tests/test_rpc.py:95 +#, python-format +msgid "Nested return %s" +msgstr "" + +#: ../nova/tests/test_rpc.py:120 ../nova/tests/test_rpc.py:126 +#, python-format +msgid "Received %s" +msgstr "Recibido %s" + +#: ../nova/db/sqlalchemy/api.py:44 +msgid "Use of empty request context is deprecated" +msgstr "El uso de una petición de contexto vacía está en desuso" + +#: ../nova/db/sqlalchemy/api.py:133 +#, python-format +msgid "No service for id %s" +msgstr "No hay servicio para el id %s" + +#: ../nova/db/sqlalchemy/api.py:251 +#, python-format +msgid "No service for %(host)s, %(binary)s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:592 +msgid "No fixed ips defined" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:608 +#, python-format +msgid "No floating ip for address %s" +msgstr "No hay ip flotante para la dirección %s" + +#: ../nova/db/sqlalchemy/api.py:629 +#, python-format +msgid "No address for instance %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:961 +#, python-format +msgid "no keypair for user %(user_id)s, name %(name)s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1076 ../nova/db/sqlalchemy/api.py:1156 +#, python-format +msgid "No network for id %s" +msgstr "No hay red para el id %s" + +#: ../nova/db/sqlalchemy/api.py:1086 +msgid "No networks defined" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1115 +#, python-format +msgid "No network for bridge %s" +msgstr "No hay red para el puente %s" + +#: ../nova/db/sqlalchemy/api.py:1129 ../nova/db/sqlalchemy/api.py:1142 +#, python-format +msgid "No network for instance %s" +msgstr "No hay red para la instancia %s" + +#: ../nova/db/sqlalchemy/api.py:1277 +#, python-format +msgid "Token %s does not exist" +msgstr "El token %s no existe" + +#: ../nova/db/sqlalchemy/api.py:1302 +#, python-format +msgid "No quota for project_id %s" +msgstr "No hay quota para el project:id %s" + +#: 
../nova/db/sqlalchemy/api.py:1455 ../nova/db/sqlalchemy/api.py:1501 +#: ../nova/api/ec2/__init__.py:323 +#, python-format +msgid "Volume %s not found" +msgstr "El volumen %s no se ha encontrado" + +#: ../nova/db/sqlalchemy/api.py:1514 +#, python-format +msgid "No export device found for volume %s" +msgstr "No se ha encontrado dispositivo exportado para el volumen %s" + +#: ../nova/db/sqlalchemy/api.py:1527 +#, python-format +msgid "No target id found for volume %s" +msgstr "No se ha encontrado id de destino para el volumen %s" + +#: ../nova/db/sqlalchemy/api.py:1572 +#, python-format +msgid "No security group with id %s" +msgstr "No hay un grupo de seguridad con el id %s" + +#: ../nova/db/sqlalchemy/api.py:1589 +#, python-format +msgid "No security group named %(group_name)s for project: %(project_id)s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1682 +#, python-format +msgid "No secuity group rule with id %s" +msgstr "No hay una regla para el grupo de seguridad con el id %s" + +#: ../nova/db/sqlalchemy/api.py:1756 +#, python-format +msgid "No user for id %s" +msgstr "No hay un usuario con el id %s" + +#: ../nova/db/sqlalchemy/api.py:1772 +#, python-format +msgid "No user for access key %s" +msgstr "No hay un usuario para la clave de acceso %s" + +#: ../nova/db/sqlalchemy/api.py:1834 +#, python-format +msgid "No project with id %s" +msgstr "No hay proyecto con id %s" + +#: ../nova/db/sqlalchemy/api.py:1979 +#, python-format +msgid "No console pool with id %(pool_id)s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1996 +#, python-format +msgid "" +"No console pool of type %(console_type)s for compute host %(compute_host)s " +"on proxy host %(host)s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:2035 +#, python-format +msgid "No console for instance %(instance_id)s in pool %(pool_id)s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:2057 +#, python-format +msgid "on instance %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:2058 +#, python-format +msgid "No console 
with id %(console_id)s %(idesc)s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:2078 ../nova/db/sqlalchemy/api.py:2097 +#, python-format +msgid "No zone with id %(zone_id)s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:160 +#, python-format +msgid "Checking state of %s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:165 +#, python-format +msgid "Current state of %(name)s was %(state)s." +msgstr "" + +#: ../nova/virt/libvirt_conn.py:183 +#, python-format +msgid "Connecting to libvirt: %s" +msgstr "Conectando a libvirt: %s" + +#: ../nova/virt/libvirt_conn.py:196 +msgid "Connection to libvirt broke" +msgstr "Conexión a libvirt rota" + +#: ../nova/virt/libvirt_conn.py:258 +#, python-format +msgid "instance %(instance_name)s: deleting instance files %(target)s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:283 +#, python-format +msgid "Invalid device path %s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:313 +#, python-format +msgid "No disk at %s" +msgstr "No hay disco en %s" + +#: ../nova/virt/libvirt_conn.py:320 +msgid "Instance snapshotting is not supported for libvirtat this time" +msgstr "" +"El snapshotting de instancias no está soportado en libvirt en este momento" + +#: ../nova/virt/libvirt_conn.py:336 +#, python-format +msgid "instance %s: rebooted" +msgstr "instancia %s: reiniciada" -#: nova/api/ec2/admin.py:184 nova/auth/manager.py:533 +#: ../nova/virt/libvirt_conn.py:339 #, python-format -msgid "Adding user %s to project %s" -msgstr "Añadiendo usuario %s al proyecto %s" +msgid "_wait_for_reboot failed: %s" +msgstr "_wait_for_reboot falló: %s" + +#: ../nova/virt/libvirt_conn.py:382 +#, python-format +msgid "instance %s: rescued" +msgstr "instancia %s: rescatada" -#: nova/api/ec2/admin.py:188 +#: ../nova/virt/libvirt_conn.py:385 #, python-format -msgid "Removing user %s from project %s" -msgstr "Eliminando usuario %s del proyecto %s" +msgid "_wait_for_rescue failed: %s" +msgstr "_wait_for_rescue falló: %s" -#: nova/api/ec2/apirequest.py:95 +#: 
../nova/virt/libvirt_conn.py:411 #, python-format -msgid "Unsupported API request: controller = %s,action = %s" -msgstr "Solicitud de API no soportada: controller=%s,action=%s" +msgid "instance %s: is running" +msgstr "instancia %s: está ejecutándose" -#: nova/api/ec2/cloud.py:117 +#: ../nova/virt/libvirt_conn.py:422 +#, python-format +msgid "instance %s: booted" +msgstr "instancia %s: arrancada" + +#: ../nova/virt/libvirt_conn.py:425 ../nova/virt/xenapi/vmops.py:186 +#, python-format +msgid "instance %s: failed to boot" +msgstr "insntancia %s: falló al arrancar" + +#: ../nova/virt/libvirt_conn.py:436 +#, python-format +msgid "virsh said: %r" +msgstr "virsh dijo: %r" + +#: ../nova/virt/libvirt_conn.py:440 +msgid "cool, it's a device" +msgstr "genial, es un dispositivo" + +#: ../nova/virt/libvirt_conn.py:448 +#, python-format +msgid "data: %(data)r, fpath: %(fpath)r" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:456 +#, python-format +msgid "Contents of file %(fpath)s: %(contents)r" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:489 +msgid "Unable to find an open port" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:563 +#, python-format +msgid "instance %s: Creating image" +msgstr "instancia %s: Creando imagen" + +#: ../nova/virt/libvirt_conn.py:646 +#, python-format +msgid "instance %(inst_name)s: injecting key into image %(img_id)s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:649 +#, python-format +msgid "instance %(inst_name)s: injecting net into image %(img_id)s" +msgstr "" + +#. This could be a windows image, or a vmdk format disk +#: ../nova/virt/libvirt_conn.py:657 +#, python-format +msgid "" +"instance %(inst_name)s: ignoring error injecting data into image %(img_id)s " +"(%(e)s)" +msgstr "" + +#. TODO(termie): cache? 
+#: ../nova/virt/libvirt_conn.py:665 +#, python-format +msgid "instance %s: starting toXML method" +msgstr "instancia %s: comenzando método toXML" + +#: ../nova/virt/libvirt_conn.py:732 +#, python-format +msgid "instance %s: finished toXML method" +msgstr "instancia %s: finalizado método toXML" + +#: ../nova/virt/libvirt_conn.py:751 +msgid "diagnostics are not supported for libvirt" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:1225 +#, python-format +msgid "Attempted to unfilter instance %s which is not filtered" +msgstr "" + +#: ../nova/api/ec2/metadatarequesthandler.py:76 +#, python-format +msgid "Failed to get metadata for ip: %s" +msgstr "Fallo al generar metadatos para la ip %s" + +#: ../nova/auth/fakeldap.py:33 +msgid "Attempted to instantiate singleton" +msgstr "Intento de instanciar sigleton" + +#: ../nova/network/api.py:39 +#, python-format +msgid "Quota exceeeded for %s, tried to allocate address" +msgstr "Quota excedida para %s, intentando asignar direcciones" + +#: ../nova/network/api.py:42 +msgid "Address quota exceeded. You cannot allocate any more addresses" +msgstr "" +"La quota de direcciones ha sido excedida. No puedes asignar más direcciones" + +#: ../nova/tests/test_volume.py:162 +#, python-format +msgid "Target %s allocated" +msgstr "Destino %s asignado" + +#: ../nova/virt/images.py:70 +#, python-format +msgid "Finished retreving %(url)s -- placed in %(path)s" +msgstr "" + +#: ../nova/scheduler/driver.py:66 +msgid "Must implement a fallback schedule" +msgstr "Debe de implementar un horario de reserva" + +#: ../nova/console/manager.py:70 +msgid "Adding console" +msgstr "" + +#: ../nova/console/manager.py:90 +#, python-format +msgid "Tried to remove non-existant console %(console_id)s." +msgstr "" + +#: ../nova/api/direct.py:149 +msgid "not available" +msgstr "" + +#: ../nova/api/ec2/cloud.py:62 +#, python-format +msgid "The key_pair %s already exists" +msgstr "" + +#. 
TODO(vish): Do this with M2Crypto instead +#: ../nova/api/ec2/cloud.py:118 #, python-format msgid "Generating root CA: %s" msgstr "Generando CA raiz: %s" -#: nova/api/ec2/cloud.py:277 +#: ../nova/api/ec2/cloud.py:303 #, python-format msgid "Create key pair %s" msgstr "Creando par de claves %s" -#: nova/api/ec2/cloud.py:285 +#: ../nova/api/ec2/cloud.py:311 #, python-format msgid "Delete key pair %s" msgstr "Borrar para de claves %s" -#: nova/api/ec2/cloud.py:357 +#: ../nova/api/ec2/cloud.py:386 #, python-format msgid "%s is not a valid ipProtocol" msgstr "%s no es un ipProtocol valido" -#: nova/api/ec2/cloud.py:361 +#: ../nova/api/ec2/cloud.py:390 msgid "Invalid port range" msgstr "Rango de puerto inválido" -#: nova/api/ec2/cloud.py:392 +#: ../nova/api/ec2/cloud.py:421 #, python-format msgid "Revoke security group ingress %s" msgstr "Revocar ingreso al grupo de seguridad %s" -#: nova/api/ec2/cloud.py:401 nova/api/ec2/cloud.py:414 +#: ../nova/api/ec2/cloud.py:430 ../nova/api/ec2/cloud.py:459 +msgid "Not enough parameters to build a valid rule." +msgstr "" + +#: ../nova/api/ec2/cloud.py:443 msgid "No rule for the specified parameters." msgstr "No hay regla para los parámetros especificados." 
-#: nova/api/ec2/cloud.py:421 +#: ../nova/api/ec2/cloud.py:450 #, python-format msgid "Authorize security group ingress %s" msgstr "Autorizar ingreso al grupo de seguridad %s" -#: nova/api/ec2/cloud.py:432 +#: ../nova/api/ec2/cloud.py:464 #, python-format msgid "This rule already exists in group %s" msgstr "Esta regla ya existe en el grupo %s" -#: nova/api/ec2/cloud.py:460 +#: ../nova/api/ec2/cloud.py:492 #, python-format msgid "Create Security Group %s" msgstr "Crear Grupo de Seguridad %s" -#: nova/api/ec2/cloud.py:463 +#: ../nova/api/ec2/cloud.py:495 #, python-format msgid "group %s already exists" msgstr "el grupo %s ya existe" -#: nova/api/ec2/cloud.py:475 +#: ../nova/api/ec2/cloud.py:507 #, python-format msgid "Delete security group %s" msgstr "Borrar grupo de seguridad %s" -#: nova/api/ec2/cloud.py:483 nova/compute/manager.py:452 -#, python-format -msgid "Get console output for instance %s" -msgstr "Obtener salida de la consola para la instancia %s" - -#: nova/api/ec2/cloud.py:543 +#: ../nova/api/ec2/cloud.py:584 #, python-format msgid "Create volume of %s GB" msgstr "Crear volumen de %s GB" -#: nova/api/ec2/cloud.py:567 +#: ../nova/api/ec2/cloud.py:612 #, python-format -msgid "Attach volume %s to instacne %s at %s" -msgstr "Asociar volumen %s a la instancia %s en %s" +msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" +msgstr "" -#: nova/api/ec2/cloud.py:579 +#: ../nova/api/ec2/cloud.py:629 #, python-format msgid "Detach volume %s" msgstr "Desasociar volumen %s" -#: nova/api/ec2/cloud.py:686 +#: ../nova/api/ec2/cloud.py:761 msgid "Allocate address" msgstr "Asignar dirección" -#: nova/api/ec2/cloud.py:691 +#: ../nova/api/ec2/cloud.py:766 #, python-format msgid "Release address %s" msgstr "Liberar dirección %s" -#: nova/api/ec2/cloud.py:696 +#: ../nova/api/ec2/cloud.py:771 #, python-format -msgid "Associate address %s to instance %s" -msgstr "Asociar dirección %s a la instancia %s" +msgid "Associate address %(public_ip)s to instance 
%(instance_id)s" +msgstr "" -#: nova/api/ec2/cloud.py:703 +#: ../nova/api/ec2/cloud.py:780 #, python-format msgid "Disassociate address %s" msgstr "Desasociar dirección %s" -#: nova/api/ec2/cloud.py:730 +#: ../nova/api/ec2/cloud.py:807 msgid "Going to start terminating instances" msgstr "Se va a iniciar la finalización de las instancias" -#: nova/api/ec2/cloud.py:738 +#: ../nova/api/ec2/cloud.py:815 #, python-format msgid "Reboot instance %r" msgstr "Reiniciar instancia %r" -#: nova/api/ec2/cloud.py:775 +#: ../nova/api/ec2/cloud.py:867 #, python-format msgid "De-registering image %s" msgstr "Des-registrando la imagen %s" -#: nova/api/ec2/cloud.py:783 +#: ../nova/api/ec2/cloud.py:875 #, python-format -msgid "Registered image %s with id %s" -msgstr "Registrada imagen %s con id %s" +msgid "Registered image %(image_location)s with id %(image_id)s" +msgstr "" -#: nova/api/ec2/cloud.py:789 nova/api/ec2/cloud.py:804 +#: ../nova/api/ec2/cloud.py:882 ../nova/api/ec2/cloud.py:900 #, python-format msgid "attribute not supported: %s" msgstr "atributo no soportado: %s" -#: nova/api/ec2/cloud.py:794 +#: ../nova/api/ec2/cloud.py:890 #, python-format msgid "invalid id: %s" msgstr "id no valido: %s" -#: nova/api/ec2/cloud.py:807 +#: ../nova/api/ec2/cloud.py:903 msgid "user or group not specified" msgstr "usuario o grupo no especificado" -#: nova/api/ec2/cloud.py:809 +#: ../nova/api/ec2/cloud.py:905 msgid "only group \"all\" is supported" msgstr "sólo el grupo \"all\" está soportado" -#: nova/api/ec2/cloud.py:811 +#: ../nova/api/ec2/cloud.py:907 msgid "operation_type must be add or remove" msgstr "operation_type debe ser añadir o eliminar" -#: nova/api/ec2/cloud.py:812 +#: ../nova/api/ec2/cloud.py:908 #, python-format msgid "Updating image %s publicity" msgstr "Actualizando imagen %s públicamente" -#: nova/api/ec2/metadatarequesthandler.py:75 +#: ../bin/nova-api.py:52 #, python-format -msgid "Failed to get metadata for ip: %s" -msgstr "Fallo al generar metadatos para la ip %s" 
+msgid "Using paste.deploy config at: %s" +msgstr "" -#: nova/api/openstack/__init__.py:70 +#: ../bin/nova-api.py:57 #, python-format -msgid "Caught error: %s" -msgstr "Capturado error: %s" - -#: nova/api/openstack/__init__.py:86 -msgid "Including admin operations in API." -msgstr "Incluyendo operaciones de administración in API." +msgid "No paste configuration for app: %s" +msgstr "" -#: nova/api/openstack/servers.py:184 +#: ../bin/nova-api.py:59 #, python-format -msgid "Compute.api::lock %s" -msgstr "Compute.api::lock %s" +msgid "" +"App Config: %(api)s\n" +"%(config)r" +msgstr "" -#: nova/api/openstack/servers.py:199 +#: ../bin/nova-api.py:64 #, python-format -msgid "Compute.api::unlock %s" -msgstr "Compute.api::unlock %s" +msgid "Running %s API" +msgstr "" -#: nova/api/openstack/servers.py:213 +#: ../bin/nova-api.py:69 #, python-format -msgid "Compute.api::get_lock %s" -msgstr "Compute.api::get_lock %s" +msgid "No known API applications configured in %s." +msgstr "" -#: nova/api/openstack/servers.py:224 +#: ../bin/nova-api.py:83 #, python-format -msgid "Compute.api::pause %s" -msgstr "Compute.api::pause %s" +msgid "Starting nova-api node (version %s)" +msgstr "" -#: nova/api/openstack/servers.py:235 +#: ../bin/nova-api.py:89 #, python-format -msgid "Compute.api::unpause %s" -msgstr "Compute.api::unpause %s" +msgid "No paste configuration found for: %s" +msgstr "" -#: nova/api/openstack/servers.py:246 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:84 #, python-format -msgid "compute.api::suspend %s" -msgstr "compute.api::suspend %s" +msgid "Argument %(key)s value %(value)s is too short." +msgstr "" -#: nova/api/openstack/servers.py:257 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:89 #, python-format -msgid "compute.api::resume %s" -msgstr "compute.api::resume %s" +msgid "Argument %(key)s value %(value)s contains invalid characters." 
+msgstr "" -#: nova/auth/dbdriver.py:84 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:94 #, python-format -msgid "User %s already exists" -msgstr "El usuario %s ya existe" +msgid "Argument %(key)s value %(value)s starts with a hyphen." +msgstr "" -#: nova/auth/dbdriver.py:106 nova/auth/ldapdriver.py:207 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:102 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:130 #, python-format -msgid "Project can't be created because manager %s doesn't exist" -msgstr "El proyecto no puede ser creado porque el administrador %s no existe" +msgid "Argument %s is required." +msgstr "" -#: nova/auth/dbdriver.py:135 nova/auth/ldapdriver.py:204 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:117 #, python-format -msgid "Project can't be created because project %s already exists" -msgstr "El proyecto no puede ser creado porque el proyecto %s ya existe" +msgid "" +"Argument %(key)s may not take value %(value)s. Valid values are ['true', " +"'false']." +msgstr "" -#: nova/auth/dbdriver.py:157 nova/auth/ldapdriver.py:241 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:163 #, python-format -msgid "Project can't be modified because manager %s doesn't exist" +msgid "" +"Created VDI %(vdi_ref)s (%(label)s, %(size)s, %(read_only)s) on %(sr_ref)s." 
msgstr "" -"El proyecto no puede ser modificado porque el administrador %s no existe" -#: nova/auth/dbdriver.py:245 +#: ../nova/virt/xenapi/vmops.py:67 #, python-format -msgid "User \"%s\" not found" -msgstr "No se ha encontrado el usuario \"%s\"" +msgid "Attempted to create non-unique name %s" +msgstr "Intentado la creación del nombre no único %s" -#: nova/auth/dbdriver.py:248 +#: ../nova/virt/xenapi/vmops.py:73 #, python-format -msgid "Project \"%s\" not found" -msgstr "No se ha encontrado el proyecto \"%s\"" - -#: nova/auth/fakeldap.py:33 -msgid "Attempted to instantiate singleton" -msgstr "Intento de instanciar sigleton" +msgid "instance %(name)s: not enough free memory" +msgstr "" -#: nova/auth/ldapdriver.py:181 +#: ../nova/virt/xenapi/vmops.py:148 #, python-format -msgid "LDAP object for %s doesn't exist" -msgstr "El objeto LDAP para %s no existe" +msgid "Starting VM %s..." +msgstr "Iniciando VM %s..." -#: nova/auth/ldapdriver.py:218 +#: ../nova/virt/xenapi/vmops.py:151 #, python-format -msgid "Project can't be created because user %s doesn't exist" -msgstr "El proyecto no puede ser creado porque el usuario %s no existe" +msgid "Spawning VM %(instance_name)s created %(vm_ref)s." +msgstr "" -#: nova/auth/ldapdriver.py:478 +#: ../nova/virt/xenapi/vmops.py:162 #, python-format -msgid "User %s is already a member of the group %s" -msgstr "El usuario %s ya es miembro de el grupo %s" +msgid "Invalid value for onset_files: '%s'" +msgstr "" -#: nova/auth/ldapdriver.py:507 +#: ../nova/virt/xenapi/vmops.py:167 #, python-format -msgid "" -"Attempted to remove the last member of a group. Deleting the group at %s " -"instead." +msgid "Injecting file path: '%s'" msgstr "" -"Se ha intentado eliminar el último miembro de un grupo. Eliminando el grupo " -"%s en su lugar." 
-#: nova/auth/ldapdriver.py:528 +#: ../nova/virt/xenapi/vmops.py:180 #, python-format -msgid "Group at dn %s doesn't exist" -msgstr "El grupo con dn %s no existe" +msgid "Instance %s: booted" +msgstr "Instancia %s: iniciada" -#: nova/auth/manager.py:259 +#: ../nova/virt/xenapi/vmops.py:232 #, python-format -msgid "Looking up user: %r" -msgstr "Buscando usuario: %r" +msgid "Instance not present %s" +msgstr "Instancia no existente %s" -#: nova/auth/manager.py:263 +#. TODO(sirp): Add quiesce and VSS locking support when Windows support +#. is added +#: ../nova/virt/xenapi/vmops.py:261 #, python-format -msgid "Failed authorization for access key %s" -msgstr "Fallo de autorización para la clave de acceso %s" +msgid "Starting snapshot for VM %s" +msgstr "Comenzando snapshot para la VM %s" -#: nova/auth/manager.py:264 +#: ../nova/virt/xenapi/vmops.py:269 #, python-format -msgid "No user found for access key %s" -msgstr "No se ha encontrado usuario para la clave de acceso %s" +msgid "Unable to Snapshot %(vm_ref)s: %(exc)s" +msgstr "" -#: nova/auth/manager.py:270 +#: ../nova/virt/xenapi/vmops.py:280 #, python-format -msgid "Using project name = user name (%s)" -msgstr "Utilizando nombre de proyecto = nombre de usuario (%s)" +msgid "Finished snapshot and upload for VM %s" +msgstr "Finalizado el snapshot y la subida de la VM %s" -#: nova/auth/manager.py:275 +#: ../nova/virt/xenapi/vmops.py:356 #, python-format -msgid "failed authorization: no project named %s (user=%s)" +msgid "VM %(vm)s already halted, skipping shutdown..." 
msgstr "" -"fallo de autorización: no existe proyecto con el nombre %s (usuario=%s)" -#: nova/auth/manager.py:277 -#, python-format -msgid "No project called %s could be found" -msgstr "No se ha podido encontrar un proyecto con nombre %s" +#: ../nova/virt/xenapi/vmops.py:389 +msgid "Removing kernel/ramdisk files" +msgstr "" -#: nova/auth/manager.py:281 +#: ../nova/virt/xenapi/vmops.py:399 +msgid "kernel/ramdisk files removed" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:561 #, python-format -msgid "Failed authorization: user %s not admin and not member of project %s" +msgid "" +"TIMEOUT: The call to %(method)s timed out. VM id=%(instance_id)s; " +"args=%(strargs)s" msgstr "" -"Fallo de autorización: el usuario %s no es administrador y no es miembro del " -"proyecto %s" -#: nova/auth/manager.py:283 +#: ../nova/virt/xenapi/vmops.py:564 #, python-format -msgid "User %s is not a member of project %s" -msgstr "El usuario %s no es miembro del proyecto %s" +msgid "" +"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. VM " +"id=%(instance_id)s; args=%(strargs)s" +msgstr "" -#: nova/auth/manager.py:292 nova/auth/manager.py:303 +#: ../nova/virt/xenapi/vmops.py:569 #, python-format -msgid "Invalid signature for user %s" -msgstr "Firma invalida para el usuario %s" +msgid "" +"The call to %(method)s returned an error: %(e)s. 
VM id=%(instance_id)s; " +"args=%(strargs)s" +msgstr "" -#: nova/auth/manager.py:293 nova/auth/manager.py:304 -msgid "Signature does not match" -msgstr "Las firmas no concuerdan" +#: ../nova/virt/xenapi/vmops.py:760 +#, python-format +msgid "OpenSSL error: %s" +msgstr "" -#: nova/auth/manager.py:374 -msgid "Must specify project" -msgstr "Debes especificar un proyecto" +#: ../nova/tests/test_compute.py:148 +#, python-format +msgid "Running instances: %s" +msgstr "Ejecutando instancias: %s" -#: nova/auth/manager.py:408 +#: ../nova/tests/test_compute.py:154 #, python-format -msgid "The %s role can not be found" -msgstr "El rol %s no se ha podido encontrar" +msgid "After terminating instances: %s" +msgstr "Después de terminar las instancias: %s" + +#: ../nova/cloudpipe/pipelib.py:45 +msgid "Template for script to run on cloudpipe instance boot" +msgstr "" + +#: ../nova/cloudpipe/pipelib.py:48 +msgid "Network to push into openvpn config" +msgstr "Red a insertar en la configuración de openvpn" -#: nova/auth/manager.py:410 +#: ../nova/cloudpipe/pipelib.py:51 +msgid "Netmask to push into openvpn config" +msgstr "Mascara de red a insertar en la configuración de openvpn" + +#: ../nova/cloudpipe/pipelib.py:97 #, python-format -msgid "The %s role is global only" -msgstr "El rol %s es únicamente global" +msgid "Launching VPN for %s" +msgstr "Lanzando VPN para %s" -#: nova/auth/manager.py:412 +#: ../nova/db/sqlalchemy/migration.py:35 +msgid "python-migrate is not installed. Exiting." +msgstr "" + +#: ../nova/image/s3.py:99 #, python-format -msgid "Adding role %s to user %s in project %s" -msgstr "Añadiendo rol %s al usuario %s en el proyecto %s" +msgid "Image %s could not be found" +msgstr "La imagen %s no ha podido ser encontrada" + +#: ../nova/api/ec2/__init__.py:121 +msgid "Too many failed authentications." +msgstr "Demasiados intentos de autenticacion fallidos." 
-#: nova/auth/manager.py:438 +#: ../nova/api/ec2/__init__.py:131 #, python-format -msgid "Removing role %s from user %s on project %s" -msgstr "Eliminando rol %s al usuario %s en el proyecto %s" +msgid "" +"Access key %(access_key)s has had %(failures)d failed authentications and " +"will be locked out for %(lock_mins)d minutes." +msgstr "" -#: nova/auth/manager.py:505 +#: ../nova/api/ec2/__init__.py:169 ../nova/objectstore/handler.py:140 #, python-format -msgid "Created project %s with manager %s" -msgstr "Proyecto %s creado con administrador %s" +msgid "Authentication Failure: %s" +msgstr "Fallo de autenticación: %s" -#: nova/auth/manager.py:523 +#: ../nova/api/ec2/__init__.py:182 #, python-format -msgid "modifying project %s" -msgstr "modificando proyecto %s" +msgid "Authenticated Request For %(uname)s:%(pname)s)" +msgstr "" -#: nova/auth/manager.py:553 +#: ../nova/api/ec2/__init__.py:207 #, python-format -msgid "Remove user %s from project %s" -msgstr "Eliminar usuario %s del proyecto %s" +msgid "action: %s" +msgstr "acción: %s" -#: nova/auth/manager.py:581 +#: ../nova/api/ec2/__init__.py:209 #, python-format -msgid "Deleting project %s" -msgstr "Eliminando proyecto %s" +msgid "arg: %(key)s\t\tval: %(value)s" +msgstr "" -#: nova/auth/manager.py:637 +#: ../nova/api/ec2/__init__.py:281 #, python-format -msgid "Created user %s (admin: %r)" -msgstr "Creado usuario %s (administrador: %r)" +msgid "" +"Unauthorized request for controller=%(controller)s and action=%(action)s" +msgstr "" -#: nova/auth/manager.py:645 +#: ../nova/api/ec2/__init__.py:314 #, python-format -msgid "Deleting user %s" -msgstr "Eliminando usuario %s" +msgid "InstanceNotFound raised: %s" +msgstr "" -#: nova/auth/manager.py:655 +#: ../nova/api/ec2/__init__.py:320 #, python-format -msgid "Access Key change for user %s" -msgstr "Cambio de clave de acceso para el usuario %s" +msgid "VolumeNotFound raised: %s" +msgstr "" -#: nova/auth/manager.py:657 +#: ../nova/api/ec2/__init__.py:326 #, python-format 
-msgid "Secret Key change for user %s" -msgstr "Cambio de clave secreta para el usuario %s" +msgid "NotFound raised: %s" +msgstr "No encontrado: %s" -#: nova/auth/manager.py:659 +#: ../nova/api/ec2/__init__.py:329 #, python-format -msgid "Admin status set to %r for user %s" -msgstr "El estado del administrador se ha fijado a %r para el usuario %s" +msgid "ApiError raised: %s" +msgstr "Sucedió un ApiError: %s" -#: nova/auth/manager.py:708 +#: ../nova/api/ec2/__init__.py:338 #, python-format -msgid "No vpn data for project %s" -msgstr "No hay datos vpn para el proyecto %s" +msgid "Unexpected error raised: %s" +msgstr "Sucedió un error inexperado: %s" -#: nova/cloudpipe/pipelib.py:45 -msgid "Template for script to run on cloudpipe instance boot" +#: ../nova/api/ec2/__init__.py:343 +msgid "An unknown error has occurred. Please try your request again." msgstr "" +"Ha sucedido un error desconocido. Por favor repite el intento de nuevo." -#: nova/cloudpipe/pipelib.py:48 -msgid "Network to push into openvpn config" -msgstr "Red a insertar en la configuración de openvpn" +#: ../nova/auth/dbdriver.py:84 +#, python-format +msgid "User %s already exists" +msgstr "El usuario %s ya existe" -#: nova/cloudpipe/pipelib.py:51 -msgid "Netmask to push into openvpn config" -msgstr "Mascara de red a insertar en la configuración de openvpn" +#: ../nova/auth/dbdriver.py:106 ../nova/auth/ldapdriver.py:232 +#, python-format +msgid "Project can't be created because manager %s doesn't exist" +msgstr "El proyecto no puede ser creado porque el administrador %s no existe" -#: nova/cloudpipe/pipelib.py:97 +#: ../nova/auth/dbdriver.py:122 ../nova/auth/ldapdriver.py:243 #, python-format -msgid "Launching VPN for %s" -msgstr "Lanzando VPN para %s" +msgid "Project can't be created because user %s doesn't exist" +msgstr "El proyecto no puede ser creado porque el usuario %s no existe" -#: nova/compute/api.py:67 +#: ../nova/auth/dbdriver.py:135 ../nova/auth/ldapdriver.py:229 #, python-format -msgid 
"Instance %d was not found in get_network_topic" -msgstr "La instancia %d no se ha encontrado en get_network_topic" +msgid "Project can't be created because project %s already exists" +msgstr "El proyecto no puede ser creado porque el proyecto %s ya existe" -#: nova/compute/api.py:73 +#: ../nova/auth/dbdriver.py:157 ../nova/auth/ldapdriver.py:268 #, python-format -msgid "Instance %d has no host" -msgstr "La instancia %d no tiene host" +msgid "Project can't be modified because manager %s doesn't exist" +msgstr "" +"El proyecto no puede ser modificado porque el administrador %s no existe" -#: nova/compute/api.py:92 +#: ../nova/auth/dbdriver.py:245 #, python-format -msgid "Quota exceeeded for %s, tried to run %s instances" -msgstr "Quota superada por %s, intentando lanzar %s instancias" +msgid "User \"%s\" not found" +msgstr "No se ha encontrado el usuario \"%s\"" -#: nova/compute/api.py:94 +#: ../nova/auth/dbdriver.py:248 #, python-format +msgid "Project \"%s\" not found" +msgstr "No se ha encontrado el proyecto \"%s\"" + +#: ../nova/virt/xenapi_conn.py:129 msgid "" -"Instance quota exceeded. You can only run %s more instances of this type." +"Must specify xenapi_connection_url, xenapi_connection_username (optionally), " +"and xenapi_connection_password to use connection_type=xenapi" msgstr "" -"Quota de instancias superada. Sólo puedes ejecutar %s instancias más de este " -"tipo." +"Debes especificar xenapi_connection_url, xenapi_connection_username " +"(opcional), y xenapi_connection_password para usar connection_type=xenapi" -#: nova/compute/api.py:109 -msgid "Creating a raw instance" -msgstr "Creando una instancia raw" +#: ../nova/virt/xenapi_conn.py:311 +#, python-format +msgid "Task [%(name)s] %(task)s status: success %(result)s" +msgstr "" -#: nova/compute/api.py:156 +#: ../nova/virt/xenapi_conn.py:317 #, python-format -msgid "Going to run %s instances..." -msgstr "Vamos a ejecutar %s insntacias..." 
+msgid "Task [%(name)s] %(task)s status: %(status)s %(error_info)s" +msgstr "" -#: nova/compute/api.py:180 +#: ../nova/virt/xenapi_conn.py:331 ../nova/virt/xenapi_conn.py:344 #, python-format -msgid "Casting to scheduler for %s/%s's instance %s" -msgstr "Llamando al planificar para %s/%s insntancia %s" +msgid "Got exception: %s" +msgstr "Obtenida excepción %s" -#: nova/compute/api.py:279 +#: ../nova/compute/monitor.py:259 #, python-format -msgid "Going to try and terminate %s" -msgstr "Se va a probar y terminar %s" +msgid "updating %s..." +msgstr "actualizando %s..." + +#: ../nova/compute/monitor.py:289 +msgid "unexpected error during update" +msgstr "error inesperado durante la actualización" -#: nova/compute/api.py:283 +#: ../nova/compute/monitor.py:356 #, python-format -msgid "Instance %d was not found during terminate" -msgstr "La instancia %d no se ha encontrado durante la finalización" +msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\"" +msgstr "" -#: nova/compute/api.py:288 +#: ../nova/compute/monitor.py:379 #, python-format -msgid "Instance %d is already being terminated" -msgstr "La instancia %d ha sido finalizada" +msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\"" +msgstr "" -#: nova/compute/api.py:450 +#: ../nova/compute/monitor.py:414 +msgid "unexpected exception getting connection" +msgstr "excepción inexperada al obtener la conexión" + +#: ../nova/compute/monitor.py:429 #, python-format -msgid "Invalid device specified: %s. Example device: /dev/vdb" +msgid "Found instance: %s" +msgstr "Encontrada interfaz: %s" + +#: ../nova/volume/san.py:67 +#, python-format +msgid "Could not find iSCSI export for volume %s" msgstr "" -"El dispositivo especificado no es válido: %s. Ejemplo de dispositivo: " -"/dev/vdb" -#: nova/compute/api.py:465 -msgid "Volume isn't attached to anything!" -msgstr "¡El volumen no está unido a nada!" 
+#: ../nova/api/ec2/apirequest.py:100 +#, python-format +msgid "" +"Unsupported API request: controller = %(controller)s, action = %(action)s" +msgstr "" -#: nova/compute/disk.py:71 +#: ../nova/api/openstack/__init__.py:55 #, python-format -msgid "Input partition size not evenly divisible by sector size: %d / %d" +msgid "Caught error: %s" +msgstr "Capturado error: %s" + +#: ../nova/api/openstack/__init__.py:76 +msgid "Including admin operations in API." +msgstr "Incluyendo operaciones de administración in API." + +#: ../nova/console/xvp.py:99 +msgid "Rebuilding xvp conf" msgstr "" -"El tamaño de la partición de entrada no es divisible de forma uniforme por " -"el tamaño del sector: %d / %d" -#: nova/compute/disk.py:75 +#: ../nova/console/xvp.py:116 #, python-format -msgid "Bytes for local storage not evenly divisible by sector size: %d / %d" +msgid "Re-wrote %s" +msgstr "" + +#: ../nova/console/xvp.py:121 +msgid "Stopping xvp" msgstr "" -"Los bytes del almacenamiento local no son divisibles de forma uniforme por " -"el tamaño del sector: %d / %d" -#: nova/compute/disk.py:128 +#: ../nova/console/xvp.py:134 +msgid "Starting xvp" +msgstr "" + +#: ../nova/console/xvp.py:141 #, python-format -msgid "Could not attach image to loopback: %s" -msgstr "No se puede unir la imagen con el loopback: %s" +msgid "Error starting xvp: %s" +msgstr "" + +#: ../nova/console/xvp.py:144 +msgid "Restarting xvp" +msgstr "" + +#: ../nova/console/xvp.py:146 +msgid "xvp not running..." +msgstr "" + +#: ../bin/nova-manage.py:272 +msgid "" +"The above error may show that the database has not been created.\n" +"Please create a database using nova-manage sync db before running this " +"command." +msgstr "" + +#: ../bin/nova-manage.py:426 +msgid "" +"No more networks available. 
If this is a new installation, you need\n" +"to call something like this:\n" +"\n" +" nova-manage network create 10.0.0.0/8 10 64\n" +"\n" +msgstr "" + +#: ../bin/nova-manage.py:431 +msgid "" +"The above error may show that the certificate db has not been created.\n" +"Please create a database by running a nova-api server on this host." +msgstr "" + +#: ../bin/nova-manage.py:447 ../bin/nova-manage.py:536 +msgid "network" +msgstr "" + +#: ../bin/nova-manage.py:448 +msgid "IP address" +msgstr "" + +#: ../bin/nova-manage.py:449 +msgid "MAC address" +msgstr "" -#: nova/compute/disk.py:136 +#: ../bin/nova-manage.py:450 +msgid "hostname" +msgstr "" + +#: ../bin/nova-manage.py:451 +msgid "host" +msgstr "" + +#: ../bin/nova-manage.py:537 +msgid "netmask" +msgstr "" + +#: ../bin/nova-manage.py:538 +msgid "start address" +msgstr "" + +#: ../nova/virt/disk.py:69 #, python-format msgid "Failed to load partition: %s" msgstr "Fallo al cargar la partición: %s" -#: nova/compute/disk.py:158 +#: ../nova/virt/disk.py:91 #, python-format msgid "Failed to mount filesystem: %s" msgstr "Fallo al montar el sistema de ficheros: %s" -#: nova/compute/instance_types.py:41 +#: ../nova/virt/disk.py:124 #, python-format -msgid "Unknown instance type: %s" -msgstr "Tipo de instancia desconocido: %s" +msgid "nbd device %s did not show up" +msgstr "" -#: nova/compute/manager.py:69 +#: ../nova/virt/disk.py:128 #, python-format -msgid "check_instance_lock: decorating: |%s|" -msgstr "check_instance_lock: decorating: |%s|" +msgid "Could not attach image to loopback: %s" +msgstr "No se puede unir la imagen con el loopback: %s" + +#: ../nova/virt/disk.py:151 +msgid "No free nbd devices" +msgstr "" -#: nova/compute/manager.py:71 +#: ../doc/ext/nova_todo.py:46 #, python-format -msgid "check_instance_lock: arguments: |%s| |%s| |%s|" -msgstr "check_instance_lock: arguments: |%s| |%s| |%s|" +msgid "%(filename)s, line %(line_info)d" +msgstr "" + +#. 
FIXME(chiradeep): implement this +#: ../nova/virt/hyperv.py:118 +msgid "In init host" +msgstr "En el host inicial" -#: nova/compute/manager.py:75 +#: ../nova/virt/hyperv.py:131 #, python-format -msgid "check_instance_lock: locked: |%s|" -msgstr "check_instance_lock: locked: |%s|" +msgid "Attempt to create duplicate vm %s" +msgstr "Intento de crear una vm duplicada %s" -#: nova/compute/manager.py:77 +#: ../nova/virt/hyperv.py:148 #, python-format -msgid "check_instance_lock: admin: |%s|" -msgstr "check_instance_lock: admin: |%s|" +msgid "Starting VM %s " +msgstr "Comenzando VM %s " -#: nova/compute/manager.py:82 +#: ../nova/virt/hyperv.py:150 #, python-format -msgid "check_instance_lock: executing: |%s|" -msgstr "check_instance_lock: ejecutando: |%s|" +msgid "Started VM %s " +msgstr "VM %s iniciada " -#: nova/compute/manager.py:86 +#: ../nova/virt/hyperv.py:152 #, python-format -msgid "check_instance_lock: not executing |%s|" -msgstr "check_instance_lock: no ejecutando |%s|" +msgid "spawn vm failed: %s" +msgstr "Inicio de vm fallido: %s" -#: nova/compute/manager.py:157 -msgid "Instance has already been created" -msgstr "La instancia ha sido creada previamente" +#: ../nova/virt/hyperv.py:169 +#, python-format +msgid "Failed to create VM %s" +msgstr "Fallo al crear la VM %s" -#: nova/compute/manager.py:158 +#: ../nova/virt/hyperv.py:188 #, python-format -msgid "instance %s: starting..." -msgstr "instancia %s: iniciando..." +msgid "Set memory for vm %s..." +msgstr "Se ha establecido la memoria para vm %s..." -#: nova/compute/manager.py:197 +#: ../nova/virt/hyperv.py:198 #, python-format -msgid "instance %s: Failed to spawn" -msgstr "Instancia %s: no se pudo iniciar" +msgid "Set vcpus for vm %s..." +msgstr "Establecidas vcpus para vm %s..." 
-#: nova/compute/manager.py:211 nova/tests/test_cloud.py:228 +#: ../nova/virt/hyperv.py:202 #, python-format -msgid "Terminating instance %s" -msgstr "Finalizando la instancia %s" +msgid "Creating disk for %(vm_name)s by attaching disk file %(vhdfile)s" +msgstr "" -#: nova/compute/manager.py:217 +#: ../nova/virt/hyperv.py:227 #, python-format -msgid "Disassociating address %s" -msgstr "Desasociando la dirección %s" +msgid "Failed to add diskdrive to VM %s" +msgstr "Fallo al añadir unidad de disco a la VM %s" -#: nova/compute/manager.py:230 +#: ../nova/virt/hyperv.py:230 #, python-format -msgid "Deallocating address %s" -msgstr "Desasociando la dirección %s" +msgid "New disk drive path is %s" +msgstr "La nueva ruta para unidad de disco es %s" -#: nova/compute/manager.py:243 +#: ../nova/virt/hyperv.py:247 #, python-format -msgid "trying to destroy already destroyed instance: %s" -msgstr "intentando finalizar una instancia que ya había sido finalizada: %s" +msgid "Failed to add vhd file to VM %s" +msgstr "Fallo al añadir el fichero vhd a la VM %s" -#: nova/compute/manager.py:257 +#: ../nova/virt/hyperv.py:249 #, python-format -msgid "Rebooting instance %s" -msgstr "Reiniciando instancia %s" +msgid "Created disk for %s" +msgstr "Discos creados para %s" -#: nova/compute/manager.py:260 +#: ../nova/virt/hyperv.py:253 #, python-format -msgid "trying to reboot a non-running instance: %s (state: %s excepted: %s)" -msgstr "" -"intentando reiniciar una instancia que no está en ejecución: %s (estado: %s " -"esperado: %s)" +msgid "Creating nic for %s " +msgstr "Creando nic para %s " -#: nova/compute/manager.py:286 +#: ../nova/virt/hyperv.py:272 +msgid "Failed creating a port on the external vswitch" +msgstr "Fallo al crear un puerto en el vswitch externo" + +#: ../nova/virt/hyperv.py:273 #, python-format -msgid "instance %s: snapshotting" -msgstr "instancia %s: creando snapshot" +msgid "Failed creating port for %s" +msgstr "Fallo creando puerto para %s" -#: 
nova/compute/manager.py:289 +#: ../nova/virt/hyperv.py:276 #, python-format -msgid "" -"trying to snapshot a non-running instance: %s (state: %s excepted: %s)" +msgid "Created switch port %(vm_name)s on switch %(ext_path)s" msgstr "" -"intentando crear un snapshot de una instancia que no está en ejecución: %s " -"(estado: %s esperado: %s)" -#: nova/compute/manager.py:301 +#: ../nova/virt/hyperv.py:286 #, python-format -msgid "instance %s: rescuing" -msgstr "instancia %s: rescatando" +msgid "Failed to add nic to VM %s" +msgstr "Fallo al añadir nic a la VM %s" -#: nova/compute/manager.py:316 +#: ../nova/virt/hyperv.py:288 #, python-format -msgid "instance %s: unrescuing" +msgid "Created nic for %s " +msgstr "Creando nic para %s " + +#: ../nova/virt/hyperv.py:321 +#, python-format +msgid "WMI job failed: %s" +msgstr "Trabajo WMI falló: %s" + +#: ../nova/virt/hyperv.py:325 +#, python-format +msgid "WMI job succeeded: %(desc)s, Elapsed=%(elap)s " msgstr "" -#: nova/compute/manager.py:335 +#: ../nova/virt/hyperv.py:361 #, python-format -msgid "instance %s: pausing" -msgstr "instancia %s: pausando" +msgid "Got request to destroy vm %s" +msgstr "Recibida solicitud para destruir vm %s" -#: nova/compute/manager.py:352 +#: ../nova/virt/hyperv.py:386 #, python-format -msgid "instance %s: unpausing" -msgstr "instnacia %s: continuando tras pausa" +msgid "Failed to destroy vm %s" +msgstr "Fallo al destruir vm %s" -#: nova/compute/manager.py:369 +#: ../nova/virt/hyperv.py:393 #, python-format -msgid "instance %s: retrieving diagnostics" -msgstr "instancia %s: obteniendo los diagnosticos" +msgid "Del: disk %(vhdfile)s vm %(instance_name)s" +msgstr "" -#: nova/compute/manager.py:382 +#: ../nova/virt/hyperv.py:415 #, python-format -msgid "instance %s: suspending" -msgstr "instancia %s: suspendiendo" +msgid "" +"Got Info for vm %(instance_id)s: state=%(state)s, mem=%(memusage)s, " +"num_cpu=%(numprocs)s, cpu_time=%(uptime)s" +msgstr "" -#: nova/compute/manager.py:401 +#: 
../nova/virt/hyperv.py:451 #, python-format -msgid "instance %s: resuming" -msgstr "instancia %s: continuando" +msgid "Successfully changed vm state of %(vm_name)s to %(req_state)s" +msgstr "" -#: nova/compute/manager.py:420 +#: ../nova/virt/hyperv.py:454 #, python-format -msgid "instance %s: locking" -msgstr "instancia %s: bloqueando" +msgid "Failed to change vm state of %(vm_name)s to %(req_state)s" +msgstr "" -#: nova/compute/manager.py:432 +#: ../nova/compute/api.py:71 #, python-format -msgid "instance %s: unlocking" -msgstr "instancia %s: desbloqueando" +msgid "Instance %d was not found in get_network_topic" +msgstr "La instancia %d no se ha encontrado en get_network_topic" -#: nova/compute/manager.py:442 +#: ../nova/compute/api.py:77 #, python-format -msgid "instance %s: getting locked state" -msgstr "instancia %s: pasando a estado bloqueado" +msgid "Instance %d has no host" +msgstr "La instancia %d no tiene host" -#: nova/compute/manager.py:462 +#: ../nova/compute/api.py:97 #, python-format -msgid "instance %s: attaching volume %s to %s" -msgstr "instancia %s: asociando volumen %s a %s" +msgid "Quota exceeeded for %(pid)s, tried to run %(min_count)s instances" +msgstr "" -#: nova/compute/manager.py:478 +#: ../nova/compute/api.py:99 #, python-format -msgid "instance %s: attach failed %s, removing" -msgstr "instalación %s: asociación fallida %s, eliminando" +msgid "" +"Instance quota exceeded. You can only run %s more instances of this type." +msgstr "" +"Quota de instancias superada. Sólo puedes ejecutar %s instancias más de este " +"tipo." + +#: ../nova/compute/api.py:112 +msgid "Creating a raw instance" +msgstr "Creando una instancia raw" -#: nova/compute/manager.py:493 +#: ../nova/compute/api.py:160 #, python-format -msgid "Detach volume %s from mountpoint %s on instance %s" -msgstr "Desvinculando volumen %s del punto de montaje %s en la instancia %s" +msgid "Going to run %s instances..." +msgstr "Vamos a ejecutar %s insntacias..." 
-#: nova/compute/manager.py:497 +#: ../nova/compute/api.py:187 #, python-format -msgid "Detaching volume from unknown instance %s" -msgstr "Desvinculando volumen de instancia desconocida %s" +msgid "Casting to scheduler for %(pid)s/%(uid)s's instance %(instance_id)s" +msgstr "" -#: nova/compute/monitor.py:259 +#: ../nova/compute/api.py:292 #, python-format -msgid "updating %s..." -msgstr "actualizando %s..." +msgid "Going to try to terminate %s" +msgstr "" -#: nova/compute/monitor.py:289 -msgid "unexpected error during update" -msgstr "error inesperado durante la actualización" +#: ../nova/compute/api.py:296 +#, python-format +msgid "Instance %d was not found during terminate" +msgstr "La instancia %d no se ha encontrado durante la finalización" -#: nova/compute/monitor.py:355 +#: ../nova/compute/api.py:301 #, python-format -msgid "Cannot get blockstats for \"%s\" on \"%s\"" -msgstr "No puedo obtener estadísticas del bloque para \"%s\" en \"%s\"" +msgid "Instance %d is already being terminated" +msgstr "La instancia %d ha sido finalizada" -#: nova/compute/monitor.py:377 +#: ../nova/compute/api.py:481 #, python-format -msgid "Cannot get ifstats for \"%s\" on \"%s\"" -msgstr "No puedo obtener estadísticas de la interfaz para \"%s\" en \"%s\"" +msgid "Invalid device specified: %s. Example device: /dev/vdb" +msgstr "" +"El dispositivo especificado no es válido: %s. Ejemplo de dispositivo: " +"/dev/vdb" -#: nova/compute/monitor.py:412 -msgid "unexpected exception getting connection" -msgstr "excepción inexperada al obtener la conexión" +#: ../nova/compute/api.py:496 +msgid "Volume isn't attached to anything!" +msgstr "¡El volumen no está unido a nada!" -#: nova/compute/monitor.py:427 +#: ../nova/rpc.py:98 #, python-format -msgid "Found instance: %s" -msgstr "Encontrada interfaz: %s" +msgid "" +"AMQP server on %(fl_host)s:%(fl_port)d is unreachable. Trying again in " +"%(fl_intv)d seconds." 
+msgstr "" -#: nova/db/sqlalchemy/api.py:43 -msgid "Use of empty request context is deprecated" -msgstr "El uso de una petición de contexto vacía está en desuso" +#: ../nova/rpc.py:103 +#, python-format +msgid "Unable to connect to AMQP server after %d tries. Shutting down." +msgstr "" +"Imposible conectar al servidor AMQP después de %d intentos. Apagando." + +#: ../nova/rpc.py:122 +msgid "Reconnected to queue" +msgstr "Reconectado a la cola" -#: nova/db/sqlalchemy/api.py:132 +#: ../nova/rpc.py:129 +msgid "Failed to fetch message from queue" +msgstr "Fallo al obtener el mensaje de la cola" + +#: ../nova/rpc.py:159 #, python-format -msgid "No service for id %s" -msgstr "No hay servicio para el id %s" +msgid "Initing the Adapter Consumer for %s" +msgstr "" -#: nova/db/sqlalchemy/api.py:229 +#: ../nova/rpc.py:178 #, python-format -msgid "No service for %s, %s" -msgstr "No hay servicio para %s, %s" +msgid "received %s" +msgstr "recibido %s" -#: nova/db/sqlalchemy/api.py:574 +#. NOTE(vish): we may not want to ack here, but that means that bad +#. messages stay in the queue indefinitely, so for now +#. we just log the message and send an error string +#. 
back to the caller +#: ../nova/rpc.py:191 #, python-format -msgid "No floating ip for address %s" -msgstr "No hay ip flotante para la dirección %s" +msgid "no method for message: %s" +msgstr "no hay método para el mensaje: %s" -#: nova/db/sqlalchemy/api.py:668 +#: ../nova/rpc.py:192 #, python-format -msgid "No instance for id %s" -msgstr "No hay instancia con id %s" +msgid "No method for message: %s" +msgstr "No hay método para el mensaje: %s" -#: nova/db/sqlalchemy/api.py:758 nova/virt/libvirt_conn.py:598 -#: nova/virt/xenapi/volumeops.py:48 nova/virt/xenapi/volumeops.py:103 +#: ../nova/rpc.py:253 #, python-format -msgid "Instance %s not found" -msgstr "La instancia %s no se ha encontrado" +msgid "Returning exception %s to caller" +msgstr "" -#: nova/db/sqlalchemy/api.py:891 +#: ../nova/rpc.py:294 #, python-format -msgid "no keypair for user %s, name %s" -msgstr "no hay par de claves para el usuario %s, nombre %s" +msgid "unpacked context: %s" +msgstr "contenido desempaquetado: %s" -#: nova/db/sqlalchemy/api.py:1006 nova/db/sqlalchemy/api.py:1064 +#: ../nova/rpc.py:313 +msgid "Making asynchronous call..." +msgstr "Haciendo una llamada asíncrona..." + +#: ../nova/rpc.py:316 #, python-format -msgid "No network for id %s" -msgstr "No hay red para el id %s" +msgid "MSG_ID is %s" +msgstr "MSG_ID es %s" -#: nova/db/sqlalchemy/api.py:1036 +#: ../nova/rpc.py:354 +msgid "Making asynchronous cast..." 
+msgstr "" + +#: ../nova/rpc.py:364 #, python-format -msgid "No network for bridge %s" -msgstr "No hay red para el puente %s" +msgid "response %s" +msgstr "respuesta %s" -#: nova/db/sqlalchemy/api.py:1050 +#: ../nova/rpc.py:373 #, python-format -msgid "No network for instance %s" -msgstr "No hay red para la instancia %s" +msgid "topic is %s" +msgstr "" -#: nova/db/sqlalchemy/api.py:1180 +#: ../nova/rpc.py:374 #, python-format -msgid "Token %s does not exist" -msgstr "El token %s no existe" +msgid "message %s" +msgstr "mensaje %s" -#: nova/db/sqlalchemy/api.py:1205 +#: ../nova/volume/driver.py:78 #, python-format -msgid "No quota for project_id %s" -msgstr "No hay quota para el project:id %s" +msgid "Recovering from a failed execute. Try number %s" +msgstr "Recuperandose de una ejecución fallida. Intenta el número %s" -#: nova/db/sqlalchemy/api.py:1356 +#: ../nova/volume/driver.py:87 #, python-format -msgid "No volume for id %s" -msgstr "No hay volumen para el id %s" +msgid "volume group %s doesn't exist" +msgstr "el grupo de volumenes %s no existe" -#: nova/db/sqlalchemy/api.py:1401 +#: ../nova/volume/driver.py:220 #, python-format -msgid "Volume %s not found" -msgstr "El volumen %s no se ha encontrado" +msgid "FAKE AOE: %s" +msgstr "Falso AOE: %s" -#: nova/db/sqlalchemy/api.py:1413 +#: ../nova/volume/driver.py:233 +msgid "Skipping ensure_export. No iscsi_target " +msgstr "" + +#: ../nova/volume/driver.py:279 ../nova/volume/driver.py:288 +msgid "Skipping remove_export. 
No iscsi_target " +msgstr "" + +#: ../nova/volume/driver.py:347 #, python-format -msgid "No export device found for volume %s" -msgstr "No se ha encontrado dispositivo exportado para el volumen %s" +msgid "FAKE ISCSI: %s" +msgstr "Falso ISCSI: %s" -#: nova/db/sqlalchemy/api.py:1426 +#: ../nova/volume/driver.py:359 #, python-format -msgid "No target id found for volume %s" -msgstr "No se ha encontrado id de destino para el volumen %s" +msgid "rbd has no pool %s" +msgstr "" -#: nova/db/sqlalchemy/api.py:1471 +#: ../nova/volume/driver.py:414 #, python-format -msgid "No security group with id %s" -msgstr "No hay un grupo de seguridad con el id %s" +msgid "Sheepdog is not working: %s" +msgstr "" -#: nova/db/sqlalchemy/api.py:1488 +#: ../nova/volume/driver.py:416 +msgid "Sheepdog is not working" +msgstr "" + +#: ../nova/wsgi.py:68 #, python-format -msgid "No security group named %s for project: %s" -msgstr "No hay un grupo de seguridad con nombre %s para el proyecto: %s" +msgid "Starting %(arg0)s on %(host)s:%(port)s" +msgstr "" + +#: ../nova/wsgi.py:147 +msgid "You must implement __call__" +msgstr "" + +#: ../bin/nova-instancemonitor.py:55 +msgid "Starting instance monitor" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:58 +msgid "leasing ip" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:73 +msgid "Adopted old lease or got a change of mac/hostname" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:80 +msgid "releasing ip" +msgstr "" -#: nova/db/sqlalchemy/api.py:1576 +#: ../bin/nova-dhcpbridge.py:123 #, python-format -msgid "No secuity group rule with id %s" -msgstr "No hay una regla para el grupo de seguridad con el id %s" +msgid "" +"Called %(action)s for mac %(mac)s with ip %(ip)s and hostname %(hostname)s " +"on interface %(interface)s" +msgstr "" -#: nova/db/sqlalchemy/api.py:1650 +#: ../nova/virt/fake.py:239 #, python-format -msgid "No user for id %s" -msgstr "No hay un usuario con el id %s" +msgid "Instance %s Not Found" +msgstr "La instancia %s no ha sido encontrada" -#: 
nova/db/sqlalchemy/api.py:1666 +#: ../nova/network/manager.py:153 #, python-format -msgid "No user for access key %s" -msgstr "No hay un usuario para la clave de acceso %s" +msgid "Dissassociated %s stale fixed ip(s)" +msgstr "" -#: nova/db/sqlalchemy/api.py:1728 +#: ../nova/network/manager.py:157 +msgid "setting network host" +msgstr "configurando la red del host" + +#: ../nova/network/manager.py:212 #, python-format -msgid "No project with id %s" -msgstr "No hay proyecto con id %s" +msgid "Leasing IP %s" +msgstr "Liberando IP %s" -#: nova/image/glance.py:78 +#: ../nova/network/manager.py:216 #, python-format -msgid "Parallax returned HTTP error %d from request for /images" -msgstr "Parallax ha devuelto un error HTTP %d a la petición para /images" +msgid "IP %s leased that isn't associated" +msgstr "" -#: nova/image/glance.py:97 +#: ../nova/network/manager.py:220 #, python-format -msgid "Parallax returned HTTP error %d from request for /images/detail" +msgid "IP %(address)s leased to bad mac %(inst_addr)s vs %(mac)s" msgstr "" -"Parallax ha devuelto un error HTTP %d para la petición para /images/detail" -#: nova/image/s3.py:82 +#: ../nova/network/manager.py:228 #, python-format -msgid "Image %s could not be found" -msgstr "La imagen %s no ha podido ser encontrada" +msgid "IP %s leased that was already deallocated" +msgstr "" -#: nova/network/api.py:39 +#: ../nova/network/manager.py:233 #, python-format -msgid "Quota exceeeded for %s, tried to allocate address" -msgstr "Quota excedida para %s, intentando asignar direcciones" +msgid "Releasing IP %s" +msgstr "" -#: nova/network/api.py:42 -msgid "Address quota exceeded. You cannot allocate any more addresses" +#: ../nova/network/manager.py:237 +#, python-format +msgid "IP %s released that isn't associated" msgstr "" -"La quota de direcciones ha sido excedida. 
No puedes asignar más direcciones" -#: nova/network/linux_net.py:176 +#: ../nova/network/manager.py:241 #, python-format -msgid "Starting VLAN inteface %s" -msgstr "Iniciando interfaz VLAN %s" +msgid "IP %(address)s released from bad mac %(inst_addr)s vs %(mac)s" +msgstr "" -#: nova/network/linux_net.py:186 +#: ../nova/network/manager.py:244 #, python-format -msgid "Starting Bridge interface for %s" -msgstr "Iniciando interfaz puente para %s" +msgid "IP %s released that was not leased" +msgstr "" + +#: ../nova/network/manager.py:519 +msgid "" +"The sum between the number of networks and the vlan start cannot be greater " +"than 4094" +msgstr "" -#: nova/network/linux_net.py:254 +#: ../nova/virt/xenapi/volume_utils.py:57 #, python-format -msgid "Hupping dnsmasq threw %s" -msgstr "Excepción al recargar la configuración de dnsmasq: %s" +msgid "Introducing %s..." +msgstr "Introduciendo %s..." -#: nova/network/linux_net.py:256 +#: ../nova/virt/xenapi/volume_utils.py:74 #, python-format -msgid "Pid %d is stale, relaunching dnsmasq" -msgstr "El pid %d está pasado, relanzando dnsmasq" +msgid "Introduced %(label)s as %(sr_ref)s." +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:78 +msgid "Unable to create Storage Repository" +msgstr "Imposible crear el repositorio de almacenamiento" -#: nova/network/linux_net.py:334 +#: ../nova/virt/xenapi/volume_utils.py:90 #, python-format -msgid "Killing dnsmasq threw %s" -msgstr "Al matar dnsmasq se lanzó %s" +msgid "Unable to find SR from VBD %s" +msgstr "Imposible encontrar SR en VBD %s" -#: nova/network/manager.py:135 -msgid "setting network host" -msgstr "configurando la red del host" +#: ../nova/virt/xenapi/volume_utils.py:96 +#, python-format +msgid "Forgetting SR %s ... " +msgstr "Olvidando SR %s... 
" -#: nova/network/manager.py:190 +#: ../nova/virt/xenapi/volume_utils.py:101 #, python-format -msgid "Leasing IP %s" -msgstr "Liberando IP %s" +msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s" +msgstr "" -#: nova/network/manager.py:194 +#: ../nova/virt/xenapi/volume_utils.py:107 #, python-format -msgid "IP %s leased that isn't associated" +msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s" msgstr "" -#: nova/network/manager.py:197 +#: ../nova/virt/xenapi/volume_utils.py:111 #, python-format -msgid "IP %s leased to bad mac %s vs %s" -msgstr "IP %s asociada a una mac incorrecta %s vs %s" +msgid "Forgetting SR %s done." +msgstr "Olvidando SR %s completado." -#: nova/network/manager.py:205 +#: ../nova/virt/xenapi/volume_utils.py:113 #, python-format -msgid "IP %s leased that was already deallocated" +msgid "Ignoring exception %(exc)s when forgetting SR %(sr_ref)s" msgstr "" -#: nova/network/manager.py:214 +#: ../nova/virt/xenapi/volume_utils.py:123 #, python-format -msgid "IP %s released that isn't associated" +msgid "Unable to introduce VDI on SR %s" +msgstr "Incapaz de insertar VDI en SR %s" + +#: ../nova/virt/xenapi/volume_utils.py:128 +#, python-format +msgid "Unable to get record of VDI %s on" +msgstr "Imposible obtener copia del VDI %s en" + +#: ../nova/virt/xenapi/volume_utils.py:146 +#, python-format +msgid "Unable to introduce VDI for SR %s" +msgstr "Inposible insertar VDI para SR %s" + +#: ../nova/virt/xenapi/volume_utils.py:175 +#, python-format +msgid "Unable to obtain target information %(device_path)s, %(mountpoint)s" msgstr "" -#: nova/network/manager.py:217 +#: ../nova/virt/xenapi/volume_utils.py:197 +#, python-format +msgid "Mountpoint cannot be translated: %s" +msgstr "Punto de montaje no puede ser traducido: %s" + +#: ../nova/objectstore/image.py:262 #, python-format -msgid "IP %s released from bad mac %s vs %s" +msgid "Failed to decrypt private key: %s" msgstr "" -#: nova/network/manager.py:220 +#: 
../nova/objectstore/image.py:269 #, python-format -msgid "IP %s released that was not leased" +msgid "Failed to decrypt initialization vector: %s" msgstr "" -#: nova/network/manager.py:442 +#: ../nova/objectstore/image.py:277 #, python-format -msgid "Dissassociated %s stale fixed ip(s)" +msgid "Failed to decrypt image file %(image_file)s: %(err)s" msgstr "" -#: nova/objectstore/handler.py:106 +#: ../nova/objectstore/handler.py:106 #, python-format msgid "Unknown S3 value type %r" msgstr "Tipo de valor S3 %r desconocido" -#: nova/objectstore/handler.py:137 +#: ../nova/objectstore/handler.py:137 msgid "Authenticated request" msgstr "Petición autenticada" -#: nova/objectstore/handler.py:182 +#: ../nova/objectstore/handler.py:182 msgid "List of buckets requested" msgstr "Listado de cubos solicitado" -#: nova/objectstore/handler.py:209 +#: ../nova/objectstore/handler.py:209 #, python-format msgid "List keys for bucket %s" msgstr "Lista de claves para el cubo %s" -#: nova/objectstore/handler.py:217 +#: ../nova/objectstore/handler.py:217 #, python-format msgid "Unauthorized attempt to access bucket %s" msgstr "Intento no autorizado para acceder al cubo %s" -#: nova/objectstore/handler.py:235 +#: ../nova/objectstore/handler.py:235 #, python-format msgid "Creating bucket %s" msgstr "Creando el cubo %s" -#: nova/objectstore/handler.py:245 +#: ../nova/objectstore/handler.py:245 #, python-format msgid "Deleting bucket %s" msgstr "Eliminando el cubo %s" -#: nova/objectstore/handler.py:249 +#: ../nova/objectstore/handler.py:249 #, python-format msgid "Unauthorized attempt to delete bucket %s" msgstr "Intento no autorizado de eliminar el cubo %s" -#: nova/objectstore/handler.py:271 +#: ../nova/objectstore/handler.py:273 #, python-format -msgid "Getting object: %s / %s" -msgstr "Obteniendo objeto: %s / %s" +msgid "Getting object: %(bname)s / %(nm)s" +msgstr "" -#: nova/objectstore/handler.py:274 +#: ../nova/objectstore/handler.py:276 #, python-format -msgid "Unauthorized attempt 
to get object %s from bucket %s" -msgstr "Intento no autorizado de obtener el objeto %s en el cubo %s" +msgid "Unauthorized attempt to get object %(nm)s from bucket %(bname)s" +msgstr "" -#: nova/objectstore/handler.py:292 +#: ../nova/objectstore/handler.py:296 #, python-format -msgid "Putting object: %s / %s" -msgstr "Colocando objeto: %s / %s" +msgid "Putting object: %(bname)s / %(nm)s" +msgstr "" -#: nova/objectstore/handler.py:295 +#: ../nova/objectstore/handler.py:299 #, python-format -msgid "Unauthorized attempt to upload object %s to bucket %s" -msgstr "Intento no autorizado de subir el objeto %s al cubo %s" +msgid "Unauthorized attempt to upload object %(nm)s to bucket %(bname)s" +msgstr "" -#: nova/objectstore/handler.py:314 +#: ../nova/objectstore/handler.py:318 #, python-format -msgid "Deleting object: %s / %s" -msgstr "Eliminando objeto: %s / %s" +msgid "Deleting object: %(bname)s / %(nm)s" +msgstr "" -#: nova/objectstore/handler.py:393 +#: ../nova/objectstore/handler.py:322 +#, python-format +msgid "Unauthorized attempt to delete object %(nm)s from bucket %(bname)s" +msgstr "" + +#: ../nova/objectstore/handler.py:396 #, python-format msgid "Not authorized to upload image: invalid directory %s" msgstr "No autorizado para subir imagen: directorio incorrecto %s" -#: nova/objectstore/handler.py:401 +#: ../nova/objectstore/handler.py:404 #, python-format msgid "Not authorized to upload image: unauthorized bucket %s" msgstr "No autorizado para subir imagen: cubo %s no autorizado" -#: nova/objectstore/handler.py:406 +#: ../nova/objectstore/handler.py:409 #, python-format msgid "Starting image upload: %s" msgstr "Comenzando la subida de la imagen: %s" -#: nova/objectstore/handler.py:420 +#: ../nova/objectstore/handler.py:423 #, python-format msgid "Not authorized to update attributes of image %s" msgstr "No autorizado para actualizar los atributos de la imagen %s" -#: nova/objectstore/handler.py:428 +#: ../nova/objectstore/handler.py:431 #, python-format 
-msgid "Toggling publicity flag of image %s %r" -msgstr "Cambiando los atributos de publicidad de la imagen %s %r" +msgid "Toggling publicity flag of image %(image_id)s %(newstatus)r" +msgstr "" -#: nova/objectstore/handler.py:433 +#. other attributes imply update +#: ../nova/objectstore/handler.py:436 #, python-format msgid "Updating user fields on image %s" msgstr "Actualizando los campos de usuario de la imagen %s" -#: nova/objectstore/handler.py:447 +#: ../nova/objectstore/handler.py:450 #, python-format msgid "Unauthorized attempt to delete image %s" msgstr "Intento no autorizado de borrar la imagen %s" -#: nova/objectstore/handler.py:452 +#: ../nova/objectstore/handler.py:455 #, python-format msgid "Deleted image: %s" msgstr "Eliminada imagen: %s" -#: nova/scheduler/chance.py:37 nova/scheduler/simple.py:73 -#: nova/scheduler/simple.py:106 nova/scheduler/simple.py:118 -msgid "No hosts found" -msgstr "No se han encontrado hosts" +#: ../nova/auth/manager.py:259 +#, python-format +msgid "Looking up user: %r" +msgstr "Buscando usuario: %r" -#: nova/scheduler/driver.py:66 -msgid "Must implement a fallback schedule" -msgstr "Debe de implementar un horario de reserva" +#: ../nova/auth/manager.py:263 +#, python-format +msgid "Failed authorization for access key %s" +msgstr "Fallo de autorización para la clave de acceso %s" + +#: ../nova/auth/manager.py:264 +#, python-format +msgid "No user found for access key %s" +msgstr "No se ha encontrado usuario para la clave de acceso %s" -#: nova/scheduler/manager.py:69 +#: ../nova/auth/manager.py:270 #, python-format -msgid "Casting to %s %s for %s" +msgid "Using project name = user name (%s)" +msgstr "Utilizando nombre de proyecto = nombre de usuario (%s)" + +#: ../nova/auth/manager.py:277 +#, python-format +msgid "failed authorization: no project named %(pjid)s (user=%(uname)s)" msgstr "" -#: nova/scheduler/simple.py:63 -msgid "All hosts have too many cores" -msgstr "Todos los hosts tienen demasiados cores" +#: 
../nova/auth/manager.py:279 +#, python-format +msgid "No project called %s could be found" +msgstr "No se ha podido encontrar un proyecto con nombre %s" -#: nova/scheduler/simple.py:95 -msgid "All hosts have too many gigabytes" -msgstr "Todos los hosts tienen demasiados gigabytes" +#: ../nova/auth/manager.py:287 +#, python-format +msgid "" +"Failed authorization: user %(uname)s not admin and not member of project " +"%(pjname)s" +msgstr "" -#: nova/scheduler/simple.py:115 -msgid "All hosts have too many networks" -msgstr "Todos los hosts tienen demasiadas redes" +#: ../nova/auth/manager.py:289 +#, python-format +msgid "User %(uid)s is not a member of project %(pjid)s" +msgstr "" -#: nova/tests/test_cloud.py:198 -msgid "Can't test instances without a real virtual env." -msgstr "No puedo probar las imágenes sin un entorno real virtual" +#: ../nova/auth/manager.py:298 ../nova/auth/manager.py:309 +#, python-format +msgid "Invalid signature for user %s" +msgstr "Firma invalida para el usuario %s" + +#: ../nova/auth/manager.py:299 ../nova/auth/manager.py:310 +msgid "Signature does not match" +msgstr "Las firmas no concuerdan" -#: nova/tests/test_cloud.py:210 +#: ../nova/auth/manager.py:380 +msgid "Must specify project" +msgstr "Debes especificar un proyecto" + +#: ../nova/auth/manager.py:414 #, python-format -msgid "Need to watch instance %s until it's running..." -msgstr "Hay que vigilar la instancia %s hasta que este en ejecución..." 
+msgid "The %s role can not be found" +msgstr "El rol %s no se ha podido encontrar" -#: nova/tests/test_compute.py:104 +#: ../nova/auth/manager.py:416 #, python-format -msgid "Running instances: %s" -msgstr "Ejecutando instancias: %s" +msgid "The %s role is global only" +msgstr "El rol %s es únicamente global" -#: nova/tests/test_compute.py:110 +#: ../nova/auth/manager.py:420 #, python-format -msgid "After terminating instances: %s" -msgstr "Después de terminar las instancias: %s" +msgid "Adding role %(role)s to user %(uid)s in project %(pid)s" +msgstr "" -#: nova/tests/test_rpc.py:89 +#: ../nova/auth/manager.py:423 #, python-format -msgid "Nested received %s, %s" +msgid "Adding sitewide role %(role)s to user %(uid)s" msgstr "" -#: nova/tests/test_rpc.py:94 +#: ../nova/auth/manager.py:448 #, python-format -msgid "Nested return %s" +msgid "Removing role %(role)s from user %(uid)s on project %(pid)s" msgstr "" -#: nova/tests/test_rpc.py:119 nova/tests/test_rpc.py:125 +#: ../nova/auth/manager.py:451 #, python-format -msgid "Received %s" -msgstr "Recibido %s" +msgid "Removing sitewide role %(role)s from user %(uid)s" +msgstr "" -#: nova/tests/test_volume.py:162 +#: ../nova/auth/manager.py:515 #, python-format -msgid "Target %s allocated" -msgstr "Destino %s asignado" +msgid "Created project %(name)s with manager %(manager_user)s" +msgstr "" -#: nova/virt/connection.py:73 -msgid "Failed to open connection to the hypervisor" -msgstr "Fallo al abrir conexión con el hypervisor" +#: ../nova/auth/manager.py:533 +#, python-format +msgid "modifying project %s" +msgstr "modificando proyecto %s" -#: nova/virt/fake.py:210 +#: ../nova/auth/manager.py:545 #, python-format -msgid "Instance %s Not Found" -msgstr "La instancia %s no ha sido encontrada" +msgid "Adding user %(uid)s to project %(pid)s" +msgstr "" -#: nova/virt/hyperv.py:118 -msgid "In init host" -msgstr "En el host inicial" +#: ../nova/auth/manager.py:566 +#, python-format +msgid "Remove user %(uid)s from project 
%(pid)s" +msgstr "" -#: nova/virt/hyperv.py:131 +#: ../nova/auth/manager.py:592 #, python-format -msgid "Attempt to create duplicate vm %s" -msgstr "Intento de crear una vm duplicada %s" +msgid "Deleting project %s" +msgstr "Eliminando proyecto %s" -#: nova/virt/hyperv.py:148 +#: ../nova/auth/manager.py:650 #, python-format -msgid "Starting VM %s " -msgstr "Comenzando VM %s " +msgid "Created user %(rvname)s (admin: %(rvadmin)r)" +msgstr "" -#: nova/virt/hyperv.py:150 +#: ../nova/auth/manager.py:659 #, python-format -msgid "Started VM %s " -msgstr "VM %s iniciada " +msgid "Deleting user %s" +msgstr "Eliminando usuario %s" -#: nova/virt/hyperv.py:152 +#: ../nova/auth/manager.py:669 #, python-format -msgid "spawn vm failed: %s" -msgstr "Inicio de vm fallido: %s" +msgid "Access Key change for user %s" +msgstr "Cambio de clave de acceso para el usuario %s" -#: nova/virt/hyperv.py:169 +#: ../nova/auth/manager.py:671 #, python-format -msgid "Failed to create VM %s" -msgstr "Fallo al crear la VM %s" +msgid "Secret Key change for user %s" +msgstr "Cambio de clave secreta para el usuario %s" -#: nova/virt/hyperv.py:171 nova/virt/xenapi/vm_utils.py:125 +#: ../nova/auth/manager.py:673 #, python-format -msgid "Created VM %s..." -msgstr "Creada VM %s..." +msgid "Admin status set to %(admin)r for user %(uid)s" +msgstr "" -#: nova/virt/hyperv.py:188 +#: ../nova/auth/manager.py:722 #, python-format -msgid "Set memory for vm %s..." -msgstr "Se ha establecido la memoria para vm %s..." +msgid "No vpn data for project %s" +msgstr "No hay datos vpn para el proyecto %s" -#: nova/virt/hyperv.py:198 +#: ../nova/service.py:161 #, python-format -msgid "Set vcpus for vm %s..." -msgstr "Establecidas vcpus para vm %s..." 
+msgid "Starting %(topic)s node (version %(vcs_string)s)" +msgstr "" + +#: ../nova/service.py:174 +msgid "Service killed that has no database entry" +msgstr "Se detuvo un servicio sin entrada en la base de datos" + +#: ../nova/service.py:195 +msgid "The service database object disappeared, Recreating it." +msgstr "El servicio objeto de base de datos ha desaparecido, recreándolo." + +#: ../nova/service.py:207 +msgid "Recovered model server connection!" +msgstr "Recuperada la conexión al servidor de modelos." + +#: ../nova/service.py:213 +msgid "model server went away" +msgstr "el servidor de modelos se ha ido" -#: nova/virt/hyperv.py:202 +#: ../nova/auth/ldapdriver.py:174 #, python-format -msgid "Creating disk for %s by attaching disk file %s" +msgid "LDAP user %s already exists" msgstr "" -"Creando disco para %s a través de la asignación del fichero de disco %s" -#: nova/virt/hyperv.py:227 +#: ../nova/auth/ldapdriver.py:205 #, python-format -msgid "Failed to add diskdrive to VM %s" -msgstr "Fallo al añadir unidad de disco a la VM %s" +msgid "LDAP object for %s doesn't exist" +msgstr "El objeto LDAP para %s no existe" -#: nova/virt/hyperv.py:230 +#: ../nova/auth/ldapdriver.py:348 #, python-format -msgid "New disk drive path is %s" -msgstr "La nueva ruta para unidad de disco es %s" +msgid "User %s doesn't exist" +msgstr "" -#: nova/virt/hyperv.py:247 +#: ../nova/auth/ldapdriver.py:472 #, python-format -msgid "Failed to add vhd file to VM %s" -msgstr "Fallo al añadir el fichero vhd a la VM %s" +msgid "Group can't be created because group %s already exists" +msgstr "" -#: nova/virt/hyperv.py:249 +#: ../nova/auth/ldapdriver.py:478 #, python-format -msgid "Created disk for %s" -msgstr "Discos creados para %s" +msgid "Group can't be created because user %s doesn't exist" +msgstr "" -#: nova/virt/hyperv.py:253 +#: ../nova/auth/ldapdriver.py:495 #, python-format -msgid "Creating nic for %s " -msgstr "Creando nic para %s " +msgid "User %s can't be searched in group because 
the user doesn't exist" +msgstr "" -#: nova/virt/hyperv.py:272 -msgid "Failed creating a port on the external vswitch" -msgstr "Fallo al crear un puerto en el vswitch externo" +#: ../nova/auth/ldapdriver.py:507 +#, python-format +msgid "User %s can't be added to the group because the user doesn't exist" +msgstr "" -#: nova/virt/hyperv.py:273 +#: ../nova/auth/ldapdriver.py:510 ../nova/auth/ldapdriver.py:521 #, python-format -msgid "Failed creating port for %s" -msgstr "Fallo creando puerto para %s" +msgid "The group at dn %s doesn't exist" +msgstr "" -#: nova/virt/hyperv.py:275 +#: ../nova/auth/ldapdriver.py:513 #, python-format -msgid "Created switch port %s on switch %s" -msgstr "Creado puerto %s en el switch %s" +msgid "User %(uid)s is already a member of the group %(group_dn)s" +msgstr "" -#: nova/virt/hyperv.py:285 +#: ../nova/auth/ldapdriver.py:524 #, python-format -msgid "Failed to add nic to VM %s" -msgstr "Fallo al añadir nic a la VM %s" +msgid "" +"User %s can't be removed from the group because the user doesn't exist" +msgstr "" -#: nova/virt/hyperv.py:287 +#: ../nova/auth/ldapdriver.py:528 #, python-format -msgid "Created nic for %s " -msgstr "Creando nic para %s " +msgid "User %s is not a member of the group" +msgstr "" -#: nova/virt/hyperv.py:320 +#: ../nova/auth/ldapdriver.py:542 #, python-format -msgid "WMI job failed: %s" -msgstr "Trabajo WMI falló: %s" +msgid "" +"Attempted to remove the last member of a group. Deleting the group at %s " +"instead." +msgstr "" +"Se ha intentado eliminar el último miembro de un grupo. Eliminando el grupo " +"%s en su lugar." 
-#: nova/virt/hyperv.py:322 +#: ../nova/auth/ldapdriver.py:549 #, python-format -msgid "WMI job succeeded: %s, Elapsed=%s " -msgstr "Trabajo WMI ha tenido exito: %s, Transcurrido=%s " +msgid "User %s can't be removed from all because the user doesn't exist" +msgstr "" -#: nova/virt/hyperv.py:358 +#: ../nova/auth/ldapdriver.py:564 #, python-format -msgid "Got request to destroy vm %s" -msgstr "Recibida solicitud para destruir vm %s" +msgid "Group at dn %s doesn't exist" +msgstr "El grupo con dn %s no existe" -#: nova/virt/hyperv.py:383 +#: ../nova/virt/xenapi/network_utils.py:40 #, python-format -msgid "Failed to destroy vm %s" -msgstr "Fallo al destruir vm %s" +msgid "Found non-unique network for bridge %s" +msgstr "Encontrada una red no única para el puente %s" -#: nova/virt/hyperv.py:389 +#: ../nova/virt/xenapi/network_utils.py:43 #, python-format -msgid "Del: disk %s vm %s" -msgstr "Del: disco %s vm %s" +msgid "Found no network for bridge %s" +msgstr "No se ha encontrado red para el puente %s" -#: nova/virt/hyperv.py:405 +#: ../nova/api/ec2/admin.py:97 #, python-format -msgid "" -"Got Info for vm %s: state=%s, mem=%s, num_cpu=%s, " -"cpu_time=%s" +msgid "Creating new user: %s" +msgstr "Creando nuevo usuario: %s" + +#: ../nova/api/ec2/admin.py:105 +#, python-format +msgid "Deleting user: %s" +msgstr "Eliminando usuario: %s" + +#: ../nova/api/ec2/admin.py:127 +#, python-format +msgid "Adding role %(role)s to user %(user)s for project %(project)s" msgstr "" -"Obtenida información para vm %s: state=%s, mem=%s, num_cpu=%s, cpu_time=%s" -#: nova/virt/hyperv.py:424 nova/virt/xenapi/vm_utils.py:301 +#: ../nova/api/ec2/admin.py:131 #, python-format -msgid "duplicate name found: %s" -msgstr "se ha encontrado un nombre duplicado: %s" +msgid "Adding sitewide role %(role)s to user %(user)s" +msgstr "" -#: nova/virt/hyperv.py:444 +#: ../nova/api/ec2/admin.py:137 #, python-format -msgid "Successfully changed vm state of %s to %s" -msgstr "Cambio de estado de la vm con éxito de 
%s a %s" +msgid "Removing role %(role)s from user %(user)s for project %(project)s" +msgstr "" -#: nova/virt/hyperv.py:447 nova/virt/hyperv.py:449 +#: ../nova/api/ec2/admin.py:141 #, python-format -msgid "Failed to change vm state of %s to %s" -msgstr "Fallo al cambiar el estado de la vm de %s a %s" +msgid "Removing sitewide role %(role)s from user %(user)s" +msgstr "" -#: nova/virt/images.py:70 +#: ../nova/api/ec2/admin.py:146 ../nova/api/ec2/admin.py:223 +msgid "operation must be add or remove" +msgstr "la operación debe ser añadir o eliminar" + +#: ../nova/api/ec2/admin.py:159 #, python-format -msgid "Finished retreving %s -- placed in %s" -msgstr "Finalizada la obtención de %s -- coloado en %s" +msgid "Getting x509 for user: %(name)s on project: %(project)s" +msgstr "" -#: nova/virt/libvirt_conn.py:144 +#: ../nova/api/ec2/admin.py:177 #, python-format -msgid "Connecting to libvirt: %s" -msgstr "Conectando a libvirt: %s" +msgid "Create project %(name)s managed by %(manager_user)s" +msgstr "" -#: nova/virt/libvirt_conn.py:157 -msgid "Connection to libvirt broke" -msgstr "Conexión a libvirt rota" +#: ../nova/api/ec2/admin.py:190 +#, python-format +msgid "Modify project: %(name)s managed by %(manager_user)s" +msgstr "" -#: nova/virt/libvirt_conn.py:229 +#: ../nova/api/ec2/admin.py:200 #, python-format -msgid "instance %s: deleting instance files %s" -msgstr "instancia %s: eliminando los ficheros de la instancia %s" +msgid "Delete project: %s" +msgstr "Borrar proyecto: %s" -#: nova/virt/libvirt_conn.py:271 +#: ../nova/api/ec2/admin.py:214 #, python-format -msgid "No disk at %s" -msgstr "No hay disco en %s" +msgid "Adding user %(user)s to project %(project)s" +msgstr "" -#: nova/virt/libvirt_conn.py:278 -msgid "Instance snapshotting is not supported for libvirtat this time" +#: ../nova/api/ec2/admin.py:218 +#, python-format +msgid "Removing user %(user)s from project %(project)s" msgstr "" -"El snapshotting de instancias no está soportado en libvirt en este momento" 
-#: nova/virt/libvirt_conn.py:294 #, python-format -msgid "instance %s: rebooted" -msgstr "instancia %s: reiniciada" +#~ msgid "" +#~ "%s\n" +#~ "Command: %s\n" +#~ "Exit code: %s\n" +#~ "Stdout: %r\n" +#~ "Stderr: %r" +#~ msgstr "" +#~ "%s\n" +#~ "Comando: %s\n" +#~ "Código de salida: %s\n" +#~ "Stdout: %s\n" +#~ "Stderr: %r" -#: nova/virt/libvirt_conn.py:297 #, python-format -msgid "_wait_for_reboot failed: %s" -msgstr "_wait_for_reboot falló: %s" +#~ msgid "(%s) publish (key: %s) %s" +#~ msgstr "(%s) públicar (clave: %s) %s" -#: nova/virt/libvirt_conn.py:340 #, python-format -msgid "instance %s: rescued" -msgstr "instancia %s: rescatada" +#~ msgid "AMQP server on %s:%d is unreachable. Trying again in %d seconds." +#~ msgstr "" +#~ "El servidor AMQP en %s:%d no se puede alcanzar. Se reintentará en %d " +#~ "segundos." -#: nova/virt/libvirt_conn.py:343 #, python-format -msgid "_wait_for_rescue failed: %s" -msgstr "_wait_for_rescue falló: %s" +#~ msgid "Binding %s to %s with key %s" +#~ msgstr "Asociando %s a %s con clave %s" -#: nova/virt/libvirt_conn.py:370 #, python-format -msgid "instance %s: is running" -msgstr "instancia %s: está ejecutándose" +#~ msgid "Getting from %s: %s" +#~ msgstr "Obteniendo desde %s: %s" -#: nova/virt/libvirt_conn.py:381 #, python-format -msgid "instance %s: booted" -msgstr "instancia %s: arrancada" +#~ msgid "Starting %s node" +#~ msgstr "Inciando nodo %s" -#: nova/virt/libvirt_conn.py:384 nova/virt/xenapi/vmops.py:116 #, python-format -msgid "instance %s: failed to boot" -msgstr "insntancia %s: falló al arrancar" +#~ msgid "Data store %s is unreachable. Trying again in %d seconds." +#~ msgstr "" +#~ "El almacen de datos %s es inalcanzable. Reintentandolo en %d segundos." 
-#: nova/virt/libvirt_conn.py:395 #, python-format -msgid "virsh said: %r" -msgstr "virsh dijo: %r" +#~ msgid "Couldn't get IP, using 127.0.0.1 %s" +#~ msgstr "No puedo obtener IP, usando 127.0.0.1 %s" -#: nova/virt/libvirt_conn.py:399 -msgid "cool, it's a device" -msgstr "genial, es un dispositivo" +#, python-format +#~ msgid "" +#~ "Access key %s has had %d failed authentications and will be locked out for " +#~ "%d minutes." +#~ msgstr "" +#~ "La clave de acceso %s ha tenido %d fallos de autenticación y se bloqueará " +#~ "por %d minutos." -#: nova/virt/libvirt_conn.py:407 #, python-format -msgid "data: %r, fpath: %r" -msgstr "datos: %r, fpath: %r" +#~ msgid "arg: %s\t\tval: %s" +#~ msgstr "arg: %s \t \t val: %s" -#: nova/virt/libvirt_conn.py:415 #, python-format -msgid "Contents of file %s: %r" -msgstr "Contenidos del fichero %s: %r" +#~ msgid "Authenticated Request For %s:%s)" +#~ msgstr "Solicitud de autenticación para %s:%s" -#: nova/virt/libvirt_conn.py:449 #, python-format -msgid "instance %s: Creating image" -msgstr "instancia %s: Creando imagen" +#~ msgid "Adding role %s to user %s for project %s" +#~ msgstr "Añadiendo rol %s al usuario %s para el proyecto %s" -#: nova/virt/libvirt_conn.py:505 #, python-format -msgid "instance %s: injecting key into image %s" -msgstr "instancia %s: inyectando clave en la imagen %s" +#~ msgid "Removing role %s from user %s for project %s" +#~ msgstr "Eliminando rol %s del usuario %s para el proyecto %s" -#: nova/virt/libvirt_conn.py:508 #, python-format -msgid "instance %s: injecting net into image %s" -msgstr "instancia %s: inyectando red en la imagen %s" +#~ msgid "Unauthorized request for controller=%s and action=%s" +#~ msgstr "Solicitud no autorizada para controller=%s y action=%s" -#: nova/virt/libvirt_conn.py:516 #, python-format -msgid "instance %s: ignoring error injecting data into image %s (%s)" -msgstr "" -"instancia %s: ignorando el error al inyectar datos en la imagen %s (%s)" +#~ msgid "Getting x509 for 
user: %s on project: %s" +#~ msgstr "Obteniendo x509 para el usuario: %s en el proyecto %s" -#: nova/virt/libvirt_conn.py:544 nova/virt/libvirt_conn.py:547 #, python-format -msgid "instance %s: starting toXML method" -msgstr "instancia %s: comenzando método toXML" +#~ msgid "Create project %s managed by %s" +#~ msgstr "Creación del proyecto %s gestionada por %s" -#: nova/virt/libvirt_conn.py:589 #, python-format -msgid "instance %s: finished toXML method" -msgstr "instancia %s: finalizado método toXML" +#~ msgid "Removing user %s from project %s" +#~ msgstr "Eliminando usuario %s del proyecto %s" -#: nova/virt/xenapi_conn.py:113 -msgid "" -"Must specify xenapi_connection_url, xenapi_connection_username (optionally), " -"and xenapi_connection_password to use connection_type=xenapi" -msgstr "" -"Debes especificar xenapi_connection_url, xenapi_connection_username " -"(opcional), y xenapi_connection_password para usar connection_type=xenapi" +#, python-format +#~ msgid "Adding user %s to project %s" +#~ msgstr "Añadiendo usuario %s al proyecto %s" -#: nova/virt/xenapi_conn.py:263 #, python-format -msgid "Task [%s] %s status: success %s" -msgstr "Tarea [%s] %s estado: éxito %s" +#~ msgid "Unsupported API request: controller = %s,action = %s" +#~ msgstr "Solicitud de API no soportada: controller=%s,action=%s" -#: nova/virt/xenapi_conn.py:271 #, python-format -msgid "Task [%s] %s status: %s %s" -msgstr "Tarea [%s] %s estado: %s %s" +#~ msgid "Associate address %s to instance %s" +#~ msgstr "Asociar dirección %s a la instancia %s" -#: nova/virt/xenapi_conn.py:287 nova/virt/xenapi_conn.py:300 #, python-format -msgid "Got exception: %s" -msgstr "Obtenida excepción %s" +#~ msgid "Attach volume %s to instacne %s at %s" +#~ msgstr "Asociar volumen %s a la instancia %s en %s" -#: nova/virt/xenapi/fake.py:72 #, python-format -msgid "%s: _db_content => %s" -msgstr "%s: _db_content => %s" +#~ msgid "Registered image %s with id %s" +#~ msgstr "Registrada imagen %s con id %s" -#: 
nova/virt/xenapi/fake.py:247 nova/virt/xenapi/fake.py:338 -#: nova/virt/xenapi/fake.py:356 nova/virt/xenapi/fake.py:404 -msgid "Raising NotImplemented" -msgstr "Lanzando NotImplemented" +#, python-format +#~ msgid "User %s is already a member of the group %s" +#~ msgstr "El usuario %s ya es miembro de el grupo %s" -#: nova/virt/xenapi/fake.py:249 #, python-format -msgid "xenapi.fake does not have an implementation for %s" -msgstr "xenapi.fake no tiene una implementación para %s" +#~ msgid "User %s is not a member of project %s" +#~ msgstr "El usuario %s no es miembro del proyecto %s" -#: nova/virt/xenapi/fake.py:283 #, python-format -msgid "Calling %s %s" -msgstr "Llamando %s %s" +#~ msgid "failed authorization: no project named %s (user=%s)" +#~ msgstr "" +#~ "fallo de autorización: no existe proyecto con el nombre %s (usuario=%s)" -#: nova/virt/xenapi/fake.py:288 #, python-format -msgid "Calling getter %s" -msgstr "Llanado al adquiridor %s" +#~ msgid "Failed authorization: user %s not admin and not member of project %s" +#~ msgstr "" +#~ "Fallo de autorización: el usuario %s no es administrador y no es miembro del " +#~ "proyecto %s" -#: nova/virt/xenapi/fake.py:340 #, python-format -msgid "" -"xenapi.fake does not have an implementation for %s or it has been called " -"with the wrong number of arguments" -msgstr "" -"xenapi.fake no tiene una implementación para %s o ha sido llamada con un " -"número incorrecto de argumentos" +#~ msgid "Created user %s (admin: %r)" +#~ msgstr "Creado usuario %s (administrador: %r)" -#: nova/virt/xenapi/network_utils.py:40 #, python-format -msgid "Found non-unique network for bridge %s" -msgstr "Encontrada una red no única para el puente %s" +#~ msgid "Created project %s with manager %s" +#~ msgstr "Proyecto %s creado con administrador %s" -#: nova/virt/xenapi/network_utils.py:43 #, python-format -msgid "Found no network for bridge %s" -msgstr "No se ha encontrado red para el puente %s" +#~ msgid "Removing role %s from user %s on 
project %s" +#~ msgstr "Eliminando rol %s al usuario %s en el proyecto %s" -#: nova/virt/xenapi/vm_utils.py:127 #, python-format -msgid "Created VM %s as %s." -msgstr "Creada VM %s cómo %s" +#~ msgid "Adding role %s to user %s in project %s" +#~ msgstr "Añadiendo rol %s al usuario %s en el proyecto %s" -#: nova/virt/xenapi/vm_utils.py:147 #, python-format -msgid "Creating VBD for VM %s, VDI %s ... " -msgstr "Creando VBD para VM %s, VDI %s... " +#~ msgid "Remove user %s from project %s" +#~ msgstr "Eliminar usuario %s del proyecto %s" -#: nova/virt/xenapi/vm_utils.py:149 #, python-format -msgid "Created VBD %s for VM %s, VDI %s." -msgstr "Creado VBD %s for VM %s, VDI %s." +#~ msgid "Admin status set to %r for user %s" +#~ msgstr "El estado del administrador se ha fijado a %r para el usuario %s" -#: nova/virt/xenapi/vm_utils.py:165 #, python-format -msgid "VBD not found in instance %s" -msgstr "VBD no encontrado en la instancia %s" +#~ msgid "Going to try and terminate %s" +#~ msgstr "Se va a probar y terminar %s" -#: nova/virt/xenapi/vm_utils.py:175 #, python-format -msgid "Unable to unplug VBD %s" -msgstr "Imposible desconectar VBD %s" +#~ msgid "Casting to scheduler for %s/%s's instance %s" +#~ msgstr "Llamando al planificar para %s/%s insntancia %s" -#: nova/virt/xenapi/vm_utils.py:187 #, python-format -msgid "Unable to destroy VBD %s" -msgstr "Imposible destruir VBD %s" +#~ msgid "Quota exceeeded for %s, tried to run %s instances" +#~ msgstr "Quota superada por %s, intentando lanzar %s instancias" -#: nova/virt/xenapi/vm_utils.py:202 #, python-format -msgid "Creating VIF for VM %s, network %s." -msgstr "Creando VIF para VM %s, red %s." +#~ msgid "check_instance_lock: arguments: |%s| |%s| |%s|" +#~ msgstr "check_instance_lock: arguments: |%s| |%s| |%s|" -#: nova/virt/xenapi/vm_utils.py:205 #, python-format -msgid "Created VIF %s for VM %s, network %s." -msgstr "Creado VIF %s para VM %s, red %s." 
+#~ msgid "Input partition size not evenly divisible by sector size: %d / %d" +#~ msgstr "" +#~ "El tamaño de la partición de entrada no es divisible de forma uniforme por " +#~ "el tamaño del sector: %d / %d" -#: nova/virt/xenapi/vm_utils.py:216 #, python-format -msgid "Snapshotting VM %s with label '%s'..." -msgstr "Creando snapshot de la VM %s con la etiqueta '%s'..." +#~ msgid "Bytes for local storage not evenly divisible by sector size: %d / %d" +#~ msgstr "" +#~ "Los bytes del almacenamiento local no son divisibles de forma uniforme por " +#~ "el tamaño del sector: %d / %d" -#: nova/virt/xenapi/vm_utils.py:229 #, python-format -msgid "Created snapshot %s from VM %s." -msgstr "Creando snapshot %s de la VM %s" +#~ msgid "volume %s: creating lv of size %sG" +#~ msgstr "volumen %s: creando lv de tamaño %sG" -#: nova/virt/xenapi/vm_utils.py:243 #, python-format -msgid "Asking xapi to upload %s as '%s'" -msgstr "Solicitando a xapi la subida de %s cómo %s'" +#~ msgid "Disassociating address %s" +#~ msgstr "Desasociando la dirección %s" -#: nova/virt/xenapi/vm_utils.py:261 #, python-format -msgid "Asking xapi to fetch %s as %s" -msgstr "Solicitando a xapi obtener %s cómo %s" +#~ msgid "trying to reboot a non-running instance: %s (state: %s excepted: %s)" +#~ msgstr "" +#~ "intentando reiniciar una instancia que no está en ejecución: %s (estado: %s " +#~ "esperado: %s)" -#: nova/virt/xenapi/vm_utils.py:279 #, python-format -msgid "Looking up vdi %s for PV kernel" -msgstr "Buscando vid %s para el kernel PV" +#~ msgid "" +#~ "trying to snapshot a non-running instance: %s (state: %s excepted: %s)" +#~ msgstr "" +#~ "intentando crear un snapshot de una instancia que no está en ejecución: %s " +#~ "(estado: %s esperado: %s)" -#: nova/virt/xenapi/vm_utils.py:290 #, python-format -msgid "PV Kernel in VDI:%d" -msgstr "PV Kernel en VDI:%d" +#~ msgid "Detach volume %s from mountpoint %s on instance %s" +#~ msgstr "Desvinculando volumen %s del punto de montaje %s en la instancia 
%s" -#: nova/virt/xenapi/vm_utils.py:318 #, python-format -msgid "VDI %s is still available" -msgstr "VDI %s está todavía disponible" +#~ msgid "Cannot get blockstats for \"%s\" on \"%s\"" +#~ msgstr "No puedo obtener estadísticas del bloque para \"%s\" en \"%s\"" -#: nova/virt/xenapi/vm_utils.py:331 #, python-format -msgid "(VM_UTILS) xenserver vm state -> |%s|" -msgstr "(VM_UTILS) xenserver vm state -> |%s|" +#~ msgid "Cannot get ifstats for \"%s\" on \"%s\"" +#~ msgstr "No puedo obtener estadísticas de la interfaz para \"%s\" en \"%s\"" -#: nova/virt/xenapi/vm_utils.py:333 #, python-format -msgid "(VM_UTILS) xenapi power_state -> |%s|" -msgstr "(VM_UTILS) xenapi power_state -> |%s|" +#~ msgid "No instance for id %s" +#~ msgstr "No hay instancia con id %s" -#: nova/virt/xenapi/vm_utils.py:390 #, python-format -msgid "VHD %s has parent %s" -msgstr "VHD %s tiene cómo padre a %s" +#~ msgid "no keypair for user %s, name %s" +#~ msgstr "no hay par de claves para el usuario %s, nombre %s" -#: nova/virt/xenapi/vm_utils.py:407 #, python-format -msgid "Re-scanning SR %s" -msgstr "Re-escaneando SR %s" +#~ msgid "No service for %s, %s" +#~ msgstr "No hay servicio para %s, %s" -#: nova/virt/xenapi/vm_utils.py:431 #, python-format -msgid "Parent %s doesn't match original parent %s, waiting for coalesce..." -msgstr "" -"El padre %s no concuerda con el padre original %s, esperando la unión..." 
+#~ msgid "No volume for id %s" +#~ msgstr "No hay volumen para el id %s" -#: nova/virt/xenapi/vm_utils.py:448 #, python-format -msgid "No VDIs found for VM %s" -msgstr "No se han encontrado VDI's para VM %s" +#~ msgid "No security group named %s for project: %s" +#~ msgstr "No hay un grupo de seguridad con nombre %s para el proyecto: %s" -#: nova/virt/xenapi/vm_utils.py:452 #, python-format -msgid "Unexpected number of VDIs (%s) found for VM %s" -msgstr "Número no esperado de VDIs (%s) encontrados para VM %s" +#~ msgid "Parallax returned HTTP error %d from request for /images/detail" +#~ msgstr "" +#~ "Parallax ha devuelto un error HTTP %d para la petición para /images/detail" -#: nova/virt/xenapi/vmops.py:62 #, python-format -msgid "Attempted to create non-unique name %s" -msgstr "Intentado la creación del nombre no único %s" +#~ msgid "Parallax returned HTTP error %d from request for /images" +#~ msgstr "Parallax ha devuelto un error HTTP %d a la petición para /images" -#: nova/virt/xenapi/vmops.py:99 #, python-format -msgid "Starting VM %s..." -msgstr "Iniciando VM %s..." +#~ msgid "IP %s leased to bad mac %s vs %s" +#~ msgstr "IP %s asociada a una mac incorrecta %s vs %s" -#: nova/virt/xenapi/vmops.py:101 #, python-format -msgid "Spawning VM %s created %s." -msgstr "Iniciando VM %s creado %s." 
+#~ msgid "Unauthorized attempt to get object %s from bucket %s" +#~ msgstr "Intento no autorizado de obtener el objeto %s en el cubo %s" -#: nova/virt/xenapi/vmops.py:112 #, python-format -msgid "Instance %s: booted" -msgstr "Instancia %s: iniciada" +#~ msgid "Getting object: %s / %s" +#~ msgstr "Obteniendo objeto: %s / %s" -#: nova/virt/xenapi/vmops.py:137 #, python-format -msgid "Instance not present %s" -msgstr "Instancia no existente %s" +#~ msgid "Putting object: %s / %s" +#~ msgstr "Colocando objeto: %s / %s" -#: nova/virt/xenapi/vmops.py:166 #, python-format -msgid "Starting snapshot for VM %s" -msgstr "Comenzando snapshot para la VM %s" +#~ msgid "Unauthorized attempt to upload object %s to bucket %s" +#~ msgstr "Intento no autorizado de subir el objeto %s al cubo %s" -#: nova/virt/xenapi/vmops.py:174 #, python-format -msgid "Unable to Snapshot %s: %s" -msgstr "Incapaz de realizar snapshot %s: %s" +#~ msgid "Deleting object: %s / %s" +#~ msgstr "Eliminando objeto: %s / %s" -#: nova/virt/xenapi/vmops.py:184 #, python-format -msgid "Finished snapshot and upload for VM %s" -msgstr "Finalizado el snapshot y la subida de la VM %s" +#~ msgid "Toggling publicity flag of image %s %r" +#~ msgstr "Cambiando los atributos de publicidad de la imagen %s %r" -#: nova/virt/xenapi/vmops.py:252 #, python-format -msgid "suspend: instance not present %s" -msgstr "suspendido: instancia no encontrada: %s" +#~ msgid "Creating disk for %s by attaching disk file %s" +#~ msgstr "" +#~ "Creando disco para %s a través de la asignación del fichero de disco %s" -#: nova/virt/xenapi/vmops.py:262 #, python-format -msgid "resume: instance not present %s" -msgstr "reanudar: instancia no encontrada %s" +#~ msgid "WMI job succeeded: %s, Elapsed=%s " +#~ msgstr "Trabajo WMI ha tenido exito: %s, Transcurrido=%s " -#: nova/virt/xenapi/vmops.py:271 #, python-format -msgid "Instance not found %s" -msgstr "instancia no encontrada %s" +#~ msgid "Created switch port %s on switch %s" +#~ msgstr 
"Creado puerto %s en el switch %s" -#: nova/virt/xenapi/volume_utils.py:57 #, python-format -msgid "Introducing %s..." -msgstr "Introduciendo %s..." +#~ msgid "instance %s: deleting instance files %s" +#~ msgstr "instancia %s: eliminando los ficheros de la instancia %s" -#: nova/virt/xenapi/volume_utils.py:74 #, python-format -msgid "Introduced %s as %s." -msgstr "Introducido %s cómo %s." +#~ msgid "Finished retreving %s -- placed in %s" +#~ msgstr "Finalizada la obtención de %s -- coloado en %s" -#: nova/virt/xenapi/volume_utils.py:78 -msgid "Unable to create Storage Repository" -msgstr "Imposible crear el repositorio de almacenamiento" +#, python-format +#~ msgid "Failed to change vm state of %s to %s" +#~ msgstr "Fallo al cambiar el estado de la vm de %s a %s" -#: nova/virt/xenapi/volume_utils.py:90 #, python-format -msgid "Unable to find SR from VBD %s" -msgstr "Imposible encontrar SR en VBD %s" +#~ msgid "Successfully changed vm state of %s to %s" +#~ msgstr "Cambio de estado de la vm con éxito de %s a %s" -#: nova/virt/xenapi/volume_utils.py:96 #, python-format -msgid "Forgetting SR %s ... " -msgstr "Olvidando SR %s... " +#~ msgid "" +#~ "Got Info for vm %s: state=%s, mem=%s, num_cpu=%s, " +#~ "cpu_time=%s" +#~ msgstr "" +#~ "Obtenida información para vm %s: state=%s, mem=%s, num_cpu=%s, cpu_time=%s" -#: nova/virt/xenapi/volume_utils.py:101 #, python-format -msgid "Ignoring exception %s when getting PBDs for %s" -msgstr "Ignorando excepción %s al obtener PBDs de %s" +#~ msgid "instance %s: ignoring error injecting data into image %s (%s)" +#~ msgstr "" +#~ "instancia %s: ignorando el error al inyectar datos en la imagen %s (%s)" -#: nova/virt/xenapi/volume_utils.py:107 #, python-format -msgid "Ignoring exception %s when unplugging PBD %s" -msgstr "Ignorando excepción %s al desconectar PBD %s" +#~ msgid "Contents of file %s: %r" +#~ msgstr "Contenidos del fichero %s: %r" -#: nova/virt/xenapi/volume_utils.py:111 #, python-format -msgid "Forgetting SR %s done." 
-msgstr "Olvidando SR %s completado." +#~ msgid "instance %s: injecting net into image %s" +#~ msgstr "instancia %s: inyectando red en la imagen %s" -#: nova/virt/xenapi/volume_utils.py:113 #, python-format -msgid "Ignoring exception %s when forgetting SR %s" -msgstr "Ignorando excepción %s al olvidar SR %s" +#~ msgid "instance %s: injecting key into image %s" +#~ msgstr "instancia %s: inyectando clave en la imagen %s" -#: nova/virt/xenapi/volume_utils.py:123 #, python-format -msgid "Unable to introduce VDI on SR %s" -msgstr "Incapaz de insertar VDI en SR %s" +#~ msgid "data: %r, fpath: %r" +#~ msgstr "datos: %r, fpath: %r" -#: nova/virt/xenapi/volume_utils.py:128 #, python-format -msgid "Unable to get record of VDI %s on" -msgstr "Imposible obtener copia del VDI %s en" +#~ msgid "Task [%s] %s status: %s %s" +#~ msgstr "Tarea [%s] %s estado: %s %s" -#: nova/virt/xenapi/volume_utils.py:146 #, python-format -msgid "Unable to introduce VDI for SR %s" -msgstr "Inposible insertar VDI para SR %s" +#~ msgid "Task [%s] %s status: success %s" +#~ msgstr "Tarea [%s] %s estado: éxito %s" -#: nova/virt/xenapi/volume_utils.py:175 #, python-format -msgid "Unable to obtain target information %s, %s" -msgstr "Imposible obtener información del destino %s, %s" +#~ msgid "Calling %s %s" +#~ msgstr "Llamando %s %s" -#: nova/virt/xenapi/volume_utils.py:197 #, python-format -msgid "Mountpoint cannot be translated: %s" -msgstr "Punto de montaje no puede ser traducido: %s" +#~ msgid "%s: _db_content => %s" +#~ msgstr "%s: _db_content => %s" -#: nova/virt/xenapi/volumeops.py:51 #, python-format -msgid "Attach_volume: %s, %s, %s" -msgstr "Attach_volume: %s, %s, %s" +#~ msgid "Created VBD %s for VM %s, VDI %s." +#~ msgstr "Creado VBD %s for VM %s, VDI %s." -#: nova/virt/xenapi/volumeops.py:69 #, python-format -msgid "Unable to create VDI on SR %s for instance %s" -msgstr "Inpoisble crear VDI en SR %s para la instancia %s" +#~ msgid "Creating VBD for VM %s, VDI %s ... 
" +#~ msgstr "Creando VBD para VM %s, VDI %s... " -#: nova/virt/xenapi/volumeops.py:81 #, python-format -msgid "Unable to use SR %s for instance %s" -msgstr "Imposible utilizar SR %s para la instancia %s" +#~ msgid "Created VIF %s for VM %s, network %s." +#~ msgstr "Creado VIF %s para VM %s, red %s." -#: nova/virt/xenapi/volumeops.py:93 #, python-format -msgid "Unable to attach volume to instance %s" -msgstr "Imposible adjuntar volumen a la instancia %s" +#~ msgid "Creating VIF for VM %s, network %s." +#~ msgstr "Creando VIF para VM %s, red %s." -#: nova/virt/xenapi/volumeops.py:95 #, python-format -msgid "Mountpoint %s attached to instance %s" -msgstr "Punto de montaje %s unido a la instancia %s" +#~ msgid "Created VM %s as %s." +#~ msgstr "Creada VM %s cómo %s" -#: nova/virt/xenapi/volumeops.py:106 #, python-format -msgid "Detach_volume: %s, %s" -msgstr "Detach_volume: %s, %s" +#~ msgid "Asking xapi to upload %s as '%s'" +#~ msgstr "Solicitando a xapi la subida de %s cómo %s'" -#: nova/virt/xenapi/volumeops.py:113 #, python-format -msgid "Unable to locate volume %s" -msgstr "Imposible encontrar volumen %s" +#~ msgid "VHD %s has parent %s" +#~ msgstr "VHD %s tiene cómo padre a %s" -#: nova/virt/xenapi/volumeops.py:121 #, python-format -msgid "Unable to detach volume %s" -msgstr "Imposible desasociar volumen %s" +#~ msgid "Asking xapi to fetch %s as %s" +#~ msgstr "Solicitando a xapi obtener %s cómo %s" -#: nova/virt/xenapi/volumeops.py:128 #, python-format -msgid "Mountpoint %s detached from instance %s" -msgstr "Punto d emontaje %s desasociado de la instancia %s" +#~ msgid "PV Kernel in VDI:%d" +#~ msgstr "PV Kernel en VDI:%d" -#: nova/volume/api.py:44 #, python-format -msgid "Quota exceeeded for %s, tried to create %sG volume" -msgstr "Quota excedida para %s, intentando crear el volumen %sG" +#~ msgid "Unexpected number of VDIs (%s) found for VM %s" +#~ msgstr "Número no esperado de VDIs (%s) encontrados para VM %s" -#: nova/volume/api.py:46 #, python-format 
-msgid "Volume quota exceeded. You cannot create a volume of size %s" -msgstr "Quota de volumen superada. No puedes crear un volumen de tamaño %s" +#~ msgid "Parent %s doesn't match original parent %s, waiting for coalesce..." +#~ msgstr "" +#~ "El padre %s no concuerda con el padre original %s, esperando la unión..." -#: nova/volume/api.py:70 nova/volume/api.py:95 -msgid "Volume status must be available" -msgstr "El estado del volumen debe estar disponible" +#, python-format +#~ msgid "suspend: instance not present %s" +#~ msgstr "suspendido: instancia no encontrada: %s" -#: nova/volume/api.py:97 -msgid "Volume is already attached" -msgstr "El volumen ya está asociado previamente" +#, python-format +#~ msgid "Introduced %s as %s." +#~ msgstr "Introducido %s cómo %s." -#: nova/volume/api.py:103 -msgid "Volume is already detached" -msgstr "El volumen ya ha sido desasociado previamente" +#, python-format +#~ msgid "resume: instance not present %s" +#~ msgstr "reanudar: instancia no encontrada %s" -#: nova/volume/driver.py:76 #, python-format -msgid "Recovering from a failed execute. Try number %s" -msgstr "Recuperandose de una ejecución fallida. 
Intenta el número %s" +#~ msgid "Instance not found %s" +#~ msgstr "instancia no encontrada %s" -#: nova/volume/driver.py:85 #, python-format -msgid "volume group %s doesn't exist" -msgstr "el grupo de volumenes %s no existe" +#~ msgid "Ignoring exception %s when getting PBDs for %s" +#~ msgstr "Ignorando excepción %s al obtener PBDs de %s" -#: nova/volume/driver.py:210 #, python-format -msgid "FAKE AOE: %s" -msgstr "Falso AOE: %s" +#~ msgid "Unable to create VDI on SR %s for instance %s" +#~ msgstr "Inpoisble crear VDI en SR %s para la instancia %s" -#: nova/volume/driver.py:315 #, python-format -msgid "FAKE ISCSI: %s" -msgstr "Falso ISCSI: %s" +#~ msgid "Unable to obtain target information %s, %s" +#~ msgstr "Imposible obtener información del destino %s, %s" -#: nova/volume/manager.py:85 #, python-format -msgid "Re-exporting %s volumes" -msgstr "Exportando de nuevo los volumenes %s" +#~ msgid "Ignoring exception %s when forgetting SR %s" +#~ msgstr "Ignorando excepción %s al olvidar SR %s" -#: nova/volume/manager.py:93 #, python-format -msgid "volume %s: creating" -msgstr "volumen %s: creando" +#~ msgid "Ignoring exception %s when unplugging PBD %s" +#~ msgstr "Ignorando excepción %s al desconectar PBD %s" -#: nova/volume/manager.py:102 #, python-format -msgid "volume %s: creating lv of size %sG" -msgstr "volumen %s: creando lv de tamaño %sG" +#~ msgid "Attach_volume: %s, %s, %s" +#~ msgstr "Attach_volume: %s, %s, %s" -#: nova/volume/manager.py:106 #, python-format -msgid "volume %s: creating export" -msgstr "volumen %s: exportando" +#~ msgid "Unable to use SR %s for instance %s" +#~ msgstr "Imposible utilizar SR %s para la instancia %s" -#: nova/volume/manager.py:113 #, python-format -msgid "volume %s: created successfully" -msgstr "volumen %s: creado satisfactoriamente" +#~ msgid "Mountpoint %s attached to instance %s" +#~ msgstr "Punto de montaje %s unido a la instancia %s" -#: nova/volume/manager.py:121 -msgid "Volume is still attached" -msgstr "El volumen 
todavía está asociado" +#, python-format +#~ msgid "Detach_volume: %s, %s" +#~ msgstr "Detach_volume: %s, %s" -#: nova/volume/manager.py:123 -msgid "Volume is not local to this node" -msgstr "Volumen no local a este nodo" +#, python-format +#~ msgid "Mountpoint %s detached from instance %s" +#~ msgstr "Punto d emontaje %s desasociado de la instancia %s" -#: nova/volume/manager.py:124 #, python-format -msgid "volume %s: removing export" -msgstr "volumen %s: eliminando exportación" +#~ msgid "Quota exceeeded for %s, tried to create %sG volume" +#~ msgstr "Quota excedida para %s, intentando crear el volumen %sG" -#: nova/volume/manager.py:126 #, python-format -msgid "volume %s: deleting" -msgstr "volumen %s: eliminando" +#~ msgid "Volume quota exceeded. You cannot create a volume of size %s" +#~ msgstr "Quota de volumen superada. No puedes crear un volumen de tamaño %s" -#: nova/volume/manager.py:129 #, python-format -msgid "volume %s: deleted successfully" -msgstr "volumen %s: eliminado satisfactoriamente" +#~ msgid "instance %s: attach failed %s, removing" +#~ msgstr "instalación %s: asociación fallida %s, eliminando" + +#, python-format +#~ msgid "instance %s: attaching volume %s to %s" +#~ msgstr "instancia %s: asociando volumen %s a %s" + +#, python-format +#~ msgid "Snapshotting VM %s with label '%s'..." +#~ msgstr "Creando snapshot de la VM %s con la etiqueta '%s'..." + +#, python-format +#~ msgid "Created snapshot %s from VM %s." +#~ msgstr "Creando snapshot %s de la VM %s" + +#, python-format +#~ msgid "Unable to Snapshot %s: %s" +#~ msgstr "Incapaz de realizar snapshot %s: %s" + +#, python-format +#~ msgid "Adding sitewide role %s to user %s" +#~ msgstr "Añadiendo rol global %s al usuario %s" + +#, python-format +#~ msgid "Removing sitewide role %s from user %s" +#~ msgstr "Eliminando rol global %s del usuario %s" + +#, python-format +#~ msgid "Del: disk %s vm %s" +#~ msgstr "Del: disco %s vm %s" + +#, python-format +#~ msgid "Spawning VM %s created %s." 
+#~ msgstr "Iniciando VM %s creado %s." @@ -7,2135 +7,2892 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n" -"POT-Creation-Date: 2011-01-10 11:25-0800\n" -"PO-Revision-Date: 2011-01-14 17:17+0000\n" +"POT-Creation-Date: 2011-02-21 10:03-0500\n" +"PO-Revision-Date: 2011-02-22 19:34+0000\n" "Last-Translator: Armando Migliaccio <Unknown>\n" "Language-Team: Italian <it@li.org>\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Launchpad-Export-Date: 2011-02-05 05:36+0000\n" -"X-Generator: Launchpad (build 12177)\n" +"X-Launchpad-Export-Date: 2011-03-19 06:19+0000\n" +"X-Generator: Launchpad (build 12559)\n" -#: nova/crypto.py:46 +#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55 +#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110 +#: ../nova/scheduler/simple.py:122 +msgid "No hosts found" +msgstr "Nessun host trovato" + +#: ../nova/exception.py:33 +msgid "Unexpected error while running command." +msgstr "" +"Si e' verificato un errore inatteso durante l'esecuzione del comando." + +#: ../nova/exception.py:36 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" +"%(description)s\n" +"Comando: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" + +#: ../nova/exception.py:107 +msgid "DB exception wrapped" +msgstr "DB eccezione wrappata" + +#. exc_type, exc_value, exc_traceback = sys.exc_info() +#: ../nova/exception.py:120 +msgid "Uncaught exception" +msgstr "Eccezione non gestita" + +#: ../nova/volume/api.py:45 +#, python-format +msgid "Quota exceeeded for %(pid)s, tried to create %(size)sG volume" +msgstr "" +"Quota ecceduta per %(pid)s, tentato di creare un volume di dimensione " +"%(size)sG" + +#: ../nova/volume/api.py:47 +#, python-format +msgid "Volume quota exceeded. 
You cannot create a volume of size %sG" +msgstr "Quota volume ecceduta. Non puoi creare un volume di dimensione %sG" + +#: ../nova/volume/api.py:71 ../nova/volume/api.py:96 +msgid "Volume status must be available" +msgstr "Lo stato del volume deve essere disponibile" + +#: ../nova/volume/api.py:98 +msgid "Volume is already attached" +msgstr "Il volume é già collegato" + +#: ../nova/volume/api.py:104 +msgid "Volume is already detached" +msgstr "Il volume é già scollegato" + +#: ../nova/api/openstack/servers.py:72 +msgid "Failed to read private ip" +msgstr "Impossibile leggere l'indirizzo IP privato" + +#: ../nova/api/openstack/servers.py:79 +msgid "Failed to read public ip(s)" +msgstr "Impossibile leggere gli indirizzi IP pubblici" + +#: ../nova/api/openstack/servers.py:152 +#, python-format +msgid "%(param)s property not found for image %(_image_id)s" +msgstr "" +"La proprietà %(param)s non é stata trovata per l'immagine %(_image_id)s" + +#: ../nova/api/openstack/servers.py:168 +msgid "No keypairs defined" +msgstr "No keypairs definita" + +#: ../nova/api/openstack/servers.py:238 +#, python-format +msgid "Compute.api::lock %s" +msgstr "Compute.api::lock %s" + +#: ../nova/api/openstack/servers.py:253 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "Compute.api::unlock %s" + +#: ../nova/api/openstack/servers.py:267 +#, python-format +msgid "Compute.api::get_lock %s" +msgstr "Compute.api::get_lock %s" + +#: ../nova/api/openstack/servers.py:281 +#, python-format +msgid "Compute.api::reset_network %s" +msgstr "Compute.api::reset_network %s" + +#: ../nova/api/openstack/servers.py:292 +#, python-format +msgid "Compute.api::pause %s" +msgstr "Compute.api::pause %s" + +#: ../nova/api/openstack/servers.py:303 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "Compute.api::unpause %s" + +#: ../nova/api/openstack/servers.py:314 +#, python-format +msgid "compute.api::suspend %s" +msgstr "compute.api::suspend %s" + +#: ../nova/api/openstack/servers.py:325 +#, 
python-format +msgid "compute.api::resume %s" +msgstr "compute.api::resume %s" + +#: ../nova/twistd.py:157 +msgid "Wrong number of arguments." +msgstr "Numero errato di argomenti" + +#: ../nova/twistd.py:209 +#, python-format +msgid "pidfile %s does not exist. Daemon not running?\n" +msgstr "" +"Il pidfile %s non esiste. Assicurarsi che il demone é in esecuzione.\n" + +#: ../nova/twistd.py:221 +msgid "No such process" +msgstr "Nessun processo trovato" + +#: ../nova/twistd.py:230 ../nova/service.py:224 +#, python-format +msgid "Serving %s" +msgstr "Servire %s" + +#: ../nova/twistd.py:262 ../nova/service.py:225 +msgid "Full set of FLAGS:" +msgstr "Insieme di FLAGS:" + +#: ../nova/twistd.py:266 +#, python-format +msgid "Starting %s" +msgstr "Avvio di %s" + +#: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101 +#: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741 +#: ../nova/api/ec2/__init__.py:317 +#, python-format +msgid "Instance %s not found" +msgstr "Istanza %s non trovata" + +#. 
NOTE: No Resource Pool concept so far +#: ../nova/virt/xenapi/volumeops.py:51 +#, python-format +msgid "Attach_volume: %(instance_name)s, %(device_path)s, %(mountpoint)s" +msgstr "Attach_volume: %(instance_name)s, %(device_path)s, %(mountpoint)s" + +#: ../nova/virt/xenapi/volumeops.py:69 +#, python-format +msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" +msgstr "" +"Impossible creare il VDI su SR %(sr_ref)s per l'istanza %(instance_name)s" + +#: ../nova/virt/xenapi/volumeops.py:80 +#, python-format +msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s" +msgstr "Impossibile usare SR %(sr_ref)s per l'istanza %(instance_name)s" + +#: ../nova/virt/xenapi/volumeops.py:91 +#, python-format +msgid "Unable to attach volume to instance %s" +msgstr "Impossibile montare il volume all'istanza %s" + +#: ../nova/virt/xenapi/volumeops.py:93 +#, python-format +msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" +msgstr "Mountpoint %(mountpoint)s montato all'istanza %(instance_name)s" + +#. 
Detach VBD from VM +#: ../nova/virt/xenapi/volumeops.py:104 +#, python-format +msgid "Detach_volume: %(instance_name)s, %(mountpoint)s" +msgstr "Detach_volume: %(instance_name)s, %(mountpoint)s" + +#: ../nova/virt/xenapi/volumeops.py:112 +#, python-format +msgid "Unable to locate volume %s" +msgstr "Impossibile localizzare il volume %s" + +#: ../nova/virt/xenapi/volumeops.py:120 +#, python-format +msgid "Unable to detach volume %s" +msgstr "Impossibile smontare il volume %s" + +#: ../nova/virt/xenapi/volumeops.py:127 +#, python-format +msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" +msgstr "Mountpoint %(mountpoint)s smontato dall'istanza %(instance_name)s" + +#: ../nova/compute/instance_types.py:41 +#, python-format +msgid "Unknown instance type: %s" +msgstr "Tipo dell'istanza sconosciuto: %s" + +#: ../nova/crypto.py:46 msgid "Filename of root CA" -msgstr "Nome del file root CA" +msgstr "Filename di root CA" -#: nova/crypto.py:49 +#: ../nova/crypto.py:49 msgid "Filename of private key" -msgstr "Nome del file della chiave privata" +msgstr "Nome file della chiave privata" -#: nova/crypto.py:51 +#: ../nova/crypto.py:51 msgid "Filename of root Certificate Revokation List" -msgstr "" +msgstr "Nome file di root Certificate Revokation List" -#: nova/crypto.py:53 +#: ../nova/crypto.py:53 msgid "Where we keep our keys" msgstr "Dove si conservano le chiavi" -#: nova/crypto.py:55 +#: ../nova/crypto.py:55 msgid "Where we keep our root CA" msgstr "Dove si conserva root CA" -#: nova/crypto.py:57 +#: ../nova/crypto.py:57 msgid "Should we use a CA for each project?" msgstr "Si dovrebbe usare un CA per ogni progetto?" 
-#: nova/crypto.py:61 +#: ../nova/crypto.py:61 #, python-format msgid "Subject for certificate for users, %s for project, user, timestamp" msgstr "" "Soggetto per il certificato degli utenti, %s per progetto, utente, orario" -#: nova/crypto.py:66 +#: ../nova/crypto.py:66 #, python-format msgid "Subject for certificate for projects, %s for project, timestamp" msgstr "Soggetto per il certificato dei progetti, %s per progetto, orario" -#: nova/crypto.py:71 +#: ../nova/crypto.py:71 #, python-format msgid "Subject for certificate for vpns, %s for project, timestamp" msgstr "Soggetto per il certificato delle vpn, %s per progetto, orario" -#: nova/crypto.py:258 +#: ../nova/crypto.py:258 #, python-format msgid "Flags path: %s" msgstr "Percorso dei flags: %s" -#: nova/exception.py:33 -msgid "Unexpected error while running command." -msgstr "" -"Si e' verificato un errore inatteso durante l'esecuzione del comando." +#: ../nova/scheduler/manager.py:69 +#, python-format +msgid "Casting to %(topic)s %(host)s for %(method)s" +msgstr "Trasmissione asincrona a %(topic)s %(host)s for %(method)s" -#: nova/exception.py:36 +#: ../nova/compute/manager.py:78 #, python-format -msgid "" -"%s\n" -"Command: %s\n" -"Exit code: %s\n" -"Stdout: %r\n" -"Stderr: %r" -msgstr "" -"%s\n" -"Comando: %s\n" -"Exit code: %s\n" -"Stdout: %r\n" -"Stderr: %r" - -#: nova/exception.py:86 -msgid "Uncaught exception" -msgstr "Eccezione non gestita" +msgid "check_instance_lock: decorating: |%s|" +msgstr "check_instance_lock: decorazione: |%s|" -#: nova/fakerabbit.py:48 +#: ../nova/compute/manager.py:80 #, python-format -msgid "(%s) publish (key: %s) %s" -msgstr "(%s) pubblica (chiave: %s) %s" +msgid "" +"check_instance_lock: arguments: |%(self)s| |%(context)s| |%(instance_id)s|" +msgstr "" +"check_instance_lock: argomenti: |%(self)s| |%(context)s| |%(instance_id)s|" -#: nova/fakerabbit.py:53 +#: ../nova/compute/manager.py:84 #, python-format -msgid "Publishing to route %s" -msgstr "Pubblicando sulla route %s" 
+msgid "check_instance_lock: locked: |%s|" +msgstr "check_instance_lock: bloccato: |%s|" -#: nova/fakerabbit.py:83 +#: ../nova/compute/manager.py:86 #, python-format -msgid "Declaring queue %s" -msgstr "Dichiarando la coda %s" +msgid "check_instance_lock: admin: |%s|" +msgstr "check_instance_lock: admin: |%s|" -#: nova/fakerabbit.py:89 +#: ../nova/compute/manager.py:91 #, python-format -msgid "Declaring exchange %s" -msgstr "Dichiarando il centralino %s" +msgid "check_instance_lock: executing: |%s|" +msgstr "check_instance_lock: esecuzione: |%s|" -#: nova/fakerabbit.py:95 +#: ../nova/compute/manager.py:95 #, python-format -msgid "Binding %s to %s with key %s" -msgstr "Collegando %s a %s con la chiave %s" +msgid "check_instance_lock: not executing |%s|" +msgstr "check_instance_lock: non esecuzione |%s|" -#: nova/fakerabbit.py:120 +#: ../nova/compute/manager.py:179 +msgid "Instance has already been created" +msgstr "L'istanza é stata già creata" + +#: ../nova/compute/manager.py:180 #, python-format -msgid "Getting from %s: %s" -msgstr "" +msgid "instance %s: starting..." +msgstr "Istanza %s: in esecuzione..." -#: nova/rpc.py:92 +#. pylint: disable=W0702 +#: ../nova/compute/manager.py:219 #, python-format -msgid "AMQP server on %s:%d is unreachable. Trying again in %d seconds." -msgstr "" -"Il server AMQP su %s:%d non é raggiungibile. Riprovare in %d secondi." +msgid "instance %s: Failed to spawn" +msgstr "Istanza %s: esecuzione fallita..." -#: nova/rpc.py:99 +#: ../nova/compute/manager.py:233 ../nova/tests/test_cloud.py:286 #, python-format -msgid "Unable to connect to AMQP server after %d tries. Shutting down." -msgstr "" -"Impossibile connettersi al server AMQP dopo %d tentativi. Terminando " -"l'applicazione." 
+msgid "Terminating instance %s" +msgstr "Terminando l'istanza %s" -#: nova/rpc.py:118 -msgid "Reconnected to queue" -msgstr "Riconnesso alla coda" +#: ../nova/compute/manager.py:255 +#, python-format +msgid "Deallocating address %s" +msgstr "Deallocando l'indirizzo %s" -#: nova/rpc.py:125 -msgid "Failed to fetch message from queue" -msgstr "Impossibile prelevare il messaggio dalla coda" +#: ../nova/compute/manager.py:268 +#, python-format +msgid "trying to destroy already destroyed instance: %s" +msgstr "Provando a distruggere una istanza già distrutta: %s" -#: nova/rpc.py:155 +#: ../nova/compute/manager.py:282 #, python-format -msgid "Initing the Adapter Consumer for %s" -msgstr "Inizializzando il Consumer Adapter per %s" +msgid "Rebooting instance %s" +msgstr "Riavviando l'istanza %s" -#: nova/rpc.py:170 +#: ../nova/compute/manager.py:287 #, python-format -msgid "received %s" -msgstr "ricevuto %s" +msgid "" +"trying to reboot a non-running instance: %(instance_id)s (state: %(state)s " +"expected: %(running)s)" +msgstr "" -#: nova/rpc.py:183 +#: ../nova/compute/manager.py:311 #, python-format -msgid "no method for message: %s" -msgstr "nessun metodo per il messaggio: %s" +msgid "instance %s: snapshotting" +msgstr "" -#: nova/rpc.py:184 +#: ../nova/compute/manager.py:316 #, python-format -msgid "No method for message: %s" -msgstr "nessun metodo per il messagggio: %s" +msgid "" +"trying to snapshot a non-running instance: %(instance_id)s (state: %(state)s " +"expected: %(running)s)" +msgstr "" -#: nova/rpc.py:245 +#: ../nova/compute/manager.py:332 #, python-format -msgid "Returning exception %s to caller" -msgstr "Sollevando eccezione %s al chiamante" +msgid "" +"trying to reset the password on a non-running instance: %(instance_id)s " +"(state: %(instance_state)s expected: %(expected_state)s)" +msgstr "" -#: nova/rpc.py:286 +#: ../nova/compute/manager.py:335 #, python-format -msgid "unpacked context: %s" -msgstr "contesto decompresso: %s" +msgid "instance %s: 
setting admin password" +msgstr "" -#: nova/rpc.py:305 -msgid "Making asynchronous call..." -msgstr "Facendo chiamata asincrona..." +#: ../nova/compute/manager.py:353 +#, python-format +msgid "" +"trying to inject a file into a non-running instance: %(instance_id)s (state: " +"%(instance_state)s expected: %(expected_state)s)" +msgstr "" -#: nova/rpc.py:308 +#: ../nova/compute/manager.py:362 #, python-format -msgid "MSG_ID is %s" -msgstr "MSG_ID é %s" +msgid "instance %(nm)s: injecting file to %(plain_path)s" +msgstr "" -#: nova/rpc.py:356 +#: ../nova/compute/manager.py:372 #, python-format -msgid "response %s" -msgstr "risposta %s" +msgid "instance %s: rescuing" +msgstr "" -#: nova/rpc.py:365 +#: ../nova/compute/manager.py:387 #, python-format -msgid "topic is %s" -msgstr "argomento é %s" +msgid "instance %s: unrescuing" +msgstr "" -#: nova/rpc.py:366 +#: ../nova/compute/manager.py:406 #, python-format -msgid "message %s" -msgstr "messaggio %s" +msgid "instance %s: pausing" +msgstr "" -#: nova/service.py:157 +#: ../nova/compute/manager.py:423 #, python-format -msgid "Starting %s node" -msgstr "Avviando il nodo %s" +msgid "instance %s: unpausing" +msgstr "" -#: nova/service.py:169 -msgid "Service killed that has no database entry" -msgstr "Servizio terminato che non ha entry nel database" +#: ../nova/compute/manager.py:440 +#, python-format +msgid "instance %s: retrieving diagnostics" +msgstr "" -#: nova/service.py:190 -msgid "The service database object disappeared, Recreating it." -msgstr "Il servizio é scomparso dal database, ricreo." +#: ../nova/compute/manager.py:453 +#, python-format +msgid "instance %s: suspending" +msgstr "" -#: nova/service.py:202 -msgid "Recovered model server connection!" -msgstr "Connessione al model server ripristinata!" 
+#: ../nova/compute/manager.py:472 +#, python-format +msgid "instance %s: resuming" +msgstr "" -#: nova/service.py:208 -msgid "model server went away" -msgstr "model server é scomparso" +#: ../nova/compute/manager.py:491 +#, python-format +msgid "instance %s: locking" +msgstr "" -#: nova/service.py:217 nova/db/sqlalchemy/__init__.py:43 +#: ../nova/compute/manager.py:503 #, python-format -msgid "Data store %s is unreachable. Trying again in %d seconds." -msgstr "Datastore %s é irrangiungibile. Riprovare in %d seconds." +msgid "instance %s: unlocking" +msgstr "" -#: nova/service.py:232 nova/twistd.py:232 +#: ../nova/compute/manager.py:513 #, python-format -msgid "Serving %s" -msgstr "Servire %s" +msgid "instance %s: getting locked state" +msgstr "" -#: nova/service.py:234 nova/twistd.py:264 -msgid "Full set of FLAGS:" -msgstr "Insieme di FLAGS:" +#: ../nova/compute/manager.py:526 +#, python-format +msgid "instance %s: reset network" +msgstr "" -#: nova/twistd.py:211 +#: ../nova/compute/manager.py:535 ../nova/api/ec2/cloud.py:515 #, python-format -msgid "pidfile %s does not exist. Daemon not running?\n" +msgid "Get console output for instance %s" msgstr "" -"Il pidfile %s non esiste. Assicurarsi che il demone é in esecuzione.\n" -#: nova/twistd.py:268 +#: ../nova/compute/manager.py:543 #, python-format -msgid "Starting %s" -msgstr "Avvio di %s" +msgid "instance %s: getting ajax console" +msgstr "" -#: nova/utils.py:53 +#: ../nova/compute/manager.py:553 #, python-format -msgid "Inner Exception: %s" -msgstr "Eccezione interna: %s" +msgid "" +"instance %(instance_id)s: attaching volume %(volume_id)s to %(mountpoint)s" +msgstr "" -#: nova/utils.py:54 +#. pylint: disable=W0702 +#. NOTE(vish): The inline callback eats the exception info so we +#. log the traceback here and reraise the same +#. ecxception below. 
+#: ../nova/compute/manager.py:569 #, python-format -msgid "Class %s cannot be found" -msgstr "Classe %s non può essere trovata" +msgid "instance %(instance_id)s: attach failed %(mountpoint)s, removing" +msgstr "" -#: nova/utils.py:113 +#: ../nova/compute/manager.py:585 #, python-format -msgid "Fetching %s" -msgstr "Prelievo %s" +msgid "" +"Detach volume %(volume_id)s from mountpoint %(mp)s on instance " +"%(instance_id)s" +msgstr "" -#: nova/utils.py:125 +#: ../nova/compute/manager.py:588 #, python-format -msgid "Running cmd (subprocess): %s" -msgstr "Esecuzione del comando (sottoprocesso): %s" +msgid "Detaching volume from unknown instance %s" +msgstr "" -#: nova/utils.py:138 +#: ../nova/scheduler/simple.py:53 #, python-format -msgid "Result was %s" -msgstr "Il risultato é %s" +msgid "Host %s is not alive" +msgstr "" + +#: ../nova/scheduler/simple.py:65 +msgid "All hosts have too many cores" +msgstr "" -#: nova/utils.py:171 +#: ../nova/scheduler/simple.py:87 #, python-format -msgid "debug in callback: %s" -msgstr "debug in callback: %s" +msgid "Host %s not available" +msgstr "" + +#: ../nova/scheduler/simple.py:99 +msgid "All hosts have too many gigabytes" +msgstr "" -#: nova/utils.py:176 +#: ../nova/scheduler/simple.py:119 +msgid "All hosts have too many networks" +msgstr "" + +#: ../nova/volume/manager.py:85 #, python-format -msgid "Running %s" +msgid "Re-exporting %s volumes" msgstr "" -#: nova/utils.py:207 +#: ../nova/volume/manager.py:90 #, python-format -msgid "Couldn't get IP, using 127.0.0.1 %s" +msgid "volume %s: skipping export" msgstr "" -#: nova/utils.py:289 +#: ../nova/volume/manager.py:96 #, python-format -msgid "Invalid backend: %s" +msgid "volume %s: creating" msgstr "" -#: nova/utils.py:300 +#: ../nova/volume/manager.py:108 #, python-format -msgid "backend %s" +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" msgstr "" -#: nova/api/ec2/__init__.py:133 -msgid "Too many failed authentications." 
+#: ../nova/volume/manager.py:112 +#, python-format +msgid "volume %s: creating export" msgstr "" -#: nova/api/ec2/__init__.py:142 +#: ../nova/volume/manager.py:123 #, python-format -msgid "" -"Access key %s has had %d failed authentications and will be locked out for " -"%d minutes." +msgid "volume %s: created successfully" +msgstr "" + +#: ../nova/volume/manager.py:131 +msgid "Volume is still attached" msgstr "" -#: nova/api/ec2/__init__.py:179 nova/objectstore/handler.py:140 +#: ../nova/volume/manager.py:133 +msgid "Volume is not local to this node" +msgstr "" + +#: ../nova/volume/manager.py:136 #, python-format -msgid "Authentication Failure: %s" +msgid "volume %s: removing export" msgstr "" -#: nova/api/ec2/__init__.py:190 +#: ../nova/volume/manager.py:138 #, python-format -msgid "Authenticated Request For %s:%s)" +msgid "volume %s: deleting" msgstr "" -#: nova/api/ec2/__init__.py:227 +#: ../nova/volume/manager.py:147 #, python-format -msgid "action: %s" +msgid "volume %s: deleted successfully" msgstr "" -#: nova/api/ec2/__init__.py:229 +#: ../nova/virt/xenapi/fake.py:74 #, python-format -msgid "arg: %s\t\tval: %s" +msgid "%(text)s: _db_content => %(content)s" msgstr "" -#: nova/api/ec2/__init__.py:301 +#: ../nova/virt/xenapi/fake.py:304 ../nova/virt/xenapi/fake.py:404 +#: ../nova/virt/xenapi/fake.py:422 ../nova/virt/xenapi/fake.py:478 +msgid "Raising NotImplemented" +msgstr "" + +#: ../nova/virt/xenapi/fake.py:306 #, python-format -msgid "Unauthorized request for controller=%s and action=%s" +msgid "xenapi.fake does not have an implementation for %s" msgstr "" -#: nova/api/ec2/__init__.py:339 +#: ../nova/virt/xenapi/fake.py:341 #, python-format -msgid "NotFound raised: %s" +msgid "Calling %(localname)s %(impl)s" msgstr "" -#: nova/api/ec2/__init__.py:342 +#: ../nova/virt/xenapi/fake.py:346 #, python-format -msgid "ApiError raised: %s" +msgid "Calling getter %s" msgstr "" -#: nova/api/ec2/__init__.py:349 +#: ../nova/virt/xenapi/fake.py:406 #, python-format 
-msgid "Unexpected error raised: %s" +msgid "" +"xenapi.fake does not have an implementation for %s or it has been called " +"with the wrong number of arguments" msgstr "" -#: nova/api/ec2/__init__.py:354 -msgid "An unknown error has occurred. Please try your request again." +#: ../nova/tests/test_cloud.py:256 +msgid "Can't test instances without a real virtual env." msgstr "" -#: nova/api/ec2/admin.py:84 +#: ../nova/tests/test_cloud.py:268 #, python-format -msgid "Creating new user: %s" +msgid "Need to watch instance %s until it's running..." msgstr "" -#: nova/api/ec2/admin.py:92 -#, python-format -msgid "Deleting user: %s" +#: ../nova/virt/connection.py:73 +msgid "Failed to open connection to the hypervisor" msgstr "" -#: nova/api/ec2/admin.py:114 +#: ../nova/network/linux_net.py:187 #, python-format -msgid "Adding role %s to user %s for project %s" +msgid "Starting VLAN inteface %s" msgstr "" -#: nova/api/ec2/admin.py:117 nova/auth/manager.py:415 +#: ../nova/network/linux_net.py:208 #, python-format -msgid "Adding sitewide role %s to user %s" +msgid "Starting Bridge interface for %s" msgstr "" -#: nova/api/ec2/admin.py:122 +#. pylint: disable=W0703 +#: ../nova/network/linux_net.py:314 #, python-format -msgid "Removing role %s from user %s for project %s" +msgid "Hupping dnsmasq threw %s" msgstr "" -#: nova/api/ec2/admin.py:125 nova/auth/manager.py:441 +#: ../nova/network/linux_net.py:316 #, python-format -msgid "Removing sitewide role %s from user %s" +msgid "Pid %d is stale, relaunching dnsmasq" msgstr "" -#: nova/api/ec2/admin.py:129 nova/api/ec2/admin.py:192 -msgid "operation must be add or remove" +#. pylint: disable=W0703 +#: ../nova/network/linux_net.py:358 +#, python-format +msgid "killing radvd threw %s" msgstr "" -#: nova/api/ec2/admin.py:142 +#: ../nova/network/linux_net.py:360 #, python-format -msgid "Getting x509 for user: %s on project: %s" +msgid "Pid %d is stale, relaunching radvd" msgstr "" -#: nova/api/ec2/admin.py:159 +#. 
pylint: disable=W0703 +#: ../nova/network/linux_net.py:449 #, python-format -msgid "Create project %s managed by %s" +msgid "Killing dnsmasq threw %s" msgstr "" -#: nova/api/ec2/admin.py:170 +#: ../nova/utils.py:58 #, python-format -msgid "Delete project: %s" +msgid "Inner Exception: %s" +msgstr "Eccezione interna: %s" + +#: ../nova/utils.py:59 +#, python-format +msgid "Class %s cannot be found" +msgstr "Classe %s non può essere trovata" + +#: ../nova/utils.py:118 +#, python-format +msgid "Fetching %s" +msgstr "Prelievo %s" + +#: ../nova/utils.py:130 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "Esecuzione del comando (sottoprocesso): %s" + +#: ../nova/utils.py:143 ../nova/utils.py:183 +#, python-format +msgid "Result was %s" +msgstr "Il risultato é %s" + +#: ../nova/utils.py:159 +#, python-format +msgid "Running cmd (SSH): %s" msgstr "" -#: nova/api/ec2/admin.py:184 nova/auth/manager.py:533 +#: ../nova/utils.py:217 +#, python-format +msgid "debug in callback: %s" +msgstr "debug in callback: %s" + +#: ../nova/utils.py:222 #, python-format -msgid "Adding user %s to project %s" +msgid "Running %s" msgstr "" -#: nova/api/ec2/admin.py:188 +#: ../nova/utils.py:262 #, python-format -msgid "Removing user %s from project %s" +msgid "Link Local address is not found.:%s" msgstr "" -#: nova/api/ec2/apirequest.py:95 +#: ../nova/utils.py:265 #, python-format -msgid "Unsupported API request: controller = %s,action = %s" +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" msgstr "" -#: nova/api/ec2/cloud.py:117 +#: ../nova/utils.py:363 #, python-format -msgid "Generating root CA: %s" +msgid "Invalid backend: %s" msgstr "" -#: nova/api/ec2/cloud.py:277 +#: ../nova/utils.py:374 #, python-format -msgid "Create key pair %s" +msgid "backend %s" msgstr "" -#: nova/api/ec2/cloud.py:285 +#: ../nova/fakerabbit.py:49 #, python-format -msgid "Delete key pair %s" +msgid "(%(nm)s) publish (key: %(routing_key)s) %(message)s" msgstr "" -#: nova/api/ec2/cloud.py:357 
+#: ../nova/fakerabbit.py:54 #, python-format -msgid "%s is not a valid ipProtocol" +msgid "Publishing to route %s" +msgstr "Pubblicando sulla route %s" + +#: ../nova/fakerabbit.py:84 +#, python-format +msgid "Declaring queue %s" +msgstr "Dichiarando la coda %s" + +#: ../nova/fakerabbit.py:90 +#, python-format +msgid "Declaring exchange %s" +msgstr "Dichiarando il centralino %s" + +#: ../nova/fakerabbit.py:96 +#, python-format +msgid "Binding %(queue)s to %(exchange)s with key %(routing_key)s" msgstr "" -#: nova/api/ec2/cloud.py:361 -msgid "Invalid port range" +#: ../nova/fakerabbit.py:121 +#, python-format +msgid "Getting from %(queue)s: %(message)s" msgstr "" -#: nova/api/ec2/cloud.py:392 +#: ../nova/virt/xenapi/vm_utils.py:135 ../nova/virt/hyperv.py:171 #, python-format -msgid "Revoke security group ingress %s" +msgid "Created VM %s..." msgstr "" -#: nova/api/ec2/cloud.py:401 nova/api/ec2/cloud.py:414 -msgid "No rule for the specified parameters." +#: ../nova/virt/xenapi/vm_utils.py:138 +#, python-format +msgid "Created VM %(instance_name)s as %(vm_ref)s." msgstr "" -#: nova/api/ec2/cloud.py:421 +#: ../nova/virt/xenapi/vm_utils.py:168 #, python-format -msgid "Authorize security group ingress %s" +msgid "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " msgstr "" -#: nova/api/ec2/cloud.py:432 +#: ../nova/virt/xenapi/vm_utils.py:171 #, python-format -msgid "This rule already exists in group %s" +msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." 
msgstr "" -#: nova/api/ec2/cloud.py:460 +#: ../nova/virt/xenapi/vm_utils.py:187 #, python-format -msgid "Create Security Group %s" +msgid "VBD not found in instance %s" msgstr "" -#: nova/api/ec2/cloud.py:463 +#: ../nova/virt/xenapi/vm_utils.py:197 #, python-format -msgid "group %s already exists" +msgid "Unable to unplug VBD %s" msgstr "" -#: nova/api/ec2/cloud.py:475 +#: ../nova/virt/xenapi/vm_utils.py:209 #, python-format -msgid "Delete security group %s" +msgid "Unable to destroy VBD %s" msgstr "" -#: nova/api/ec2/cloud.py:483 nova/compute/manager.py:452 +#: ../nova/virt/xenapi/vm_utils.py:224 #, python-format -msgid "Get console output for instance %s" +msgid "Creating VIF for VM %(vm_ref)s, network %(network_ref)s." msgstr "" -#: nova/api/ec2/cloud.py:543 +#: ../nova/virt/xenapi/vm_utils.py:227 #, python-format -msgid "Create volume of %s GB" +msgid "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s." msgstr "" -#: nova/api/ec2/cloud.py:567 +#: ../nova/virt/xenapi/vm_utils.py:246 #, python-format -msgid "Attach volume %s to instacne %s at %s" +msgid "" +"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s) on " +"%(sr_ref)s." msgstr "" -#: nova/api/ec2/cloud.py:579 +#. TODO(sirp): Add quiesce and VSS locking support when Windows support +#. is added +#: ../nova/virt/xenapi/vm_utils.py:258 #, python-format -msgid "Detach volume %s" +msgid "Snapshotting VM %(vm_ref)s with label '%(label)s'..." msgstr "" -#: nova/api/ec2/cloud.py:686 -msgid "Allocate address" +#: ../nova/virt/xenapi/vm_utils.py:272 +#, python-format +msgid "Created snapshot %(template_vm_ref)s from VM %(vm_ref)s." 
msgstr "" -#: nova/api/ec2/cloud.py:691 +#: ../nova/virt/xenapi/vm_utils.py:286 #, python-format -msgid "Release address %s" +msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s" msgstr "" -#: nova/api/ec2/cloud.py:696 +#: ../nova/virt/xenapi/vm_utils.py:327 #, python-format -msgid "Associate address %s to instance %s" +msgid "Size for image %(image)s:%(virtual_size)d" msgstr "" -#: nova/api/ec2/cloud.py:703 +#: ../nova/virt/xenapi/vm_utils.py:332 #, python-format -msgid "Disassociate address %s" +msgid "Glance image %s" msgstr "" -#: nova/api/ec2/cloud.py:730 -msgid "Going to start terminating instances" +#. we need to invoke a plugin for copying VDI's +#. content into proper path +#: ../nova/virt/xenapi/vm_utils.py:342 +#, python-format +msgid "Copying VDI %s to /boot/guest on dom0" msgstr "" -#: nova/api/ec2/cloud.py:738 +#: ../nova/virt/xenapi/vm_utils.py:352 #, python-format -msgid "Reboot instance %r" +msgid "Kernel/Ramdisk VDI %s destroyed" msgstr "" -#: nova/api/ec2/cloud.py:775 +#: ../nova/virt/xenapi/vm_utils.py:361 #, python-format -msgid "De-registering image %s" +msgid "Asking xapi to fetch %(url)s as %(access)s" msgstr "" -#: nova/api/ec2/cloud.py:783 +#: ../nova/virt/xenapi/vm_utils.py:386 ../nova/virt/xenapi/vm_utils.py:402 #, python-format -msgid "Registered image %s with id %s" +msgid "Looking up vdi %s for PV kernel" msgstr "" -#: nova/api/ec2/cloud.py:789 nova/api/ec2/cloud.py:804 +#: ../nova/virt/xenapi/vm_utils.py:397 #, python-format -msgid "attribute not supported: %s" +msgid "PV Kernel in VDI:%s" msgstr "" -#: nova/api/ec2/cloud.py:794 +#: ../nova/virt/xenapi/vm_utils.py:405 #, python-format -msgid "invalid id: %s" +msgid "Running pygrub against %s" msgstr "" -#: nova/api/ec2/cloud.py:807 -msgid "user or group not specified" +#: ../nova/virt/xenapi/vm_utils.py:411 +#, python-format +msgid "Found Xen kernel %s" msgstr "" -#: nova/api/ec2/cloud.py:809 -msgid "only group \"all\" is supported" +#: ../nova/virt/xenapi/vm_utils.py:413 
+msgid "No Xen kernel found. Booting HVM." msgstr "" -#: nova/api/ec2/cloud.py:811 -msgid "operation_type must be add or remove" +#: ../nova/virt/xenapi/vm_utils.py:425 ../nova/virt/hyperv.py:431 +#, python-format +msgid "duplicate name found: %s" msgstr "" -#: nova/api/ec2/cloud.py:812 +#: ../nova/virt/xenapi/vm_utils.py:442 #, python-format -msgid "Updating image %s publicity" +msgid "VDI %s is still available" msgstr "" -#: nova/api/ec2/metadatarequesthandler.py:75 +#: ../nova/virt/xenapi/vm_utils.py:463 #, python-format -msgid "Failed to get metadata for ip: %s" +msgid "(VM_UTILS) xenserver vm state -> |%s|" msgstr "" -#: nova/api/openstack/__init__.py:70 +#: ../nova/virt/xenapi/vm_utils.py:465 #, python-format -msgid "Caught error: %s" +msgid "(VM_UTILS) xenapi power_state -> |%s|" msgstr "" -#: nova/api/openstack/__init__.py:86 -msgid "Including admin operations in API." +#: ../nova/virt/xenapi/vm_utils.py:525 +#, python-format +msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s" msgstr "" -#: nova/api/openstack/servers.py:184 +#: ../nova/virt/xenapi/vm_utils.py:542 #, python-format -msgid "Compute.api::lock %s" +msgid "Re-scanning SR %s" msgstr "" -#: nova/api/openstack/servers.py:199 +#: ../nova/virt/xenapi/vm_utils.py:567 #, python-format -msgid "Compute.api::unlock %s" +msgid "" +"VHD coalesce attempts exceeded (%(counter)d > %(max_attempts)d), giving up..." msgstr "" -#: nova/api/openstack/servers.py:213 +#: ../nova/virt/xenapi/vm_utils.py:574 #, python-format -msgid "Compute.api::get_lock %s" +msgid "" +"Parent %(parent_uuid)s doesn't match original parent " +"%(original_parent_uuid)s, waiting for coalesce..." 
msgstr "" -#: nova/api/openstack/servers.py:224 +#: ../nova/virt/xenapi/vm_utils.py:590 #, python-format -msgid "Compute.api::pause %s" +msgid "No VDIs found for VM %s" msgstr "" -#: nova/api/openstack/servers.py:235 +#: ../nova/virt/xenapi/vm_utils.py:594 #, python-format -msgid "Compute.api::unpause %s" +msgid "Unexpected number of VDIs (%(num_vdis)s) found for VM %(vm_ref)s" msgstr "" -#: nova/api/openstack/servers.py:246 +#: ../nova/virt/xenapi/vm_utils.py:653 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:188 #, python-format -msgid "compute.api::suspend %s" +msgid "Creating VBD for VDI %s ... " msgstr "" -#: nova/api/openstack/servers.py:257 +#: ../nova/virt/xenapi/vm_utils.py:655 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:190 #, python-format -msgid "compute.api::resume %s" +msgid "Creating VBD for VDI %s done." msgstr "" -#: nova/auth/dbdriver.py:84 +#: ../nova/virt/xenapi/vm_utils.py:657 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:192 #, python-format -msgid "User %s already exists" +msgid "Plugging VBD %s ... " msgstr "" -#: nova/auth/dbdriver.py:106 nova/auth/ldapdriver.py:207 +#: ../nova/virt/xenapi/vm_utils.py:659 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:194 #, python-format -msgid "Project can't be created because manager %s doesn't exist" +msgid "Plugging VBD %s done." 
msgstr "" -#: nova/auth/dbdriver.py:135 nova/auth/ldapdriver.py:204 +#: ../nova/virt/xenapi/vm_utils.py:661 #, python-format -msgid "Project can't be created because project %s already exists" +msgid "VBD %(vbd)s plugged as %(orig_dev)s" msgstr "" -#: nova/auth/dbdriver.py:157 nova/auth/ldapdriver.py:241 +#: ../nova/virt/xenapi/vm_utils.py:664 #, python-format -msgid "Project can't be modified because manager %s doesn't exist" +msgid "VBD %(vbd)s plugged into wrong dev, remapping to %(dev)s" msgstr "" -#: nova/auth/dbdriver.py:245 +#: ../nova/virt/xenapi/vm_utils.py:668 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:197 #, python-format -msgid "User \"%s\" not found" +msgid "Destroying VBD for VDI %s ... " msgstr "" -#: nova/auth/dbdriver.py:248 +#: ../nova/virt/xenapi/vm_utils.py:671 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:200 #, python-format -msgid "Project \"%s\" not found" +msgid "Destroying VBD for VDI %s done." msgstr "" -#: nova/auth/fakeldap.py:33 -msgid "Attempted to instantiate singleton" +#: ../nova/virt/xenapi/vm_utils.py:683 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:211 +msgid "VBD.unplug successful first time." msgstr "" -#: nova/auth/ldapdriver.py:181 -#, python-format -msgid "LDAP object for %s doesn't exist" +#: ../nova/virt/xenapi/vm_utils.py:688 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:216 +msgid "VBD.unplug rejected: retrying..." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:692 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:220 +msgid "VBD.unplug successful eventually." 
msgstr "" -#: nova/auth/ldapdriver.py:218 +#: ../nova/virt/xenapi/vm_utils.py:695 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:223 #, python-format -msgid "Project can't be created because user %s doesn't exist" +msgid "Ignoring XenAPI.Failure in VBD.unplug: %s" msgstr "" -#: nova/auth/ldapdriver.py:478 +#: ../nova/virt/xenapi/vm_utils.py:704 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:66 #, python-format -msgid "User %s is already a member of the group %s" +msgid "Ignoring XenAPI.Failure %s" msgstr "" -#: nova/auth/ldapdriver.py:507 +#: ../nova/virt/xenapi/vm_utils.py:735 #, python-format msgid "" -"Attempted to remove the last member of a group. Deleting the group at %s " -"instead." +"Writing partition table %(primary_first)d %(primary_last)d to %(dest)s..." msgstr "" -#: nova/auth/ldapdriver.py:528 +#: ../nova/virt/xenapi/vm_utils.py:747 #, python-format -msgid "Group at dn %s doesn't exist" +msgid "Writing partition table %s done." msgstr "" -#: nova/auth/manager.py:259 +#: ../nova/tests/test_rpc.py:89 #, python-format -msgid "Looking up user: %r" +msgid "Nested received %(queue)s, %(value)s" msgstr "" -#: nova/auth/manager.py:263 +#: ../nova/tests/test_rpc.py:95 #, python-format -msgid "Failed authorization for access key %s" +msgid "Nested return %s" msgstr "" -#: nova/auth/manager.py:264 +#: ../nova/tests/test_rpc.py:120 ../nova/tests/test_rpc.py:126 #, python-format -msgid "No user found for access key %s" +msgid "Received %s" msgstr "" -#: nova/auth/manager.py:270 -#, python-format -msgid "Using project name = user name (%s)" +#: ../nova/db/sqlalchemy/api.py:44 +msgid "Use of empty request context is deprecated" msgstr "" -#: nova/auth/manager.py:275 +#: ../nova/db/sqlalchemy/api.py:133 #, python-format -msgid "failed authorization: no project named %s (user=%s)" +msgid "No service for id %s" msgstr "" -#: nova/auth/manager.py:277 +#: ../nova/db/sqlalchemy/api.py:251 #, python-format -msgid "No project 
called %s could be found" +msgid "No service for %(host)s, %(binary)s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:592 +msgid "No fixed ips defined" msgstr "" -#: nova/auth/manager.py:281 +#: ../nova/db/sqlalchemy/api.py:608 #, python-format -msgid "Failed authorization: user %s not admin and not member of project %s" +msgid "No floating ip for address %s" msgstr "" -#: nova/auth/manager.py:283 +#: ../nova/db/sqlalchemy/api.py:629 #, python-format -msgid "User %s is not a member of project %s" +msgid "No address for instance %s" msgstr "" -#: nova/auth/manager.py:292 nova/auth/manager.py:303 +#: ../nova/db/sqlalchemy/api.py:961 #, python-format -msgid "Invalid signature for user %s" +msgid "no keypair for user %(user_id)s, name %(name)s" msgstr "" -#: nova/auth/manager.py:293 nova/auth/manager.py:304 -msgid "Signature does not match" +#: ../nova/db/sqlalchemy/api.py:1076 ../nova/db/sqlalchemy/api.py:1156 +#, python-format +msgid "No network for id %s" msgstr "" -#: nova/auth/manager.py:374 -msgid "Must specify project" +#: ../nova/db/sqlalchemy/api.py:1086 +msgid "No networks defined" msgstr "" -#: nova/auth/manager.py:408 +#: ../nova/db/sqlalchemy/api.py:1115 #, python-format -msgid "The %s role can not be found" +msgid "No network for bridge %s" msgstr "" -#: nova/auth/manager.py:410 +#: ../nova/db/sqlalchemy/api.py:1129 ../nova/db/sqlalchemy/api.py:1142 #, python-format -msgid "The %s role is global only" +msgid "No network for instance %s" msgstr "" -#: nova/auth/manager.py:412 +#: ../nova/db/sqlalchemy/api.py:1277 #, python-format -msgid "Adding role %s to user %s in project %s" +msgid "Token %s does not exist" msgstr "" -#: nova/auth/manager.py:438 +#: ../nova/db/sqlalchemy/api.py:1302 #, python-format -msgid "Removing role %s from user %s on project %s" +msgid "No quota for project_id %s" msgstr "" -#: nova/auth/manager.py:505 +#: ../nova/db/sqlalchemy/api.py:1455 ../nova/db/sqlalchemy/api.py:1501 +#: ../nova/api/ec2/__init__.py:323 #, python-format -msgid 
"Created project %s with manager %s" +msgid "Volume %s not found" msgstr "" -#: nova/auth/manager.py:523 +#: ../nova/db/sqlalchemy/api.py:1514 #, python-format -msgid "modifying project %s" +msgid "No export device found for volume %s" msgstr "" -#: nova/auth/manager.py:553 +#: ../nova/db/sqlalchemy/api.py:1527 #, python-format -msgid "Remove user %s from project %s" +msgid "No target id found for volume %s" msgstr "" -#: nova/auth/manager.py:581 +#: ../nova/db/sqlalchemy/api.py:1572 #, python-format -msgid "Deleting project %s" +msgid "No security group with id %s" msgstr "" -#: nova/auth/manager.py:637 +#: ../nova/db/sqlalchemy/api.py:1589 #, python-format -msgid "Created user %s (admin: %r)" +msgid "No security group named %(group_name)s for project: %(project_id)s" msgstr "" -#: nova/auth/manager.py:645 +#: ../nova/db/sqlalchemy/api.py:1682 #, python-format -msgid "Deleting user %s" +msgid "No secuity group rule with id %s" msgstr "" -#: nova/auth/manager.py:655 +#: ../nova/db/sqlalchemy/api.py:1756 #, python-format -msgid "Access Key change for user %s" +msgid "No user for id %s" msgstr "" -#: nova/auth/manager.py:657 +#: ../nova/db/sqlalchemy/api.py:1772 #, python-format -msgid "Secret Key change for user %s" +msgid "No user for access key %s" msgstr "" -#: nova/auth/manager.py:659 +#: ../nova/db/sqlalchemy/api.py:1834 #, python-format -msgid "Admin status set to %r for user %s" +msgid "No project with id %s" msgstr "" -#: nova/auth/manager.py:708 +#: ../nova/db/sqlalchemy/api.py:1979 #, python-format -msgid "No vpn data for project %s" +msgid "No console pool with id %(pool_id)s" msgstr "" -#: nova/cloudpipe/pipelib.py:45 -msgid "Template for script to run on cloudpipe instance boot" +#: ../nova/db/sqlalchemy/api.py:1996 +#, python-format +msgid "" +"No console pool of type %(console_type)s for compute host %(compute_host)s " +"on proxy host %(host)s" msgstr "" -#: nova/cloudpipe/pipelib.py:48 -msgid "Network to push into openvpn config" +#: 
../nova/db/sqlalchemy/api.py:2035 +#, python-format +msgid "No console for instance %(instance_id)s in pool %(pool_id)s" msgstr "" -#: nova/cloudpipe/pipelib.py:51 -msgid "Netmask to push into openvpn config" +#: ../nova/db/sqlalchemy/api.py:2057 +#, python-format +msgid "on instance %s" msgstr "" -#: nova/cloudpipe/pipelib.py:97 +#: ../nova/db/sqlalchemy/api.py:2058 #, python-format -msgid "Launching VPN for %s" +msgid "No console with id %(console_id)s %(idesc)s" msgstr "" -#: nova/compute/api.py:67 +#: ../nova/db/sqlalchemy/api.py:2078 ../nova/db/sqlalchemy/api.py:2097 #, python-format -msgid "Instance %d was not found in get_network_topic" +msgid "No zone with id %(zone_id)s" msgstr "" -#: nova/compute/api.py:73 +#: ../nova/virt/libvirt_conn.py:160 #, python-format -msgid "Instance %d has no host" +msgid "Checking state of %s" msgstr "" -#: nova/compute/api.py:92 +#: ../nova/virt/libvirt_conn.py:165 #, python-format -msgid "Quota exceeeded for %s, tried to run %s instances" +msgid "Current state of %(name)s was %(state)s." msgstr "" -#: nova/compute/api.py:94 +#: ../nova/virt/libvirt_conn.py:183 #, python-format -msgid "" -"Instance quota exceeded. You can only run %s more instances of this type." +msgid "Connecting to libvirt: %s" msgstr "" -#: nova/compute/api.py:109 -msgid "Creating a raw instance" +#: ../nova/virt/libvirt_conn.py:196 +msgid "Connection to libvirt broke" msgstr "" -#: nova/compute/api.py:156 +#: ../nova/virt/libvirt_conn.py:258 #, python-format -msgid "Going to run %s instances..." 
+msgid "instance %(instance_name)s: deleting instance files %(target)s" msgstr "" -#: nova/compute/api.py:180 +#: ../nova/virt/libvirt_conn.py:283 #, python-format -msgid "Casting to scheduler for %s/%s's instance %s" +msgid "Invalid device path %s" msgstr "" -#: nova/compute/api.py:279 +#: ../nova/virt/libvirt_conn.py:313 #, python-format -msgid "Going to try and terminate %s" +msgid "No disk at %s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:320 +msgid "Instance snapshotting is not supported for libvirtat this time" msgstr "" -#: nova/compute/api.py:283 +#: ../nova/virt/libvirt_conn.py:336 #, python-format -msgid "Instance %d was not found during terminate" +msgid "instance %s: rebooted" msgstr "" -#: nova/compute/api.py:288 +#: ../nova/virt/libvirt_conn.py:339 #, python-format -msgid "Instance %d is already being terminated" +msgid "_wait_for_reboot failed: %s" msgstr "" -#: nova/compute/api.py:450 +#: ../nova/virt/libvirt_conn.py:382 #, python-format -msgid "Invalid device specified: %s. Example device: /dev/vdb" +msgid "instance %s: rescued" msgstr "" -#: nova/compute/api.py:465 -msgid "Volume isn't attached to anything!" 
+#: ../nova/virt/libvirt_conn.py:385 +#, python-format +msgid "_wait_for_rescue failed: %s" msgstr "" -#: nova/compute/disk.py:71 +#: ../nova/virt/libvirt_conn.py:411 #, python-format -msgid "Input partition size not evenly divisible by sector size: %d / %d" +msgid "instance %s: is running" msgstr "" -#: nova/compute/disk.py:75 +#: ../nova/virt/libvirt_conn.py:422 #, python-format -msgid "Bytes for local storage not evenly divisible by sector size: %d / %d" +msgid "instance %s: booted" msgstr "" -#: nova/compute/disk.py:128 +#: ../nova/virt/libvirt_conn.py:425 ../nova/virt/xenapi/vmops.py:186 #, python-format -msgid "Could not attach image to loopback: %s" +msgid "instance %s: failed to boot" msgstr "" -#: nova/compute/disk.py:136 +#: ../nova/virt/libvirt_conn.py:436 #, python-format -msgid "Failed to load partition: %s" +msgid "virsh said: %r" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:440 +msgid "cool, it's a device" msgstr "" -#: nova/compute/disk.py:158 +#: ../nova/virt/libvirt_conn.py:448 #, python-format -msgid "Failed to mount filesystem: %s" +msgid "data: %(data)r, fpath: %(fpath)r" msgstr "" -#: nova/compute/instance_types.py:41 +#: ../nova/virt/libvirt_conn.py:456 #, python-format -msgid "Unknown instance type: %s" +msgid "Contents of file %(fpath)s: %(contents)r" msgstr "" -#: nova/compute/manager.py:69 +#: ../nova/virt/libvirt_conn.py:489 +msgid "Unable to find an open port" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:563 #, python-format -msgid "check_instance_lock: decorating: |%s|" +msgid "instance %s: Creating image" msgstr "" -#: nova/compute/manager.py:71 +#: ../nova/virt/libvirt_conn.py:646 #, python-format -msgid "check_instance_lock: arguments: |%s| |%s| |%s|" +msgid "instance %(inst_name)s: injecting key into image %(img_id)s" msgstr "" -#: nova/compute/manager.py:75 +#: ../nova/virt/libvirt_conn.py:649 #, python-format -msgid "check_instance_lock: locked: |%s|" +msgid "instance %(inst_name)s: injecting net into image %(img_id)s" msgstr 
"" -#: nova/compute/manager.py:77 +#. This could be a windows image, or a vmdk format disk +#: ../nova/virt/libvirt_conn.py:657 #, python-format -msgid "check_instance_lock: admin: |%s|" +msgid "" +"instance %(inst_name)s: ignoring error injecting data into image %(img_id)s " +"(%(e)s)" msgstr "" -#: nova/compute/manager.py:82 +#. TODO(termie): cache? +#: ../nova/virt/libvirt_conn.py:665 #, python-format -msgid "check_instance_lock: executing: |%s|" +msgid "instance %s: starting toXML method" msgstr "" -#: nova/compute/manager.py:86 +#: ../nova/virt/libvirt_conn.py:732 #, python-format -msgid "check_instance_lock: not executing |%s|" +msgid "instance %s: finished toXML method" msgstr "" -#: nova/compute/manager.py:157 -msgid "Instance has already been created" +#: ../nova/virt/libvirt_conn.py:751 +msgid "diagnostics are not supported for libvirt" msgstr "" -#: nova/compute/manager.py:158 +#: ../nova/virt/libvirt_conn.py:1225 #, python-format -msgid "instance %s: starting..." +msgid "Attempted to unfilter instance %s which is not filtered" msgstr "" -#: nova/compute/manager.py:197 +#: ../nova/api/ec2/metadatarequesthandler.py:76 #, python-format -msgid "instance %s: Failed to spawn" +msgid "Failed to get metadata for ip: %s" +msgstr "" + +#: ../nova/auth/fakeldap.py:33 +msgid "Attempted to instantiate singleton" msgstr "" -#: nova/compute/manager.py:211 nova/tests/test_cloud.py:228 +#: ../nova/network/api.py:39 #, python-format -msgid "Terminating instance %s" +msgid "Quota exceeeded for %s, tried to allocate address" msgstr "" -#: nova/compute/manager.py:217 +#: ../nova/network/api.py:42 +msgid "Address quota exceeded. 
You cannot allocate any more addresses" +msgstr "" + +#: ../nova/tests/test_volume.py:162 #, python-format -msgid "Disassociating address %s" +msgid "Target %s allocated" msgstr "" -#: nova/compute/manager.py:230 +#: ../nova/virt/images.py:70 #, python-format -msgid "Deallocating address %s" +msgid "Finished retreving %(url)s -- placed in %(path)s" +msgstr "" + +#: ../nova/scheduler/driver.py:66 +msgid "Must implement a fallback schedule" msgstr "" -#: nova/compute/manager.py:243 +#: ../nova/console/manager.py:70 +msgid "Adding console" +msgstr "" + +#: ../nova/console/manager.py:90 #, python-format -msgid "trying to destroy already destroyed instance: %s" +msgid "Tried to remove non-existant console %(console_id)s." +msgstr "" + +#: ../nova/api/direct.py:149 +msgid "not available" msgstr "" -#: nova/compute/manager.py:257 +#: ../nova/api/ec2/cloud.py:62 #, python-format -msgid "Rebooting instance %s" +msgid "The key_pair %s already exists" msgstr "" -#: nova/compute/manager.py:260 +#. TODO(vish): Do this with M2Crypto instead +#: ../nova/api/ec2/cloud.py:118 #, python-format -msgid "trying to reboot a non-running instance: %s (state: %s excepted: %s)" +msgid "Generating root CA: %s" msgstr "" -#: nova/compute/manager.py:286 +#: ../nova/api/ec2/cloud.py:303 #, python-format -msgid "instance %s: snapshotting" +msgid "Create key pair %s" msgstr "" -#: nova/compute/manager.py:289 +#: ../nova/api/ec2/cloud.py:311 #, python-format -msgid "" -"trying to snapshot a non-running instance: %s (state: %s excepted: %s)" +msgid "Delete key pair %s" msgstr "" -#: nova/compute/manager.py:301 +#: ../nova/api/ec2/cloud.py:386 #, python-format -msgid "instance %s: rescuing" +msgid "%s is not a valid ipProtocol" msgstr "" -#: nova/compute/manager.py:316 +#: ../nova/api/ec2/cloud.py:390 +msgid "Invalid port range" +msgstr "" + +#: ../nova/api/ec2/cloud.py:421 #, python-format -msgid "instance %s: unrescuing" +msgid "Revoke security group ingress %s" +msgstr "" + +#: 
../nova/api/ec2/cloud.py:430 ../nova/api/ec2/cloud.py:459 +msgid "Not enough parameters to build a valid rule." +msgstr "" + +#: ../nova/api/ec2/cloud.py:443 +msgid "No rule for the specified parameters." msgstr "" -#: nova/compute/manager.py:335 +#: ../nova/api/ec2/cloud.py:450 #, python-format -msgid "instance %s: pausing" +msgid "Authorize security group ingress %s" msgstr "" -#: nova/compute/manager.py:352 +#: ../nova/api/ec2/cloud.py:464 #, python-format -msgid "instance %s: unpausing" +msgid "This rule already exists in group %s" msgstr "" -#: nova/compute/manager.py:369 +#: ../nova/api/ec2/cloud.py:492 #, python-format -msgid "instance %s: retrieving diagnostics" +msgid "Create Security Group %s" msgstr "" -#: nova/compute/manager.py:382 +#: ../nova/api/ec2/cloud.py:495 #, python-format -msgid "instance %s: suspending" +msgid "group %s already exists" msgstr "" -#: nova/compute/manager.py:401 +#: ../nova/api/ec2/cloud.py:507 #, python-format -msgid "instance %s: resuming" +msgid "Delete security group %s" msgstr "" -#: nova/compute/manager.py:420 +#: ../nova/api/ec2/cloud.py:584 #, python-format -msgid "instance %s: locking" +msgid "Create volume of %s GB" msgstr "" -#: nova/compute/manager.py:432 +#: ../nova/api/ec2/cloud.py:612 #, python-format -msgid "instance %s: unlocking" +msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" msgstr "" -#: nova/compute/manager.py:442 +#: ../nova/api/ec2/cloud.py:629 #, python-format -msgid "instance %s: getting locked state" +msgid "Detach volume %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:761 +msgid "Allocate address" msgstr "" -#: nova/compute/manager.py:462 +#: ../nova/api/ec2/cloud.py:766 #, python-format -msgid "instance %s: attaching volume %s to %s" +msgid "Release address %s" msgstr "" -#: nova/compute/manager.py:478 +#: ../nova/api/ec2/cloud.py:771 #, python-format -msgid "instance %s: attach failed %s, removing" +msgid "Associate address %(public_ip)s to instance %(instance_id)s" 
msgstr "" -#: nova/compute/manager.py:493 +#: ../nova/api/ec2/cloud.py:780 #, python-format -msgid "Detach volume %s from mountpoint %s on instance %s" +msgid "Disassociate address %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:807 +msgid "Going to start terminating instances" msgstr "" -#: nova/compute/manager.py:497 +#: ../nova/api/ec2/cloud.py:815 #, python-format -msgid "Detaching volume from unknown instance %s" +msgid "Reboot instance %r" msgstr "" -#: nova/compute/monitor.py:259 +#: ../nova/api/ec2/cloud.py:867 #, python-format -msgid "updating %s..." +msgid "De-registering image %s" msgstr "" -#: nova/compute/monitor.py:289 -msgid "unexpected error during update" +#: ../nova/api/ec2/cloud.py:875 +#, python-format +msgid "Registered image %(image_location)s with id %(image_id)s" msgstr "" -#: nova/compute/monitor.py:355 +#: ../nova/api/ec2/cloud.py:882 ../nova/api/ec2/cloud.py:900 #, python-format -msgid "Cannot get blockstats for \"%s\" on \"%s\"" +msgid "attribute not supported: %s" msgstr "" -#: nova/compute/monitor.py:377 +#: ../nova/api/ec2/cloud.py:890 #, python-format -msgid "Cannot get ifstats for \"%s\" on \"%s\"" +msgid "invalid id: %s" msgstr "" -#: nova/compute/monitor.py:412 -msgid "unexpected exception getting connection" +#: ../nova/api/ec2/cloud.py:903 +msgid "user or group not specified" +msgstr "" + +#: ../nova/api/ec2/cloud.py:905 +msgid "only group \"all\" is supported" +msgstr "" + +#: ../nova/api/ec2/cloud.py:907 +msgid "operation_type must be add or remove" msgstr "" -#: nova/compute/monitor.py:427 +#: ../nova/api/ec2/cloud.py:908 #, python-format -msgid "Found instance: %s" +msgid "Updating image %s publicity" msgstr "" -#: nova/db/sqlalchemy/api.py:43 -msgid "Use of empty request context is deprecated" +#: ../bin/nova-api.py:52 +#, python-format +msgid "Using paste.deploy config at: %s" msgstr "" -#: nova/db/sqlalchemy/api.py:132 +#: ../bin/nova-api.py:57 #, python-format -msgid "No service for id %s" +msgid "No paste configuration 
for app: %s" msgstr "" -#: nova/db/sqlalchemy/api.py:229 +#: ../bin/nova-api.py:59 #, python-format -msgid "No service for %s, %s" +msgid "" +"App Config: %(api)s\n" +"%(config)r" msgstr "" -#: nova/db/sqlalchemy/api.py:574 +#: ../bin/nova-api.py:64 #, python-format -msgid "No floating ip for address %s" +msgid "Running %s API" msgstr "" -#: nova/db/sqlalchemy/api.py:668 +#: ../bin/nova-api.py:69 #, python-format -msgid "No instance for id %s" +msgid "No known API applications configured in %s." msgstr "" -#: nova/db/sqlalchemy/api.py:758 nova/virt/libvirt_conn.py:598 -#: nova/virt/xenapi/volumeops.py:48 nova/virt/xenapi/volumeops.py:103 +#: ../bin/nova-api.py:83 #, python-format -msgid "Instance %s not found" +msgid "Starting nova-api node (version %s)" msgstr "" -#: nova/db/sqlalchemy/api.py:891 +#: ../bin/nova-api.py:89 #, python-format -msgid "no keypair for user %s, name %s" +msgid "No paste configuration found for: %s" msgstr "" -#: nova/db/sqlalchemy/api.py:1006 nova/db/sqlalchemy/api.py:1064 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:84 #, python-format -msgid "No network for id %s" +msgid "Argument %(key)s value %(value)s is too short." msgstr "" -#: nova/db/sqlalchemy/api.py:1036 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:89 #, python-format -msgid "No network for bridge %s" +msgid "Argument %(key)s value %(value)s contains invalid characters." msgstr "" -#: nova/db/sqlalchemy/api.py:1050 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:94 #, python-format -msgid "No network for instance %s" +msgid "Argument %(key)s value %(value)s starts with a hyphen." msgstr "" -#: nova/db/sqlalchemy/api.py:1180 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:102 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:130 #, python-format -msgid "Token %s does not exist" +msgid "Argument %s is required." 
msgstr "" -#: nova/db/sqlalchemy/api.py:1205 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:117 #, python-format -msgid "No quota for project_id %s" +msgid "" +"Argument %(key)s may not take value %(value)s. Valid values are ['true', " +"'false']." msgstr "" -#: nova/db/sqlalchemy/api.py:1356 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:163 #, python-format -msgid "No volume for id %s" +msgid "" +"Created VDI %(vdi_ref)s (%(label)s, %(size)s, %(read_only)s) on %(sr_ref)s." msgstr "" -#: nova/db/sqlalchemy/api.py:1401 +#: ../nova/virt/xenapi/vmops.py:67 #, python-format -msgid "Volume %s not found" +msgid "Attempted to create non-unique name %s" msgstr "" -#: nova/db/sqlalchemy/api.py:1413 +#: ../nova/virt/xenapi/vmops.py:73 #, python-format -msgid "No export device found for volume %s" +msgid "instance %(name)s: not enough free memory" msgstr "" -#: nova/db/sqlalchemy/api.py:1426 +#: ../nova/virt/xenapi/vmops.py:148 #, python-format -msgid "No target id found for volume %s" +msgid "Starting VM %s..." msgstr "" -#: nova/db/sqlalchemy/api.py:1471 +#: ../nova/virt/xenapi/vmops.py:151 #, python-format -msgid "No security group with id %s" +msgid "Spawning VM %(instance_name)s created %(vm_ref)s." 
msgstr "" -#: nova/db/sqlalchemy/api.py:1488 +#: ../nova/virt/xenapi/vmops.py:162 #, python-format -msgid "No security group named %s for project: %s" +msgid "Invalid value for onset_files: '%s'" msgstr "" -#: nova/db/sqlalchemy/api.py:1576 +#: ../nova/virt/xenapi/vmops.py:167 #, python-format -msgid "No secuity group rule with id %s" +msgid "Injecting file path: '%s'" msgstr "" -#: nova/db/sqlalchemy/api.py:1650 +#: ../nova/virt/xenapi/vmops.py:180 #, python-format -msgid "No user for id %s" +msgid "Instance %s: booted" msgstr "" -#: nova/db/sqlalchemy/api.py:1666 +#: ../nova/virt/xenapi/vmops.py:232 #, python-format -msgid "No user for access key %s" +msgid "Instance not present %s" msgstr "" -#: nova/db/sqlalchemy/api.py:1728 +#. TODO(sirp): Add quiesce and VSS locking support when Windows support +#. is added +#: ../nova/virt/xenapi/vmops.py:261 #, python-format -msgid "No project with id %s" +msgid "Starting snapshot for VM %s" msgstr "" -#: nova/image/glance.py:78 +#: ../nova/virt/xenapi/vmops.py:269 #, python-format -msgid "Parallax returned HTTP error %d from request for /images" +msgid "Unable to Snapshot %(vm_ref)s: %(exc)s" msgstr "" -#: nova/image/glance.py:97 +#: ../nova/virt/xenapi/vmops.py:280 #, python-format -msgid "Parallax returned HTTP error %d from request for /images/detail" +msgid "Finished snapshot and upload for VM %s" msgstr "" -#: nova/image/s3.py:82 +#: ../nova/virt/xenapi/vmops.py:356 #, python-format -msgid "Image %s could not be found" +msgid "VM %(vm)s already halted, skipping shutdown..." +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:389 +msgid "Removing kernel/ramdisk files" msgstr "" -#: nova/network/api.py:39 +#: ../nova/virt/xenapi/vmops.py:399 +msgid "kernel/ramdisk files removed" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:561 #, python-format -msgid "Quota exceeeded for %s, tried to allocate address" +msgid "" +"TIMEOUT: The call to %(method)s timed out. 
VM id=%(instance_id)s; " +"args=%(strargs)s" msgstr "" -#: nova/network/api.py:42 -msgid "Address quota exceeded. You cannot allocate any more addresses" +#: ../nova/virt/xenapi/vmops.py:564 +#, python-format +msgid "" +"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. VM " +"id=%(instance_id)s; args=%(strargs)s" msgstr "" -#: nova/network/linux_net.py:176 +#: ../nova/virt/xenapi/vmops.py:569 #, python-format -msgid "Starting VLAN inteface %s" +msgid "" +"The call to %(method)s returned an error: %(e)s. VM id=%(instance_id)s; " +"args=%(strargs)s" msgstr "" -#: nova/network/linux_net.py:186 +#: ../nova/virt/xenapi/vmops.py:760 #, python-format -msgid "Starting Bridge interface for %s" +msgid "OpenSSL error: %s" msgstr "" -#: nova/network/linux_net.py:254 +#: ../nova/tests/test_compute.py:148 #, python-format -msgid "Hupping dnsmasq threw %s" +msgid "Running instances: %s" msgstr "" -#: nova/network/linux_net.py:256 +#: ../nova/tests/test_compute.py:154 #, python-format -msgid "Pid %d is stale, relaunching dnsmasq" +msgid "After terminating instances: %s" +msgstr "" + +#: ../nova/cloudpipe/pipelib.py:45 +msgid "Template for script to run on cloudpipe instance boot" +msgstr "" + +#: ../nova/cloudpipe/pipelib.py:48 +msgid "Network to push into openvpn config" msgstr "" -#: nova/network/linux_net.py:334 +#: ../nova/cloudpipe/pipelib.py:51 +msgid "Netmask to push into openvpn config" +msgstr "" + +#: ../nova/cloudpipe/pipelib.py:97 #, python-format -msgid "Killing dnsmasq threw %s" +msgid "Launching VPN for %s" msgstr "" -#: nova/network/manager.py:135 -msgid "setting network host" +#: ../nova/db/sqlalchemy/migration.py:35 +msgid "python-migrate is not installed. Exiting." msgstr "" -#: nova/network/manager.py:190 +#: ../nova/image/s3.py:99 #, python-format -msgid "Leasing IP %s" +msgid "Image %s could not be found" msgstr "" -#: nova/network/manager.py:194 +#: ../nova/api/ec2/__init__.py:121 +msgid "Too many failed authentications." 
+msgstr "" + +#: ../nova/api/ec2/__init__.py:131 #, python-format -msgid "IP %s leased that isn't associated" +msgid "" +"Access key %(access_key)s has had %(failures)d failed authentications and " +"will be locked out for %(lock_mins)d minutes." msgstr "" -#: nova/network/manager.py:197 +#: ../nova/api/ec2/__init__.py:169 ../nova/objectstore/handler.py:140 #, python-format -msgid "IP %s leased to bad mac %s vs %s" +msgid "Authentication Failure: %s" msgstr "" -#: nova/network/manager.py:205 +#: ../nova/api/ec2/__init__.py:182 #, python-format -msgid "IP %s leased that was already deallocated" +msgid "Authenticated Request For %(uname)s:%(pname)s)" msgstr "" -#: nova/network/manager.py:214 +#: ../nova/api/ec2/__init__.py:207 #, python-format -msgid "IP %s released that isn't associated" +msgid "action: %s" msgstr "" -#: nova/network/manager.py:217 +#: ../nova/api/ec2/__init__.py:209 #, python-format -msgid "IP %s released from bad mac %s vs %s" +msgid "arg: %(key)s\t\tval: %(value)s" msgstr "" -#: nova/network/manager.py:220 +#: ../nova/api/ec2/__init__.py:281 #, python-format -msgid "IP %s released that was not leased" +msgid "" +"Unauthorized request for controller=%(controller)s and action=%(action)s" msgstr "" -#: nova/network/manager.py:442 +#: ../nova/api/ec2/__init__.py:314 #, python-format -msgid "Dissassociated %s stale fixed ip(s)" +msgid "InstanceNotFound raised: %s" msgstr "" -#: nova/objectstore/handler.py:106 +#: ../nova/api/ec2/__init__.py:320 #, python-format -msgid "Unknown S3 value type %r" +msgid "VolumeNotFound raised: %s" msgstr "" -#: nova/objectstore/handler.py:137 -msgid "Authenticated request" +#: ../nova/api/ec2/__init__.py:326 +#, python-format +msgid "NotFound raised: %s" msgstr "" -#: nova/objectstore/handler.py:182 -msgid "List of buckets requested" +#: ../nova/api/ec2/__init__.py:329 +#, python-format +msgid "ApiError raised: %s" msgstr "" -#: nova/objectstore/handler.py:209 +#: ../nova/api/ec2/__init__.py:338 #, python-format -msgid 
"List keys for bucket %s" +msgid "Unexpected error raised: %s" msgstr "" -#: nova/objectstore/handler.py:217 +#: ../nova/api/ec2/__init__.py:343 +msgid "An unknown error has occurred. Please try your request again." +msgstr "" + +#: ../nova/auth/dbdriver.py:84 #, python-format -msgid "Unauthorized attempt to access bucket %s" +msgid "User %s already exists" msgstr "" -#: nova/objectstore/handler.py:235 +#: ../nova/auth/dbdriver.py:106 ../nova/auth/ldapdriver.py:232 #, python-format -msgid "Creating bucket %s" +msgid "Project can't be created because manager %s doesn't exist" msgstr "" -#: nova/objectstore/handler.py:245 +#: ../nova/auth/dbdriver.py:122 ../nova/auth/ldapdriver.py:243 #, python-format -msgid "Deleting bucket %s" +msgid "Project can't be created because user %s doesn't exist" msgstr "" -#: nova/objectstore/handler.py:249 +#: ../nova/auth/dbdriver.py:135 ../nova/auth/ldapdriver.py:229 #, python-format -msgid "Unauthorized attempt to delete bucket %s" +msgid "Project can't be created because project %s already exists" msgstr "" -#: nova/objectstore/handler.py:271 +#: ../nova/auth/dbdriver.py:157 ../nova/auth/ldapdriver.py:268 #, python-format -msgid "Getting object: %s / %s" +msgid "Project can't be modified because manager %s doesn't exist" msgstr "" -#: nova/objectstore/handler.py:274 +#: ../nova/auth/dbdriver.py:245 #, python-format -msgid "Unauthorized attempt to get object %s from bucket %s" +msgid "User \"%s\" not found" msgstr "" -#: nova/objectstore/handler.py:292 +#: ../nova/auth/dbdriver.py:248 #, python-format -msgid "Putting object: %s / %s" +msgid "Project \"%s\" not found" +msgstr "" + +#: ../nova/virt/xenapi_conn.py:129 +msgid "" +"Must specify xenapi_connection_url, xenapi_connection_username (optionally), " +"and xenapi_connection_password to use connection_type=xenapi" msgstr "" -#: nova/objectstore/handler.py:295 +#: ../nova/virt/xenapi_conn.py:311 #, python-format -msgid "Unauthorized attempt to upload object %s to bucket %s" +msgid 
"Task [%(name)s] %(task)s status: success %(result)s" msgstr "" -#: nova/objectstore/handler.py:314 +#: ../nova/virt/xenapi_conn.py:317 #, python-format -msgid "Deleting object: %s / %s" +msgid "Task [%(name)s] %(task)s status: %(status)s %(error_info)s" msgstr "" -#: nova/objectstore/handler.py:393 +#: ../nova/virt/xenapi_conn.py:331 ../nova/virt/xenapi_conn.py:344 #, python-format -msgid "Not authorized to upload image: invalid directory %s" +msgid "Got exception: %s" msgstr "" -#: nova/objectstore/handler.py:401 +#: ../nova/compute/monitor.py:259 #, python-format -msgid "Not authorized to upload image: unauthorized bucket %s" +msgid "updating %s..." +msgstr "" + +#: ../nova/compute/monitor.py:289 +msgid "unexpected error during update" msgstr "" -#: nova/objectstore/handler.py:406 +#: ../nova/compute/monitor.py:356 #, python-format -msgid "Starting image upload: %s" +msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\"" msgstr "" -#: nova/objectstore/handler.py:420 +#: ../nova/compute/monitor.py:379 #, python-format -msgid "Not authorized to update attributes of image %s" +msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\"" msgstr "" -#: nova/objectstore/handler.py:428 +#: ../nova/compute/monitor.py:414 +msgid "unexpected exception getting connection" +msgstr "" + +#: ../nova/compute/monitor.py:429 #, python-format -msgid "Toggling publicity flag of image %s %r" +msgid "Found instance: %s" msgstr "" -#: nova/objectstore/handler.py:433 +#: ../nova/volume/san.py:67 #, python-format -msgid "Updating user fields on image %s" +msgid "Could not find iSCSI export for volume %s" msgstr "" -#: nova/objectstore/handler.py:447 +#: ../nova/api/ec2/apirequest.py:100 #, python-format -msgid "Unauthorized attempt to delete image %s" +msgid "" +"Unsupported API request: controller = %(controller)s, action = %(action)s" msgstr "" -#: nova/objectstore/handler.py:452 +#: ../nova/api/openstack/__init__.py:55 #, python-format -msgid "Deleted image: %s" +msgid 
"Caught error: %s" msgstr "" -#: nova/scheduler/chance.py:37 nova/scheduler/simple.py:73 -#: nova/scheduler/simple.py:106 nova/scheduler/simple.py:118 -msgid "No hosts found" +#: ../nova/api/openstack/__init__.py:76 +msgid "Including admin operations in API." msgstr "" -#: nova/scheduler/driver.py:66 -msgid "Must implement a fallback schedule" +#: ../nova/console/xvp.py:99 +msgid "Rebuilding xvp conf" msgstr "" -#: nova/scheduler/manager.py:69 +#: ../nova/console/xvp.py:116 #, python-format -msgid "Casting to %s %s for %s" +msgid "Re-wrote %s" msgstr "" -#: nova/scheduler/simple.py:63 -msgid "All hosts have too many cores" +#: ../nova/console/xvp.py:121 +msgid "Stopping xvp" msgstr "" -#: nova/scheduler/simple.py:95 -msgid "All hosts have too many gigabytes" +#: ../nova/console/xvp.py:134 +msgid "Starting xvp" msgstr "" -#: nova/scheduler/simple.py:115 -msgid "All hosts have too many networks" +#: ../nova/console/xvp.py:141 +#, python-format +msgid "Error starting xvp: %s" msgstr "" -#: nova/tests/test_cloud.py:198 -msgid "Can't test instances without a real virtual env." +#: ../nova/console/xvp.py:144 +msgid "Restarting xvp" msgstr "" -#: nova/tests/test_cloud.py:210 -#, python-format -msgid "Need to watch instance %s until it's running..." +#: ../nova/console/xvp.py:146 +msgid "xvp not running..." msgstr "" -#: nova/tests/test_compute.py:104 -#, python-format -msgid "Running instances: %s" +#: ../bin/nova-manage.py:272 +msgid "" +"The above error may show that the database has not been created.\n" +"Please create a database using nova-manage sync db before running this " +"command." msgstr "" -#: nova/tests/test_compute.py:110 -#, python-format -msgid "After terminating instances: %s" +#: ../bin/nova-manage.py:426 +msgid "" +"No more networks available. 
If this is a new installation, you need\n" +"to call something like this:\n" +"\n" +" nova-manage network create 10.0.0.0/8 10 64\n" +"\n" +msgstr "" + +#: ../bin/nova-manage.py:431 +msgid "" +"The above error may show that the certificate db has not been created.\n" +"Please create a database by running a nova-api server on this host." +msgstr "" + +#: ../bin/nova-manage.py:447 ../bin/nova-manage.py:536 +msgid "network" +msgstr "" + +#: ../bin/nova-manage.py:448 +msgid "IP address" msgstr "" -#: nova/tests/test_rpc.py:89 +#: ../bin/nova-manage.py:449 +msgid "MAC address" +msgstr "" + +#: ../bin/nova-manage.py:450 +msgid "hostname" +msgstr "" + +#: ../bin/nova-manage.py:451 +msgid "host" +msgstr "" + +#: ../bin/nova-manage.py:537 +msgid "netmask" +msgstr "" + +#: ../bin/nova-manage.py:538 +msgid "start address" +msgstr "" + +#: ../nova/virt/disk.py:69 #, python-format -msgid "Nested received %s, %s" +msgid "Failed to load partition: %s" msgstr "" -#: nova/tests/test_rpc.py:94 +#: ../nova/virt/disk.py:91 #, python-format -msgid "Nested return %s" +msgid "Failed to mount filesystem: %s" msgstr "" -#: nova/tests/test_rpc.py:119 nova/tests/test_rpc.py:125 +#: ../nova/virt/disk.py:124 #, python-format -msgid "Received %s" +msgid "nbd device %s did not show up" msgstr "" -#: nova/tests/test_volume.py:162 +#: ../nova/virt/disk.py:128 #, python-format -msgid "Target %s allocated" +msgid "Could not attach image to loopback: %s" msgstr "" -#: nova/virt/connection.py:73 -msgid "Failed to open connection to the hypervisor" +#: ../nova/virt/disk.py:151 +msgid "No free nbd devices" msgstr "" -#: nova/virt/fake.py:210 +#: ../doc/ext/nova_todo.py:46 #, python-format -msgid "Instance %s Not Found" +msgid "%(filename)s, line %(line_info)d" msgstr "" -#: nova/virt/hyperv.py:118 +#. 
FIXME(chiradeep): implement this +#: ../nova/virt/hyperv.py:118 msgid "In init host" msgstr "" -#: nova/virt/hyperv.py:131 +#: ../nova/virt/hyperv.py:131 #, python-format msgid "Attempt to create duplicate vm %s" msgstr "" -#: nova/virt/hyperv.py:148 +#: ../nova/virt/hyperv.py:148 #, python-format msgid "Starting VM %s " msgstr "" -#: nova/virt/hyperv.py:150 +#: ../nova/virt/hyperv.py:150 #, python-format msgid "Started VM %s " msgstr "" -#: nova/virt/hyperv.py:152 +#: ../nova/virt/hyperv.py:152 #, python-format msgid "spawn vm failed: %s" msgstr "" -#: nova/virt/hyperv.py:169 +#: ../nova/virt/hyperv.py:169 #, python-format msgid "Failed to create VM %s" msgstr "" -#: nova/virt/hyperv.py:171 nova/virt/xenapi/vm_utils.py:125 -#, python-format -msgid "Created VM %s..." -msgstr "" - -#: nova/virt/hyperv.py:188 +#: ../nova/virt/hyperv.py:188 #, python-format msgid "Set memory for vm %s..." msgstr "" -#: nova/virt/hyperv.py:198 +#: ../nova/virt/hyperv.py:198 #, python-format msgid "Set vcpus for vm %s..." 
msgstr "" -#: nova/virt/hyperv.py:202 +#: ../nova/virt/hyperv.py:202 #, python-format -msgid "Creating disk for %s by attaching disk file %s" +msgid "Creating disk for %(vm_name)s by attaching disk file %(vhdfile)s" msgstr "" -#: nova/virt/hyperv.py:227 +#: ../nova/virt/hyperv.py:227 #, python-format msgid "Failed to add diskdrive to VM %s" msgstr "" -#: nova/virt/hyperv.py:230 +#: ../nova/virt/hyperv.py:230 #, python-format msgid "New disk drive path is %s" msgstr "" -#: nova/virt/hyperv.py:247 +#: ../nova/virt/hyperv.py:247 #, python-format msgid "Failed to add vhd file to VM %s" msgstr "" -#: nova/virt/hyperv.py:249 +#: ../nova/virt/hyperv.py:249 #, python-format msgid "Created disk for %s" msgstr "" -#: nova/virt/hyperv.py:253 +#: ../nova/virt/hyperv.py:253 #, python-format msgid "Creating nic for %s " msgstr "" -#: nova/virt/hyperv.py:272 +#: ../nova/virt/hyperv.py:272 msgid "Failed creating a port on the external vswitch" msgstr "" -#: nova/virt/hyperv.py:273 +#: ../nova/virt/hyperv.py:273 #, python-format msgid "Failed creating port for %s" msgstr "" -#: nova/virt/hyperv.py:275 +#: ../nova/virt/hyperv.py:276 #, python-format -msgid "Created switch port %s on switch %s" +msgid "Created switch port %(vm_name)s on switch %(ext_path)s" msgstr "" -#: nova/virt/hyperv.py:285 +#: ../nova/virt/hyperv.py:286 #, python-format msgid "Failed to add nic to VM %s" msgstr "" -#: nova/virt/hyperv.py:287 +#: ../nova/virt/hyperv.py:288 #, python-format msgid "Created nic for %s " msgstr "" -#: nova/virt/hyperv.py:320 +#: ../nova/virt/hyperv.py:321 #, python-format msgid "WMI job failed: %s" msgstr "" -#: nova/virt/hyperv.py:322 +#: ../nova/virt/hyperv.py:325 #, python-format -msgid "WMI job succeeded: %s, Elapsed=%s " +msgid "WMI job succeeded: %(desc)s, Elapsed=%(elap)s " msgstr "" -#: nova/virt/hyperv.py:358 +#: ../nova/virt/hyperv.py:361 #, python-format msgid "Got request to destroy vm %s" msgstr "" -#: nova/virt/hyperv.py:383 +#: ../nova/virt/hyperv.py:386 #, 
python-format msgid "Failed to destroy vm %s" msgstr "" -#: nova/virt/hyperv.py:389 +#: ../nova/virt/hyperv.py:393 #, python-format -msgid "Del: disk %s vm %s" +msgid "Del: disk %(vhdfile)s vm %(instance_name)s" msgstr "" -#: nova/virt/hyperv.py:405 +#: ../nova/virt/hyperv.py:415 #, python-format msgid "" -"Got Info for vm %s: state=%s, mem=%s, num_cpu=%s, " -"cpu_time=%s" +"Got Info for vm %(instance_id)s: state=%(state)s, mem=%(memusage)s, " +"num_cpu=%(numprocs)s, cpu_time=%(uptime)s" msgstr "" -#: nova/virt/hyperv.py:424 nova/virt/xenapi/vm_utils.py:301 +#: ../nova/virt/hyperv.py:451 #, python-format -msgid "duplicate name found: %s" +msgid "Successfully changed vm state of %(vm_name)s to %(req_state)s" +msgstr "" + +#: ../nova/virt/hyperv.py:454 +#, python-format +msgid "Failed to change vm state of %(vm_name)s to %(req_state)s" msgstr "" -#: nova/virt/hyperv.py:444 +#: ../nova/compute/api.py:71 #, python-format -msgid "Successfully changed vm state of %s to %s" +msgid "Instance %d was not found in get_network_topic" msgstr "" -#: nova/virt/hyperv.py:447 nova/virt/hyperv.py:449 +#: ../nova/compute/api.py:77 #, python-format -msgid "Failed to change vm state of %s to %s" +msgid "Instance %d has no host" msgstr "" -#: nova/virt/images.py:70 +#: ../nova/compute/api.py:97 #, python-format -msgid "Finished retreving %s -- placed in %s" +msgid "Quota exceeeded for %(pid)s, tried to run %(min_count)s instances" msgstr "" -#: nova/virt/libvirt_conn.py:144 +#: ../nova/compute/api.py:99 #, python-format -msgid "Connecting to libvirt: %s" +msgid "" +"Instance quota exceeded. You can only run %s more instances of this type." msgstr "" -#: nova/virt/libvirt_conn.py:157 -msgid "Connection to libvirt broke" +#: ../nova/compute/api.py:112 +msgid "Creating a raw instance" msgstr "" -#: nova/virt/libvirt_conn.py:229 +#: ../nova/compute/api.py:160 #, python-format -msgid "instance %s: deleting instance files %s" +msgid "Going to run %s instances..." 
msgstr "" -#: nova/virt/libvirt_conn.py:271 +#: ../nova/compute/api.py:187 #, python-format -msgid "No disk at %s" +msgid "Casting to scheduler for %(pid)s/%(uid)s's instance %(instance_id)s" msgstr "" -#: nova/virt/libvirt_conn.py:278 -msgid "Instance snapshotting is not supported for libvirtat this time" +#: ../nova/compute/api.py:292 +#, python-format +msgid "Going to try to terminate %s" msgstr "" -#: nova/virt/libvirt_conn.py:294 +#: ../nova/compute/api.py:296 #, python-format -msgid "instance %s: rebooted" +msgid "Instance %d was not found during terminate" msgstr "" -#: nova/virt/libvirt_conn.py:297 +#: ../nova/compute/api.py:301 #, python-format -msgid "_wait_for_reboot failed: %s" +msgid "Instance %d is already being terminated" msgstr "" -#: nova/virt/libvirt_conn.py:340 +#: ../nova/compute/api.py:481 #, python-format -msgid "instance %s: rescued" +msgid "Invalid device specified: %s. Example device: /dev/vdb" +msgstr "" + +#: ../nova/compute/api.py:496 +msgid "Volume isn't attached to anything!" msgstr "" -#: nova/virt/libvirt_conn.py:343 +#: ../nova/rpc.py:98 #, python-format -msgid "_wait_for_rescue failed: %s" +msgid "" +"AMQP server on %(fl_host)s:%(fl_port)d is unreachable. Trying again in " +"%(fl_intv)d seconds." msgstr "" -#: nova/virt/libvirt_conn.py:370 +#: ../nova/rpc.py:103 #, python-format -msgid "instance %s: is running" +msgid "Unable to connect to AMQP server after %d tries. Shutting down." msgstr "" +"Impossibile connettersi al server AMQP dopo %d tentativi. Terminando " +"l'applicazione." 
+ +#: ../nova/rpc.py:122 +msgid "Reconnected to queue" +msgstr "Riconnesso alla coda" + +#: ../nova/rpc.py:129 +msgid "Failed to fetch message from queue" +msgstr "Impossibile prelevare il messaggio dalla coda" -#: nova/virt/libvirt_conn.py:381 +#: ../nova/rpc.py:159 #, python-format -msgid "instance %s: booted" +msgid "Initing the Adapter Consumer for %s" +msgstr "Inizializzando il Consumer Adapter per %s" + +#: ../nova/rpc.py:178 +#, python-format +msgid "received %s" +msgstr "ricevuto %s" + +#. NOTE(vish): we may not want to ack here, but that means that bad +#. messages stay in the queue indefinitely, so for now +#. we just log the message and send an error string +#. back to the caller +#: ../nova/rpc.py:191 +#, python-format +msgid "no method for message: %s" +msgstr "nessun metodo per il messaggio: %s" + +#: ../nova/rpc.py:192 +#, python-format +msgid "No method for message: %s" +msgstr "nessun metodo per il messagggio: %s" + +#: ../nova/rpc.py:253 +#, python-format +msgid "Returning exception %s to caller" +msgstr "Sollevando eccezione %s al chiamante" + +#: ../nova/rpc.py:294 +#, python-format +msgid "unpacked context: %s" +msgstr "contesto decompresso: %s" + +#: ../nova/rpc.py:313 +msgid "Making asynchronous call..." +msgstr "Facendo chiamata asincrona..." + +#: ../nova/rpc.py:316 +#, python-format +msgid "MSG_ID is %s" +msgstr "MSG_ID é %s" + +#: ../nova/rpc.py:354 +msgid "Making asynchronous cast..." msgstr "" -#: nova/virt/libvirt_conn.py:384 nova/virt/xenapi/vmops.py:116 +#: ../nova/rpc.py:364 #, python-format -msgid "instance %s: failed to boot" +msgid "response %s" +msgstr "risposta %s" + +#: ../nova/rpc.py:373 +#, python-format +msgid "topic is %s" +msgstr "argomento é %s" + +#: ../nova/rpc.py:374 +#, python-format +msgid "message %s" +msgstr "messaggio %s" + +#: ../nova/volume/driver.py:78 +#, python-format +msgid "Recovering from a failed execute. 
Try number %s" msgstr "" -#: nova/virt/libvirt_conn.py:395 +#: ../nova/volume/driver.py:87 #, python-format -msgid "virsh said: %r" +msgid "volume group %s doesn't exist" msgstr "" -#: nova/virt/libvirt_conn.py:399 -msgid "cool, it's a device" +#: ../nova/volume/driver.py:220 +#, python-format +msgid "FAKE AOE: %s" +msgstr "" + +#: ../nova/volume/driver.py:233 +msgid "Skipping ensure_export. No iscsi_target " msgstr "" -#: nova/virt/libvirt_conn.py:407 +#: ../nova/volume/driver.py:279 ../nova/volume/driver.py:288 +msgid "Skipping remove_export. No iscsi_target " +msgstr "" + +#: ../nova/volume/driver.py:347 #, python-format -msgid "data: %r, fpath: %r" +msgid "FAKE ISCSI: %s" msgstr "" -#: nova/virt/libvirt_conn.py:415 +#: ../nova/volume/driver.py:359 #, python-format -msgid "Contents of file %s: %r" +msgid "rbd has no pool %s" msgstr "" -#: nova/virt/libvirt_conn.py:449 +#: ../nova/volume/driver.py:414 #, python-format -msgid "instance %s: Creating image" +msgid "Sheepdog is not working: %s" msgstr "" -#: nova/virt/libvirt_conn.py:505 +#: ../nova/volume/driver.py:416 +msgid "Sheepdog is not working" +msgstr "" + +#: ../nova/wsgi.py:68 #, python-format -msgid "instance %s: injecting key into image %s" +msgid "Starting %(arg0)s on %(host)s:%(port)s" +msgstr "" + +#: ../nova/wsgi.py:147 +msgid "You must implement __call__" +msgstr "" + +#: ../bin/nova-instancemonitor.py:55 +msgid "Starting instance monitor" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:58 +msgid "leasing ip" msgstr "" -#: nova/virt/libvirt_conn.py:508 +#: ../bin/nova-dhcpbridge.py:73 +msgid "Adopted old lease or got a change of mac/hostname" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:80 +msgid "releasing ip" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:123 #, python-format -msgid "instance %s: injecting net into image %s" +msgid "" +"Called %(action)s for mac %(mac)s with ip %(ip)s and hostname %(hostname)s " +"on interface %(interface)s" msgstr "" -#: nova/virt/libvirt_conn.py:516 +#: 
../nova/virt/fake.py:239 #, python-format -msgid "instance %s: ignoring error injecting data into image %s (%s)" +msgid "Instance %s Not Found" msgstr "" -#: nova/virt/libvirt_conn.py:544 nova/virt/libvirt_conn.py:547 +#: ../nova/network/manager.py:153 #, python-format -msgid "instance %s: starting toXML method" +msgid "Dissassociated %s stale fixed ip(s)" msgstr "" -#: nova/virt/libvirt_conn.py:589 +#: ../nova/network/manager.py:157 +msgid "setting network host" +msgstr "" + +#: ../nova/network/manager.py:212 #, python-format -msgid "instance %s: finished toXML method" +msgid "Leasing IP %s" +msgstr "" + +#: ../nova/network/manager.py:216 +#, python-format +msgid "IP %s leased that isn't associated" msgstr "" -#: nova/virt/xenapi_conn.py:113 +#: ../nova/network/manager.py:220 +#, python-format +msgid "IP %(address)s leased to bad mac %(inst_addr)s vs %(mac)s" +msgstr "" + +#: ../nova/network/manager.py:228 +#, python-format +msgid "IP %s leased that was already deallocated" +msgstr "" + +#: ../nova/network/manager.py:233 +#, python-format +msgid "Releasing IP %s" +msgstr "" + +#: ../nova/network/manager.py:237 +#, python-format +msgid "IP %s released that isn't associated" +msgstr "" + +#: ../nova/network/manager.py:241 +#, python-format +msgid "IP %(address)s released from bad mac %(inst_addr)s vs %(mac)s" +msgstr "" + +#: ../nova/network/manager.py:244 +#, python-format +msgid "IP %s released that was not leased" +msgstr "" + +#: ../nova/network/manager.py:519 msgid "" -"Must specify xenapi_connection_url, xenapi_connection_username (optionally), " -"and xenapi_connection_password to use connection_type=xenapi" +"The sum between the number of networks and the vlan start cannot be greater " +"than 4094" msgstr "" -#: nova/virt/xenapi_conn.py:263 +#: ../nova/virt/xenapi/volume_utils.py:57 #, python-format -msgid "Task [%s] %s status: success %s" +msgid "Introducing %s..." 
msgstr "" -#: nova/virt/xenapi_conn.py:271 +#: ../nova/virt/xenapi/volume_utils.py:74 #, python-format -msgid "Task [%s] %s status: %s %s" +msgid "Introduced %(label)s as %(sr_ref)s." msgstr "" -#: nova/virt/xenapi_conn.py:287 nova/virt/xenapi_conn.py:300 +#: ../nova/virt/xenapi/volume_utils.py:78 +msgid "Unable to create Storage Repository" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:90 #, python-format -msgid "Got exception: %s" +msgid "Unable to find SR from VBD %s" msgstr "" -#: nova/virt/xenapi/fake.py:72 +#: ../nova/virt/xenapi/volume_utils.py:96 #, python-format -msgid "%s: _db_content => %s" +msgid "Forgetting SR %s ... " msgstr "" -#: nova/virt/xenapi/fake.py:247 nova/virt/xenapi/fake.py:338 -#: nova/virt/xenapi/fake.py:356 nova/virt/xenapi/fake.py:404 -msgid "Raising NotImplemented" +#: ../nova/virt/xenapi/volume_utils.py:101 +#, python-format +msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s" msgstr "" -#: nova/virt/xenapi/fake.py:249 +#: ../nova/virt/xenapi/volume_utils.py:107 #, python-format -msgid "xenapi.fake does not have an implementation for %s" +msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s" msgstr "" -#: nova/virt/xenapi/fake.py:283 +#: ../nova/virt/xenapi/volume_utils.py:111 #, python-format -msgid "Calling %s %s" +msgid "Forgetting SR %s done." 
msgstr "" -#: nova/virt/xenapi/fake.py:288 +#: ../nova/virt/xenapi/volume_utils.py:113 #, python-format -msgid "Calling getter %s" +msgid "Ignoring exception %(exc)s when forgetting SR %(sr_ref)s" msgstr "" -#: nova/virt/xenapi/fake.py:340 +#: ../nova/virt/xenapi/volume_utils.py:123 #, python-format -msgid "" -"xenapi.fake does not have an implementation for %s or it has been called " -"with the wrong number of arguments" +msgid "Unable to introduce VDI on SR %s" msgstr "" -#: nova/virt/xenapi/network_utils.py:40 +#: ../nova/virt/xenapi/volume_utils.py:128 #, python-format -msgid "Found non-unique network for bridge %s" +msgid "Unable to get record of VDI %s on" msgstr "" -#: nova/virt/xenapi/network_utils.py:43 +#: ../nova/virt/xenapi/volume_utils.py:146 #, python-format -msgid "Found no network for bridge %s" +msgid "Unable to introduce VDI for SR %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:127 +#: ../nova/virt/xenapi/volume_utils.py:175 #, python-format -msgid "Created VM %s as %s." +msgid "Unable to obtain target information %(device_path)s, %(mountpoint)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:147 +#: ../nova/virt/xenapi/volume_utils.py:197 #, python-format -msgid "Creating VBD for VM %s, VDI %s ... " +msgid "Mountpoint cannot be translated: %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:149 +#: ../nova/objectstore/image.py:262 #, python-format -msgid "Created VBD %s for VM %s, VDI %s." 
+msgid "Failed to decrypt private key: %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:165 +#: ../nova/objectstore/image.py:269 #, python-format -msgid "VBD not found in instance %s" +msgid "Failed to decrypt initialization vector: %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:175 +#: ../nova/objectstore/image.py:277 #, python-format -msgid "Unable to unplug VBD %s" +msgid "Failed to decrypt image file %(image_file)s: %(err)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:187 +#: ../nova/objectstore/handler.py:106 #, python-format -msgid "Unable to destroy VBD %s" +msgid "Unknown S3 value type %r" +msgstr "" + +#: ../nova/objectstore/handler.py:137 +msgid "Authenticated request" +msgstr "" + +#: ../nova/objectstore/handler.py:182 +msgid "List of buckets requested" +msgstr "" + +#: ../nova/objectstore/handler.py:209 +#, python-format +msgid "List keys for bucket %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:202 +#: ../nova/objectstore/handler.py:217 #, python-format -msgid "Creating VIF for VM %s, network %s." +msgid "Unauthorized attempt to access bucket %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:205 +#: ../nova/objectstore/handler.py:235 #, python-format -msgid "Created VIF %s for VM %s, network %s." +msgid "Creating bucket %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:216 +#: ../nova/objectstore/handler.py:245 #, python-format -msgid "Snapshotting VM %s with label '%s'..." +msgid "Deleting bucket %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:229 +#: ../nova/objectstore/handler.py:249 #, python-format -msgid "Created snapshot %s from VM %s." 
+msgid "Unauthorized attempt to delete bucket %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:243 +#: ../nova/objectstore/handler.py:273 #, python-format -msgid "Asking xapi to upload %s as '%s'" +msgid "Getting object: %(bname)s / %(nm)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:261 +#: ../nova/objectstore/handler.py:276 #, python-format -msgid "Asking xapi to fetch %s as %s" +msgid "Unauthorized attempt to get object %(nm)s from bucket %(bname)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:279 +#: ../nova/objectstore/handler.py:296 #, python-format -msgid "Looking up vdi %s for PV kernel" +msgid "Putting object: %(bname)s / %(nm)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:290 +#: ../nova/objectstore/handler.py:299 #, python-format -msgid "PV Kernel in VDI:%d" +msgid "Unauthorized attempt to upload object %(nm)s to bucket %(bname)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:318 +#: ../nova/objectstore/handler.py:318 #, python-format -msgid "VDI %s is still available" +msgid "Deleting object: %(bname)s / %(nm)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:331 +#: ../nova/objectstore/handler.py:322 #, python-format -msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgid "Unauthorized attempt to delete object %(nm)s from bucket %(bname)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:333 +#: ../nova/objectstore/handler.py:396 #, python-format -msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgid "Not authorized to upload image: invalid directory %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:390 +#: ../nova/objectstore/handler.py:404 #, python-format -msgid "VHD %s has parent %s" +msgid "Not authorized to upload image: unauthorized bucket %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:407 +#: ../nova/objectstore/handler.py:409 #, python-format -msgid "Re-scanning SR %s" +msgid "Starting image upload: %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:431 +#: ../nova/objectstore/handler.py:423 #, python-format -msgid "Parent %s doesn't match original parent %s, waiting for 
coalesce..." +msgid "Not authorized to update attributes of image %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:448 +#: ../nova/objectstore/handler.py:431 #, python-format -msgid "No VDIs found for VM %s" +msgid "Toggling publicity flag of image %(image_id)s %(newstatus)r" msgstr "" -#: nova/virt/xenapi/vm_utils.py:452 +#. other attributes imply update +#: ../nova/objectstore/handler.py:436 #, python-format -msgid "Unexpected number of VDIs (%s) found for VM %s" +msgid "Updating user fields on image %s" msgstr "" -#: nova/virt/xenapi/vmops.py:62 +#: ../nova/objectstore/handler.py:450 #, python-format -msgid "Attempted to create non-unique name %s" +msgid "Unauthorized attempt to delete image %s" msgstr "" -#: nova/virt/xenapi/vmops.py:99 +#: ../nova/objectstore/handler.py:455 #, python-format -msgid "Starting VM %s..." +msgid "Deleted image: %s" msgstr "" -#: nova/virt/xenapi/vmops.py:101 +#: ../nova/auth/manager.py:259 #, python-format -msgid "Spawning VM %s created %s." +msgid "Looking up user: %r" msgstr "" -#: nova/virt/xenapi/vmops.py:112 +#: ../nova/auth/manager.py:263 #, python-format -msgid "Instance %s: booted" +msgid "Failed authorization for access key %s" msgstr "" -#: nova/virt/xenapi/vmops.py:137 +#: ../nova/auth/manager.py:264 #, python-format -msgid "Instance not present %s" +msgid "No user found for access key %s" msgstr "" -#: nova/virt/xenapi/vmops.py:166 +#: ../nova/auth/manager.py:270 #, python-format -msgid "Starting snapshot for VM %s" +msgid "Using project name = user name (%s)" msgstr "" -#: nova/virt/xenapi/vmops.py:174 +#: ../nova/auth/manager.py:277 #, python-format -msgid "Unable to Snapshot %s: %s" +msgid "failed authorization: no project named %(pjid)s (user=%(uname)s)" msgstr "" -#: nova/virt/xenapi/vmops.py:184 +#: ../nova/auth/manager.py:279 #, python-format -msgid "Finished snapshot and upload for VM %s" +msgid "No project called %s could be found" msgstr "" -#: nova/virt/xenapi/vmops.py:252 +#: ../nova/auth/manager.py:287 #, 
python-format -msgid "suspend: instance not present %s" +msgid "" +"Failed authorization: user %(uname)s not admin and not member of project " +"%(pjname)s" msgstr "" -#: nova/virt/xenapi/vmops.py:262 +#: ../nova/auth/manager.py:289 #, python-format -msgid "resume: instance not present %s" +msgid "User %(uid)s is not a member of project %(pjid)s" msgstr "" -#: nova/virt/xenapi/vmops.py:271 +#: ../nova/auth/manager.py:298 ../nova/auth/manager.py:309 #, python-format -msgid "Instance not found %s" +msgid "Invalid signature for user %s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:57 +#: ../nova/auth/manager.py:299 ../nova/auth/manager.py:310 +msgid "Signature does not match" +msgstr "" + +#: ../nova/auth/manager.py:380 +msgid "Must specify project" +msgstr "" + +#: ../nova/auth/manager.py:414 #, python-format -msgid "Introducing %s..." +msgid "The %s role can not be found" msgstr "" -#: nova/virt/xenapi/volume_utils.py:74 +#: ../nova/auth/manager.py:416 #, python-format -msgid "Introduced %s as %s." +msgid "The %s role is global only" msgstr "" -#: nova/virt/xenapi/volume_utils.py:78 -msgid "Unable to create Storage Repository" +#: ../nova/auth/manager.py:420 +#, python-format +msgid "Adding role %(role)s to user %(uid)s in project %(pid)s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:90 +#: ../nova/auth/manager.py:423 #, python-format -msgid "Unable to find SR from VBD %s" +msgid "Adding sitewide role %(role)s to user %(uid)s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:96 +#: ../nova/auth/manager.py:448 #, python-format -msgid "Forgetting SR %s ... 
" +msgid "Removing role %(role)s from user %(uid)s on project %(pid)s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:101 +#: ../nova/auth/manager.py:451 #, python-format -msgid "Ignoring exception %s when getting PBDs for %s" +msgid "Removing sitewide role %(role)s from user %(uid)s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:107 +#: ../nova/auth/manager.py:515 #, python-format -msgid "Ignoring exception %s when unplugging PBD %s" +msgid "Created project %(name)s with manager %(manager_user)s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:111 +#: ../nova/auth/manager.py:533 #, python-format -msgid "Forgetting SR %s done." +msgid "modifying project %s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:113 +#: ../nova/auth/manager.py:545 #, python-format -msgid "Ignoring exception %s when forgetting SR %s" +msgid "Adding user %(uid)s to project %(pid)s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:123 +#: ../nova/auth/manager.py:566 #, python-format -msgid "Unable to introduce VDI on SR %s" +msgid "Remove user %(uid)s from project %(pid)s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:128 +#: ../nova/auth/manager.py:592 #, python-format -msgid "Unable to get record of VDI %s on" +msgid "Deleting project %s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:146 +#: ../nova/auth/manager.py:650 #, python-format -msgid "Unable to introduce VDI for SR %s" +msgid "Created user %(rvname)s (admin: %(rvadmin)r)" msgstr "" -#: nova/virt/xenapi/volume_utils.py:175 +#: ../nova/auth/manager.py:659 #, python-format -msgid "Unable to obtain target information %s, %s" +msgid "Deleting user %s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:197 +#: ../nova/auth/manager.py:669 #, python-format -msgid "Mountpoint cannot be translated: %s" +msgid "Access Key change for user %s" msgstr "" -#: nova/virt/xenapi/volumeops.py:51 +#: ../nova/auth/manager.py:671 #, python-format -msgid "Attach_volume: %s, %s, %s" +msgid "Secret Key change for user %s" msgstr "" -#: 
nova/virt/xenapi/volumeops.py:69 +#: ../nova/auth/manager.py:673 #, python-format -msgid "Unable to create VDI on SR %s for instance %s" +msgid "Admin status set to %(admin)r for user %(uid)s" msgstr "" -#: nova/virt/xenapi/volumeops.py:81 +#: ../nova/auth/manager.py:722 #, python-format -msgid "Unable to use SR %s for instance %s" +msgid "No vpn data for project %s" msgstr "" -#: nova/virt/xenapi/volumeops.py:93 +#: ../nova/service.py:161 #, python-format -msgid "Unable to attach volume to instance %s" +msgid "Starting %(topic)s node (version %(vcs_string)s)" +msgstr "" + +#: ../nova/service.py:174 +msgid "Service killed that has no database entry" +msgstr "Servizio terminato che non ha entry nel database" + +#: ../nova/service.py:195 +msgid "The service database object disappeared, Recreating it." +msgstr "Il servizio é scomparso dal database, ricreo." + +#: ../nova/service.py:207 +msgid "Recovered model server connection!" +msgstr "Connessione al model server ripristinata!" + +#: ../nova/service.py:213 +msgid "model server went away" +msgstr "model server é scomparso" + +#: ../nova/auth/ldapdriver.py:174 +#, python-format +msgid "LDAP user %s already exists" msgstr "" -#: nova/virt/xenapi/volumeops.py:95 +#: ../nova/auth/ldapdriver.py:205 #, python-format -msgid "Mountpoint %s attached to instance %s" +msgid "LDAP object for %s doesn't exist" msgstr "" -#: nova/virt/xenapi/volumeops.py:106 +#: ../nova/auth/ldapdriver.py:348 #, python-format -msgid "Detach_volume: %s, %s" +msgid "User %s doesn't exist" msgstr "" -#: nova/virt/xenapi/volumeops.py:113 +#: ../nova/auth/ldapdriver.py:472 #, python-format -msgid "Unable to locate volume %s" +msgid "Group can't be created because group %s already exists" msgstr "" -#: nova/virt/xenapi/volumeops.py:121 +#: ../nova/auth/ldapdriver.py:478 #, python-format -msgid "Unable to detach volume %s" +msgid "Group can't be created because user %s doesn't exist" msgstr "" -#: nova/virt/xenapi/volumeops.py:128 +#: 
../nova/auth/ldapdriver.py:495 #, python-format -msgid "Mountpoint %s detached from instance %s" +msgid "User %s can't be searched in group because the user doesn't exist" msgstr "" -#: nova/volume/api.py:44 +#: ../nova/auth/ldapdriver.py:507 #, python-format -msgid "Quota exceeeded for %s, tried to create %sG volume" +msgid "User %s can't be added to the group because the user doesn't exist" msgstr "" -#: nova/volume/api.py:46 +#: ../nova/auth/ldapdriver.py:510 ../nova/auth/ldapdriver.py:521 #, python-format -msgid "Volume quota exceeded. You cannot create a volume of size %s" +msgid "The group at dn %s doesn't exist" msgstr "" -#: nova/volume/api.py:70 nova/volume/api.py:95 -msgid "Volume status must be available" +#: ../nova/auth/ldapdriver.py:513 +#, python-format +msgid "User %(uid)s is already a member of the group %(group_dn)s" msgstr "" -#: nova/volume/api.py:97 -msgid "Volume is already attached" +#: ../nova/auth/ldapdriver.py:524 +#, python-format +msgid "" +"User %s can't be removed from the group because the user doesn't exist" msgstr "" -#: nova/volume/api.py:103 -msgid "Volume is already detached" +#: ../nova/auth/ldapdriver.py:528 +#, python-format +msgid "User %s is not a member of the group" msgstr "" -#: nova/volume/driver.py:76 +#: ../nova/auth/ldapdriver.py:542 #, python-format -msgid "Recovering from a failed execute. Try number %s" +msgid "" +"Attempted to remove the last member of a group. Deleting the group at %s " +"instead." 
msgstr "" -#: nova/volume/driver.py:85 +#: ../nova/auth/ldapdriver.py:549 #, python-format -msgid "volume group %s doesn't exist" +msgid "User %s can't be removed from all because the user doesn't exist" msgstr "" -#: nova/volume/driver.py:210 +#: ../nova/auth/ldapdriver.py:564 #, python-format -msgid "FAKE AOE: %s" +msgid "Group at dn %s doesn't exist" msgstr "" -#: nova/volume/driver.py:315 +#: ../nova/virt/xenapi/network_utils.py:40 #, python-format -msgid "FAKE ISCSI: %s" +msgid "Found non-unique network for bridge %s" msgstr "" -#: nova/volume/manager.py:85 +#: ../nova/virt/xenapi/network_utils.py:43 #, python-format -msgid "Re-exporting %s volumes" +msgid "Found no network for bridge %s" msgstr "" -#: nova/volume/manager.py:93 +#: ../nova/api/ec2/admin.py:97 #, python-format -msgid "volume %s: creating" +msgid "Creating new user: %s" msgstr "" -#: nova/volume/manager.py:102 +#: ../nova/api/ec2/admin.py:105 #, python-format -msgid "volume %s: creating lv of size %sG" +msgid "Deleting user: %s" msgstr "" -#: nova/volume/manager.py:106 +#: ../nova/api/ec2/admin.py:127 #, python-format -msgid "volume %s: creating export" +msgid "Adding role %(role)s to user %(user)s for project %(project)s" msgstr "" -#: nova/volume/manager.py:113 +#: ../nova/api/ec2/admin.py:131 #, python-format -msgid "volume %s: created successfully" +msgid "Adding sitewide role %(role)s to user %(user)s" msgstr "" -#: nova/volume/manager.py:121 -msgid "Volume is still attached" +#: ../nova/api/ec2/admin.py:137 +#, python-format +msgid "Removing role %(role)s from user %(user)s for project %(project)s" msgstr "" -#: nova/volume/manager.py:123 -msgid "Volume is not local to this node" +#: ../nova/api/ec2/admin.py:141 +#, python-format +msgid "Removing sitewide role %(role)s from user %(user)s" msgstr "" -#: nova/volume/manager.py:124 +#: ../nova/api/ec2/admin.py:146 ../nova/api/ec2/admin.py:223 +msgid "operation must be add or remove" +msgstr "" + +#: ../nova/api/ec2/admin.py:159 #, 
python-format -msgid "volume %s: removing export" +msgid "Getting x509 for user: %(name)s on project: %(project)s" msgstr "" -#: nova/volume/manager.py:126 +#: ../nova/api/ec2/admin.py:177 #, python-format -msgid "volume %s: deleting" +msgid "Create project %(name)s managed by %(manager_user)s" msgstr "" -#: nova/volume/manager.py:129 +#: ../nova/api/ec2/admin.py:190 #, python-format -msgid "volume %s: deleted successfully" +msgid "Modify project: %(name)s managed by %(manager_user)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:200 +#, python-format +msgid "Delete project: %s" +msgstr "" + +#: ../nova/api/ec2/admin.py:214 +#, python-format +msgid "Adding user %(user)s to project %(project)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:218 +#, python-format +msgid "Removing user %(user)s from project %(project)s" msgstr "" + +#, python-format +#~ msgid "" +#~ "%s\n" +#~ "Command: %s\n" +#~ "Exit code: %s\n" +#~ "Stdout: %r\n" +#~ "Stderr: %r" +#~ msgstr "" +#~ "%s\n" +#~ "Comando: %s\n" +#~ "Exit code: %s\n" +#~ "Stdout: %r\n" +#~ "Stderr: %r" + +#, python-format +#~ msgid "(%s) publish (key: %s) %s" +#~ msgstr "(%s) pubblica (chiave: %s) %s" + +#, python-format +#~ msgid "AMQP server on %s:%d is unreachable. Trying again in %d seconds." +#~ msgstr "" +#~ "Il server AMQP su %s:%d non é raggiungibile. Riprovare in %d secondi." + +#, python-format +#~ msgid "Binding %s to %s with key %s" +#~ msgstr "Collegando %s a %s con la chiave %s" + +#, python-format +#~ msgid "Starting %s node" +#~ msgstr "Avviando il nodo %s" + +#, python-format +#~ msgid "Data store %s is unreachable. Trying again in %d seconds." +#~ msgstr "Datastore %s é irrangiungibile. Riprovare in %d seconds." 
@@ -7,2137 +7,3335 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n" -"POT-Creation-Date: 2011-01-10 11:25-0800\n" -"PO-Revision-Date: 2011-01-14 09:04+0000\n" +"POT-Creation-Date: 2011-02-21 10:03-0500\n" +"PO-Revision-Date: 2011-03-29 07:27+0000\n" "Last-Translator: Koji Iida <Unknown>\n" "Language-Team: \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Launchpad-Export-Date: 2011-02-05 05:36+0000\n" -"X-Generator: Launchpad (build 12177)\n" +"X-Launchpad-Export-Date: 2011-03-30 05:22+0000\n" +"X-Generator: Launchpad (build 12559)\n" -#: nova/crypto.py:46 +#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55 +#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110 +#: ../nova/scheduler/simple.py:122 +msgid "No hosts found" +msgstr "適切なホストが見つかりません。" + +#: ../nova/exception.py:33 +msgid "Unexpected error while running command." +msgstr "コマンド実行において予期しないエラーが発生しました。" + +#: ../nova/exception.py:36 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" + +#: ../nova/exception.py:107 +msgid "DB exception wrapped" +msgstr "" + +#. exc_type, exc_value, exc_traceback = sys.exc_info() +#: ../nova/exception.py:120 +msgid "Uncaught exception" +msgstr "キャッチされなかった例外" + +#: ../nova/volume/api.py:45 +#, python-format +msgid "Quota exceeeded for %(pid)s, tried to create %(size)sG volume" +msgstr "サイズ %(size)sG のボリュームの作成を行おうとしましたが、%(pid)sのクオータを超えています。" + +#: ../nova/volume/api.py:47 +#, python-format +msgid "Volume quota exceeded. 
You cannot create a volume of size %sG" +msgstr "ボリュームのクオータを超えています。%sの大きさのボリュームは作成できません。" + +#: ../nova/volume/api.py:71 ../nova/volume/api.py:96 +msgid "Volume status must be available" +msgstr "ボリュームのステータス(status)が available でなければなりません。" + +#: ../nova/volume/api.py:98 +msgid "Volume is already attached" +msgstr "ボリュームは既にアタッチされています(attached)。" + +#: ../nova/volume/api.py:104 +msgid "Volume is already detached" +msgstr "ボリュームは既にデタッチされています(detached)。" + +#: ../nova/api/openstack/servers.py:72 +msgid "Failed to read private ip" +msgstr "" + +#: ../nova/api/openstack/servers.py:79 +msgid "Failed to read public ip(s)" +msgstr "" + +#: ../nova/api/openstack/servers.py:152 +#, python-format +msgid "%(param)s property not found for image %(_image_id)s" +msgstr "イメージ %(_image_id)s に対するプロパティ %(param)s が見つかりません" + +#: ../nova/api/openstack/servers.py:168 +msgid "No keypairs defined" +msgstr "キーペアが定義されていません。" + +#: ../nova/api/openstack/servers.py:238 +#, python-format +msgid "Compute.api::lock %s" +msgstr "例外: Compute.api::lock %s" + +#: ../nova/api/openstack/servers.py:253 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "例外: Compute.api::unlock %s" + +#: ../nova/api/openstack/servers.py:267 +#, python-format +msgid "Compute.api::get_lock %s" +msgstr "例外: Compute.api::get_lock %s" + +#: ../nova/api/openstack/servers.py:281 +#, python-format +msgid "Compute.api::reset_network %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:292 +#, python-format +msgid "Compute.api::pause %s" +msgstr "例外: Compute.api::pause %s" + +#: ../nova/api/openstack/servers.py:303 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "例外: Compute.api::unpause %s" + +#: ../nova/api/openstack/servers.py:314 +#, python-format +msgid "compute.api::suspend %s" +msgstr "例外: compute.api::suspend %s" + +#: ../nova/api/openstack/servers.py:325 +#, python-format +msgid "compute.api::resume %s" +msgstr "例外: compute.api::resume %s" + +#: ../nova/twistd.py:157 +msgid "Wrong number of arguments." 
+msgstr "引数の数が異なります。" + +#: ../nova/twistd.py:209 +#, python-format +msgid "pidfile %s does not exist. Daemon not running?\n" +msgstr "pidfile %s が存在しません。デーモンは実行中ですか?\n" + +#: ../nova/twistd.py:221 +msgid "No such process" +msgstr "そのようなプロセスはありません" + +#: ../nova/twistd.py:230 ../nova/service.py:224 +#, python-format +msgid "Serving %s" +msgstr "%s サービスの開始" + +#: ../nova/twistd.py:262 ../nova/service.py:225 +msgid "Full set of FLAGS:" +msgstr "FLAGSの一覧:" + +#: ../nova/twistd.py:266 +#, python-format +msgid "Starting %s" +msgstr "%s を開始します。" + +#: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101 +#: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741 +#: ../nova/api/ec2/__init__.py:317 +#, python-format +msgid "Instance %s not found" +msgstr "インスタンス %s が見つかりません。" + +#. NOTE: No Resource Pool concept so far +#: ../nova/virt/xenapi/volumeops.py:51 +#, python-format +msgid "Attach_volume: %(instance_name)s, %(device_path)s, %(mountpoint)s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:69 +#, python-format +msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:80 +#, python-format +msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:91 +#, python-format +msgid "Unable to attach volume to instance %s" +msgstr "インスタンス %s にボリュームをアタッチできません。" + +#: ../nova/virt/xenapi/volumeops.py:93 +#, python-format +msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" +msgstr "インスタンス %(instance_name)s にマウントポイント %(mountpoint)s をアタッチしました。" + +#. 
Detach VBD from VM +#: ../nova/virt/xenapi/volumeops.py:104 +#, python-format +msgid "Detach_volume: %(instance_name)s, %(mountpoint)s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:112 +#, python-format +msgid "Unable to locate volume %s" +msgstr "ボリューム %s の存在が確認できません。" + +#: ../nova/virt/xenapi/volumeops.py:120 +#, python-format +msgid "Unable to detach volume %s" +msgstr "ボリューム %s のデタッチができません。" + +#: ../nova/virt/xenapi/volumeops.py:127 +#, python-format +msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" +msgstr "インスタンス %(instance_name)s からマウントポイント %(mountpoint)s をデタッチしました。" + +#: ../nova/compute/instance_types.py:41 +#, python-format +msgid "Unknown instance type: %s" +msgstr "%s は未知のインスタンスタイプです。" + +#: ../nova/crypto.py:46 msgid "Filename of root CA" msgstr "ルートCAのファイル名" -#: nova/crypto.py:49 +#: ../nova/crypto.py:49 msgid "Filename of private key" msgstr "プライベートキーのファイル名" -#: nova/crypto.py:51 +#: ../nova/crypto.py:51 msgid "Filename of root Certificate Revokation List" msgstr "ルート証明書失効リストのファイル名" -#: nova/crypto.py:53 +#: ../nova/crypto.py:53 msgid "Where we keep our keys" msgstr "キーを格納するパス" -#: nova/crypto.py:55 +#: ../nova/crypto.py:55 msgid "Where we keep our root CA" msgstr "ルートCAを格納するパス" -#: nova/crypto.py:57 +#: ../nova/crypto.py:57 msgid "Should we use a CA for each project?" 
msgstr "プロジェクトごとにCAを使用するか否かのフラグ" -#: nova/crypto.py:61 +#: ../nova/crypto.py:61 #, python-format msgid "Subject for certificate for users, %s for project, user, timestamp" msgstr "ユーザの証明書のサブジェクト、%s はプロジェクト、ユーザ、タイムスタンプ" -#: nova/crypto.py:66 +#: ../nova/crypto.py:66 #, python-format msgid "Subject for certificate for projects, %s for project, timestamp" msgstr "プロジェクトの証明書のサブジェクト、%s はプロジェクト、およびタイムスタンプ" -#: nova/crypto.py:71 +#: ../nova/crypto.py:71 #, python-format msgid "Subject for certificate for vpns, %s for project, timestamp" msgstr "vpnの証明書のサブジェクト、%sはプロジェクト、およびタイムスタンプ" -#: nova/crypto.py:258 +#: ../nova/crypto.py:258 #, python-format msgid "Flags path: %s" msgstr "Flags のパス: %s" -#: nova/exception.py:33 -msgid "Unexpected error while running command." -msgstr "コマンド実行において予期しないエラーが発生しました。" +#: ../nova/scheduler/manager.py:69 +#, python-format +msgid "Casting to %(topic)s %(host)s for %(method)s" +msgstr "" -#: nova/exception.py:36 +#: ../nova/compute/manager.py:78 +#, python-format +msgid "check_instance_lock: decorating: |%s|" +msgstr "check_instance_lock: decorating: |%s|" + +#: ../nova/compute/manager.py:80 #, python-format msgid "" -"%s\n" -"Command: %s\n" -"Exit code: %s\n" -"Stdout: %r\n" -"Stderr: %r" -msgstr "" -"%s\n" -"コマンド: %s\n" -"終了コード: %s\n" -"標準出力: %r\n" -"標準エラー出力: %r" - -#: nova/exception.py:86 -msgid "Uncaught exception" -msgstr "キャッチされなかった例外" +"check_instance_lock: arguments: |%(self)s| |%(context)s| |%(instance_id)s|" +msgstr "" -#: nova/fakerabbit.py:48 +#: ../nova/compute/manager.py:84 #, python-format -msgid "(%s) publish (key: %s) %s" -msgstr "(%s) パブリッシュ (key: %s) %s" +msgid "check_instance_lock: locked: |%s|" +msgstr "check_instance_lock: locked: |%s|" -#: nova/fakerabbit.py:53 +#: ../nova/compute/manager.py:86 #, python-format -msgid "Publishing to route %s" -msgstr "ルート %s へパブリッシュ" +msgid "check_instance_lock: admin: |%s|" +msgstr "check_instance_lock: admin: |%s|" -#: nova/fakerabbit.py:83 +#: ../nova/compute/manager.py:91 #, 
python-format -msgid "Declaring queue %s" -msgstr "queue %s の宣言" +msgid "check_instance_lock: executing: |%s|" +msgstr "check_instance_lock: executing: |%s|" -#: nova/fakerabbit.py:89 +#: ../nova/compute/manager.py:95 #, python-format -msgid "Declaring exchange %s" -msgstr "exchange %s の宣言" +msgid "check_instance_lock: not executing |%s|" +msgstr "check_instance_lock: not executing |%s|" -#: nova/fakerabbit.py:95 +#: ../nova/compute/manager.py:179 +msgid "Instance has already been created" +msgstr "インスタンスは既に生成されています。" + +#: ../nova/compute/manager.py:180 #, python-format -msgid "Binding %s to %s with key %s" -msgstr "%s を %s にキー %s でバインドします。" +msgid "instance %s: starting..." +msgstr "インスタンス %s を開始します。" -#: nova/fakerabbit.py:120 +#. pylint: disable=W0702 +#: ../nova/compute/manager.py:219 #, python-format -msgid "Getting from %s: %s" -msgstr "%s から %s を取得" +msgid "instance %s: Failed to spawn" +msgstr "インスタンス %s の起動に失敗しました。" -#: nova/rpc.py:92 +#: ../nova/compute/manager.py:233 ../nova/tests/test_cloud.py:286 #, python-format -msgid "AMQP server on %s:%d is unreachable. Trying again in %d seconds." -msgstr "AMQPサーバ %s:%d に接続できません。 %d 秒後に再度試みます。" +msgid "Terminating instance %s" +msgstr "Terminating instance: インスタンス %s を終了します。" -#: nova/rpc.py:99 +#: ../nova/compute/manager.py:255 #, python-format -msgid "Unable to connect to AMQP server after %d tries. Shutting down." 
-msgstr "AMQPサーバーに %d 回接続を試みましたが、接続できませんでした。シャットダウンします。" +msgid "Deallocating address %s" +msgstr "アドレス %s の割当を解除(deallocate)します。" -#: nova/rpc.py:118 -msgid "Reconnected to queue" -msgstr "キューに再接続しました。" +#: ../nova/compute/manager.py:268 +#, python-format +msgid "trying to destroy already destroyed instance: %s" +msgstr "既に消去済みのインスタンス%sを消去しようとしました。" -#: nova/rpc.py:125 -msgid "Failed to fetch message from queue" -msgstr "キューからメッセージの取得に失敗しました。" +#: ../nova/compute/manager.py:282 +#, python-format +msgid "Rebooting instance %s" +msgstr "Rebooting instance: インスタンス %s を再起動します。" -#: nova/rpc.py:155 +#: ../nova/compute/manager.py:287 #, python-format -msgid "Initing the Adapter Consumer for %s" -msgstr "%sのアダプターコンシューマー(Adapter Consumer)を初期化しています。" +msgid "" +"trying to reboot a non-running instance: %(instance_id)s (state: %(state)s " +"expected: %(running)s)" +msgstr "" +"実行していないインスタンスを再起動しようとしました: %(instance_id)s (state: %(state)s 期待される状態: " +"%(running)s)" -#: nova/rpc.py:170 +#: ../nova/compute/manager.py:311 #, python-format -msgid "received %s" -msgstr "受信: %s" +msgid "instance %s: snapshotting" +msgstr "snapshotting: インスタンス %s のスナップショットを取得します。" -#: nova/rpc.py:183 +#: ../nova/compute/manager.py:316 #, python-format -msgid "no method for message: %s" -msgstr "メッセージ %s に対するメソッドが存在しません。" +msgid "" +"trying to snapshot a non-running instance: %(instance_id)s (state: %(state)s " +"expected: %(running)s)" +msgstr "" +"実行していないインスタンスのスナップショットを取得しようとしました: %(instance_id)s (state: %(state)s " +"期待される状態: %(running)s)" -#: nova/rpc.py:184 +#: ../nova/compute/manager.py:332 #, python-format -msgid "No method for message: %s" -msgstr "メッセージ %s に対するメソッドが存在しません。" +msgid "" +"trying to reset the password on a non-running instance: %(instance_id)s " +"(state: %(instance_state)s expected: %(expected_state)s)" +msgstr "" +"実行していないインスタンスのパスワードをリセットしようとしました: %(instance_id)s (state: %(instance_state)s " +"期待される状態: %(expected_state)s)" -#: nova/rpc.py:245 +#: 
../nova/compute/manager.py:335 #, python-format -msgid "Returning exception %s to caller" -msgstr "呼び出し元に 例外 %s を返却します。" +msgid "instance %s: setting admin password" +msgstr "インスタンス %s: admin password をセットします" -#: nova/rpc.py:286 +#: ../nova/compute/manager.py:353 #, python-format -msgid "unpacked context: %s" -msgstr "context %s をアンパックしました。" +msgid "" +"trying to inject a file into a non-running instance: %(instance_id)s (state: " +"%(instance_state)s expected: %(expected_state)s)" +msgstr "" +"実行していないインスタンスにファイルをインジェクトしようとしました: %(instance_id)s (state: " +"%(instance_state)s 期待される状態: %(expected_state)s)" -#: nova/rpc.py:305 -msgid "Making asynchronous call..." -msgstr "非同期呼び出しを実行します…" +#: ../nova/compute/manager.py:362 +#, python-format +msgid "instance %(nm)s: injecting file to %(plain_path)s" +msgstr "インスタンス %(nm)s: ファイルを %(plain_path)s にインジェクトします" -#: nova/rpc.py:308 +#: ../nova/compute/manager.py:372 #, python-format -msgid "MSG_ID is %s" -msgstr "MSG_IDは %s です。" +msgid "instance %s: rescuing" +msgstr "Rescuing: インスタンス %s をレスキューします。" -#: nova/rpc.py:356 +#: ../nova/compute/manager.py:387 #, python-format -msgid "response %s" -msgstr "応答 %s" +msgid "instance %s: unrescuing" +msgstr "Unrescuing: インスタンス %s をアンレスキューします。" -#: nova/rpc.py:365 +#: ../nova/compute/manager.py:406 #, python-format -msgid "topic is %s" -msgstr "topic は %s です。" +msgid "instance %s: pausing" +msgstr "pausing: インスタンス %s を一時停止します。" -#: nova/rpc.py:366 +#: ../nova/compute/manager.py:423 #, python-format -msgid "message %s" -msgstr "メッセージ %s" +msgid "instance %s: unpausing" +msgstr "unpausing: インスタンス %s の一時停止を解除します。" -#: nova/service.py:157 +#: ../nova/compute/manager.py:440 #, python-format -msgid "Starting %s node" -msgstr "ノード %s を開始します。" +msgid "instance %s: retrieving diagnostics" +msgstr "retrieving diagnostics: インスタンス %s の診断情報を取得します。" -#: nova/service.py:169 -msgid "Service killed that has no database entry" -msgstr "データベースにエントリの存在しないサービスを終了します。" +#: ../nova/compute/manager.py:453 +#, 
python-format +msgid "instance %s: suspending" +msgstr "suspending: インスタンス %s をサスペンドします。" -#: nova/service.py:190 -msgid "The service database object disappeared, Recreating it." -msgstr "サービスデータベースオブジェクトが消滅しました。再作成します。" +#: ../nova/compute/manager.py:472 +#, python-format +msgid "instance %s: resuming" +msgstr "resuming: インスタンス %s をレジュームします。" -#: nova/service.py:202 -msgid "Recovered model server connection!" -msgstr "モデルサーバへの接続を復旧しました。" +#: ../nova/compute/manager.py:491 +#, python-format +msgid "instance %s: locking" +msgstr "locking: インスタンス %s をロックします。" -#: nova/service.py:208 -msgid "model server went away" -msgstr "モデルサーバが消滅しました。" +#: ../nova/compute/manager.py:503 +#, python-format +msgid "instance %s: unlocking" +msgstr "unlocking: インスタンス %s のロックを解除します。" -#: nova/service.py:217 nova/db/sqlalchemy/__init__.py:43 +#: ../nova/compute/manager.py:513 #, python-format -msgid "Data store %s is unreachable. Trying again in %d seconds." -msgstr "データストア %s に接続できません。 %d 秒後に再接続します。" +msgid "instance %s: getting locked state" +msgstr "getting locked state: インスタンス %s のロックを取得しました。" -#: nova/service.py:232 nova/twistd.py:232 +#: ../nova/compute/manager.py:526 #, python-format -msgid "Serving %s" -msgstr "%s サービスの開始" +msgid "instance %s: reset network" +msgstr "インスタンス %s: ネットワークをリセットします" -#: nova/service.py:234 nova/twistd.py:264 -msgid "Full set of FLAGS:" -msgstr "FLAGSの一覧:" +#: ../nova/compute/manager.py:535 ../nova/api/ec2/cloud.py:515 +#, python-format +msgid "Get console output for instance %s" +msgstr "Get console output: インスタンス %s のコンソール出力を取得します。" -#: nova/twistd.py:211 +#: ../nova/compute/manager.py:543 #, python-format -msgid "pidfile %s does not exist. 
Daemon not running?\n" -msgstr "pidfile %s が存在しません。デーモンは実行中ですか?\n" +msgid "instance %s: getting ajax console" +msgstr "インスタンス %s: ajax consoleを接続します" -#: nova/twistd.py:268 +#: ../nova/compute/manager.py:553 #, python-format -msgid "Starting %s" -msgstr "%s を開始します。" +msgid "" +"instance %(instance_id)s: attaching volume %(volume_id)s to %(mountpoint)s" +msgstr "" +"インスタンス %(instance_id)s: ボリューム %(volume_id)s を %(mountpoint)s にアタッチします" + +#. pylint: disable=W0702 +#. NOTE(vish): The inline callback eats the exception info so we +#. log the traceback here and reraise the same +#. ecxception below. +#: ../nova/compute/manager.py:569 +#, python-format +msgid "instance %(instance_id)s: attach failed %(mountpoint)s, removing" +msgstr "インスタンス %(instance_id)s: %(mountpoint)s へのアタッチに失敗しました。削除します。" + +#: ../nova/compute/manager.py:585 +#, python-format +msgid "" +"Detach volume %(volume_id)s from mountpoint %(mp)s on instance " +"%(instance_id)s" +msgstr "" +"インスタンス%(instance_id)s のマウントポイント %(mp)s からボリューム %(volume_id)s をデタッチします。" -#: nova/utils.py:53 +#: ../nova/compute/manager.py:588 +#, python-format +msgid "Detaching volume from unknown instance %s" +msgstr "ボリュームを未知のインスタンス %s からデタッチします。" + +#: ../nova/scheduler/simple.py:53 +#, python-format +msgid "Host %s is not alive" +msgstr "ホスト %s は稼働していません。" + +#: ../nova/scheduler/simple.py:65 +msgid "All hosts have too many cores" +msgstr "全てのホストにコア数の空きがありません。" + +#: ../nova/scheduler/simple.py:87 +#, python-format +msgid "Host %s not available" +msgstr "ホスト %s は利用可能ではありません。" + +#: ../nova/scheduler/simple.py:99 +msgid "All hosts have too many gigabytes" +msgstr "全てのホストが利用可能な容量(gigabytes)に達しています。" + +#: ../nova/scheduler/simple.py:119 +msgid "All hosts have too many networks" +msgstr "全てのホストがネットワークの最大数に達しています。" + +#: ../nova/volume/manager.py:85 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "%s 個のボリュームを再エクスポートします。" + +#: ../nova/volume/manager.py:90 +#, python-format +msgid "volume %s: skipping export" +msgstr 
"ボリューム %s のエキスポートをスキップします。" + +#: ../nova/volume/manager.py:96 +#, python-format +msgid "volume %s: creating" +msgstr "ボリューム%sを作成します。" + +#: ../nova/volume/manager.py:108 +#, python-format +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "ボリューム %(vol_name)s: サイズ %(vol_size)sG のlvを作成します。" + +#: ../nova/volume/manager.py:112 +#, python-format +msgid "volume %s: creating export" +msgstr "ボリューム %s をエクスポートします。" + +#: ../nova/volume/manager.py:123 +#, python-format +msgid "volume %s: created successfully" +msgstr "ボリューム %s の作成に成功しました。" + +#: ../nova/volume/manager.py:131 +msgid "Volume is still attached" +msgstr "ボリュームはアタッチされたままです。" + +#: ../nova/volume/manager.py:133 +msgid "Volume is not local to this node" +msgstr "ボリュームはこのノードのローカルではありません。" + +#: ../nova/volume/manager.py:136 +#, python-format +msgid "volume %s: removing export" +msgstr "ボリューム %s のエクスポートを解除します。" + +#: ../nova/volume/manager.py:138 +#, python-format +msgid "volume %s: deleting" +msgstr "ボリューム %s を削除します。" + +#: ../nova/volume/manager.py:147 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "ボリューム %s の削除に成功しました。" + +#: ../nova/virt/xenapi/fake.py:74 +#, python-format +msgid "%(text)s: _db_content => %(content)s" +msgstr "" + +#: ../nova/virt/xenapi/fake.py:304 ../nova/virt/xenapi/fake.py:404 +#: ../nova/virt/xenapi/fake.py:422 ../nova/virt/xenapi/fake.py:478 +msgid "Raising NotImplemented" +msgstr "NotImplemented 例外を発生させます。" + +#: ../nova/virt/xenapi/fake.py:306 +#, python-format +msgid "xenapi.fake does not have an implementation for %s" +msgstr "xenapi.fake には %s が実装されていません。" + +#: ../nova/virt/xenapi/fake.py:341 +#, python-format +msgid "Calling %(localname)s %(impl)s" +msgstr "%(localname)s %(impl)s を呼び出します。" + +#: ../nova/virt/xenapi/fake.py:346 +#, python-format +msgid "Calling getter %s" +msgstr "getter %s をコールします。" + +#: ../nova/virt/xenapi/fake.py:406 +#, python-format +msgid "" +"xenapi.fake does not have an implementation for %s or it has been called " 
+"with the wrong number of arguments" +msgstr "xenapi.fake に %s に関する実装がないか、引数の数が誤っています。" + +#: ../nova/tests/test_cloud.py:256 +msgid "Can't test instances without a real virtual env." +msgstr "インスタンスのテストには実際の仮想環境が必要です。(fakeでは実行できません。)" + +#: ../nova/tests/test_cloud.py:268 +#, python-format +msgid "Need to watch instance %s until it's running..." +msgstr "インスタンス %s が実行するまで監視します…" + +#: ../nova/virt/connection.py:73 +msgid "Failed to open connection to the hypervisor" +msgstr "ハイパーバイザへの接続に失敗しました。" + +#: ../nova/network/linux_net.py:187 +#, python-format +msgid "Starting VLAN inteface %s" +msgstr "VLANインタフェース %s を開始します。" + +#: ../nova/network/linux_net.py:208 +#, python-format +msgid "Starting Bridge interface for %s" +msgstr "%s 用のブリッジインタフェースを開始します。" + +#. pylint: disable=W0703 +#: ../nova/network/linux_net.py:314 +#, python-format +msgid "Hupping dnsmasq threw %s" +msgstr "dnsmasqに対してhupを送信しましたが %s が発生しました。" + +#: ../nova/network/linux_net.py:316 +#, python-format +msgid "Pid %d is stale, relaunching dnsmasq" +msgstr "Pid %d は無効です。dnsmasqを再実行します。" + +#. pylint: disable=W0703 +#: ../nova/network/linux_net.py:358 +#, python-format +msgid "killing radvd threw %s" +msgstr "" + +#: ../nova/network/linux_net.py:360 +#, python-format +msgid "Pid %d is stale, relaunching radvd" +msgstr "" + +#. 
pylint: disable=W0703 +#: ../nova/network/linux_net.py:449 +#, python-format +msgid "Killing dnsmasq threw %s" +msgstr "dnsmasq をkillしましたが、 %s が発生しました。" + +#: ../nova/utils.py:58 #, python-format msgid "Inner Exception: %s" msgstr "内側で発生した例外: %s" -#: nova/utils.py:54 +#: ../nova/utils.py:59 #, python-format msgid "Class %s cannot be found" msgstr "クラス %s が見つかりません。" -#: nova/utils.py:113 +#: ../nova/utils.py:118 #, python-format msgid "Fetching %s" msgstr "ファイルをフェッチ: %s" -#: nova/utils.py:125 +#: ../nova/utils.py:130 #, python-format msgid "Running cmd (subprocess): %s" msgstr "コマンド実行(subprocess): %s" -#: nova/utils.py:138 +#: ../nova/utils.py:143 ../nova/utils.py:183 #, python-format msgid "Result was %s" msgstr "コマンド実行結果: %s" -#: nova/utils.py:171 +#: ../nova/utils.py:159 +#, python-format +msgid "Running cmd (SSH): %s" +msgstr "コマンド(SSH)を実行: %s" + +#: ../nova/utils.py:217 #, python-format msgid "debug in callback: %s" msgstr "コールバック中のデバッグ: %s" -#: nova/utils.py:176 +#: ../nova/utils.py:222 #, python-format msgid "Running %s" msgstr "コマンド実行: %s" -#: nova/utils.py:207 +#: ../nova/utils.py:262 #, python-format -msgid "Couldn't get IP, using 127.0.0.1 %s" -msgstr "IPを取得できません。127.0.0.1 を %s として使います。" +msgid "Link Local address is not found.:%s" +msgstr "リンクローカルアドレスが見つかりません: %s" -#: nova/utils.py:289 +#: ../nova/utils.py:265 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "" + +#: ../nova/utils.py:363 #, python-format msgid "Invalid backend: %s" msgstr "不正なバックエンドです: %s" -#: nova/utils.py:300 +#: ../nova/utils.py:374 #, python-format msgid "backend %s" msgstr "バックエンドは %s です。" -#: nova/api/ec2/__init__.py:133 -msgid "Too many failed authentications." 
-msgstr "認証失敗の回数が多すぎます。" +#: ../nova/fakerabbit.py:49 +#, python-format +msgid "(%(nm)s) publish (key: %(routing_key)s) %(message)s" +msgstr "" + +#: ../nova/fakerabbit.py:54 +#, python-format +msgid "Publishing to route %s" +msgstr "ルート %s へパブリッシュ" + +#: ../nova/fakerabbit.py:84 +#, python-format +msgid "Declaring queue %s" +msgstr "queue %s の宣言" + +#: ../nova/fakerabbit.py:90 +#, python-format +msgid "Declaring exchange %s" +msgstr "exchange %s の宣言" + +#: ../nova/fakerabbit.py:96 +#, python-format +msgid "Binding %(queue)s to %(exchange)s with key %(routing_key)s" +msgstr "" + +#: ../nova/fakerabbit.py:121 +#, python-format +msgid "Getting from %(queue)s: %(message)s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:135 ../nova/virt/hyperv.py:171 +#, python-format +msgid "Created VM %s..." +msgstr "VM %s を作成します。" + +#: ../nova/virt/xenapi/vm_utils.py:138 +#, python-format +msgid "Created VM %(instance_name)s as %(vm_ref)s." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:168 +#, python-format +msgid "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:171 +#, python-format +msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:187 +#, python-format +msgid "VBD not found in instance %s" +msgstr "インスタンス %s のVBDが見つかりません。" + +#: ../nova/virt/xenapi/vm_utils.py:197 +#, python-format +msgid "Unable to unplug VBD %s" +msgstr "VBD %s の unplug に失敗しました。" + +#: ../nova/virt/xenapi/vm_utils.py:209 +#, python-format +msgid "Unable to destroy VBD %s" +msgstr "VBD %s の削除に失敗しました。" -#: nova/api/ec2/__init__.py:142 +#: ../nova/virt/xenapi/vm_utils.py:224 +#, python-format +msgid "Creating VIF for VM %(vm_ref)s, network %(network_ref)s." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:227 +#, python-format +msgid "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s." 
+msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:246 #, python-format msgid "" -"Access key %s has had %d failed authentications and will be locked out for " -"%d minutes." -msgstr "アクセスキー %s は %d 回認証に失敗したため、%d 分間ロックされます。" +"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s) on " +"%(sr_ref)s." +msgstr "" -#: nova/api/ec2/__init__.py:179 nova/objectstore/handler.py:140 +#. TODO(sirp): Add quiesce and VSS locking support when Windows support +#. is added +#: ../nova/virt/xenapi/vm_utils.py:258 #, python-format -msgid "Authentication Failure: %s" -msgstr "%s の認証に失敗しました。" +msgid "Snapshotting VM %(vm_ref)s with label '%(label)s'..." +msgstr "" -#: nova/api/ec2/__init__.py:190 +#: ../nova/virt/xenapi/vm_utils.py:272 #, python-format -msgid "Authenticated Request For %s:%s)" -msgstr "リクエストを認証しました: %s:%s" +msgid "Created snapshot %(template_vm_ref)s from VM %(vm_ref)s." +msgstr "" -#: nova/api/ec2/__init__.py:227 +#: ../nova/virt/xenapi/vm_utils.py:286 #, python-format -msgid "action: %s" -msgstr "アクション(action): %s" +msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s" +msgstr "" -#: nova/api/ec2/__init__.py:229 +#: ../nova/virt/xenapi/vm_utils.py:327 #, python-format -msgid "arg: %s\t\tval: %s" -msgstr "引数(arg): %s\t値(val): %s" +msgid "Size for image %(image)s:%(virtual_size)d" +msgstr "" -#: nova/api/ec2/__init__.py:301 +#: ../nova/virt/xenapi/vm_utils.py:332 #, python-format -msgid "Unauthorized request for controller=%s and action=%s" -msgstr "許可されていないリクエスト: controller=%s, action %sです。" +msgid "Glance image %s" +msgstr "" -#: nova/api/ec2/__init__.py:339 +#. we need to invoke a plugin for copying VDI's +#. 
content into proper path +#: ../nova/virt/xenapi/vm_utils.py:342 #, python-format -msgid "NotFound raised: %s" -msgstr "NotFound 発生: %s" +msgid "Copying VDI %s to /boot/guest on dom0" +msgstr "" -#: nova/api/ec2/__init__.py:342 +#: ../nova/virt/xenapi/vm_utils.py:352 #, python-format -msgid "ApiError raised: %s" -msgstr "APIエラー発生: %s" +msgid "Kernel/Ramdisk VDI %s destroyed" +msgstr "" -#: nova/api/ec2/__init__.py:349 +#: ../nova/virt/xenapi/vm_utils.py:361 #, python-format -msgid "Unexpected error raised: %s" -msgstr "予期しないエラー発生: %s" +msgid "Asking xapi to fetch %(url)s as %(access)s" +msgstr "" -#: nova/api/ec2/__init__.py:354 -msgid "An unknown error has occurred. Please try your request again." -msgstr "未知のエラーが発生しました。再度リクエストを実行してください。" +#: ../nova/virt/xenapi/vm_utils.py:386 ../nova/virt/xenapi/vm_utils.py:402 +#, python-format +msgid "Looking up vdi %s for PV kernel" +msgstr "PV kernelのvdi %s を取得します。" -#: nova/api/ec2/admin.py:84 +#: ../nova/virt/xenapi/vm_utils.py:397 #, python-format -msgid "Creating new user: %s" -msgstr "Creating new user: 新しいユーザ %s を作成します。" +msgid "PV Kernel in VDI:%s" +msgstr "" -#: nova/api/ec2/admin.py:92 +#: ../nova/virt/xenapi/vm_utils.py:405 #, python-format -msgid "Deleting user: %s" -msgstr "Deleting user: ユーザ %s を削除します。" +msgid "Running pygrub against %s" +msgstr "" -#: nova/api/ec2/admin.py:114 +#: ../nova/virt/xenapi/vm_utils.py:411 #, python-format -msgid "Adding role %s to user %s for project %s" -msgstr "Adding role: ロール %s をユーザ %s、プロジェクト %s に追加します。" +msgid "Found Xen kernel %s" +msgstr "Xen Kernel %s が見つかりました。" + +#: ../nova/virt/xenapi/vm_utils.py:413 +msgid "No Xen kernel found. Booting HVM." 
+msgstr "" -#: nova/api/ec2/admin.py:117 nova/auth/manager.py:415 +#: ../nova/virt/xenapi/vm_utils.py:425 ../nova/virt/hyperv.py:431 #, python-format -msgid "Adding sitewide role %s to user %s" -msgstr "Adding sitewide role: サイトワイドのロール %s をユーザ %s に追加します。" +msgid "duplicate name found: %s" +msgstr "%s は重複しています。" -#: nova/api/ec2/admin.py:122 +#: ../nova/virt/xenapi/vm_utils.py:442 #, python-format -msgid "Removing role %s from user %s for project %s" -msgstr "Removing role: ロール %s をユーザ %s プロジェクト %s から削除します。" +msgid "VDI %s is still available" +msgstr "VDI %s は依然として存在しています。" -#: nova/api/ec2/admin.py:125 nova/auth/manager.py:441 +#: ../nova/virt/xenapi/vm_utils.py:463 #, python-format -msgid "Removing sitewide role %s from user %s" -msgstr "Removing sitewide role: サイトワイドのロール %s をユーザ %s から削除します。" +msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgstr "(VM_UTILS) xenserver の vm state -> |%s|" -#: nova/api/ec2/admin.py:129 nova/api/ec2/admin.py:192 -msgid "operation must be add or remove" -msgstr "operation は add または remove の何れかである必要があります。" +#: ../nova/virt/xenapi/vm_utils.py:465 +#, python-format +msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgstr "(VM_UTILS) xenapi の power_state -> |%s|" + +#: ../nova/virt/xenapi/vm_utils.py:525 +#, python-format +msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s" +msgstr "" -#: nova/api/ec2/admin.py:142 +#: ../nova/virt/xenapi/vm_utils.py:542 #, python-format -msgid "Getting x509 for user: %s on project: %s" -msgstr "Getting X509: x509の取得: ユーザ %s, プロジェクト %s" +msgid "Re-scanning SR %s" +msgstr "SR %s を再スキャンします。" -#: nova/api/ec2/admin.py:159 +#: ../nova/virt/xenapi/vm_utils.py:567 #, python-format -msgid "Create project %s managed by %s" -msgstr "Create project: プロジェクト %s (%s により管理される)を作成します。" +msgid "" +"VHD coalesce attempts exceeded (%(counter)d > %(max_attempts)d), giving up..." 
+msgstr "" -#: nova/api/ec2/admin.py:170 +#: ../nova/virt/xenapi/vm_utils.py:574 #, python-format -msgid "Delete project: %s" -msgstr "Delete project: プロジェクト %s を削除しました。" +msgid "" +"Parent %(parent_uuid)s doesn't match original parent " +"%(original_parent_uuid)s, waiting for coalesce..." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:590 +#, python-format +msgid "No VDIs found for VM %s" +msgstr "VM %s にVDIが存在しません。" + +#: ../nova/virt/xenapi/vm_utils.py:594 +#, python-format +msgid "Unexpected number of VDIs (%(num_vdis)s) found for VM %(vm_ref)s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:653 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:188 +#, python-format +msgid "Creating VBD for VDI %s ... " +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:655 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:190 +#, python-format +msgid "Creating VBD for VDI %s done." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:657 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:192 +#, python-format +msgid "Plugging VBD %s ... " +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:659 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:194 +#, python-format +msgid "Plugging VBD %s done." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:661 +#, python-format +msgid "VBD %(vbd)s plugged as %(orig_dev)s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:664 +#, python-format +msgid "VBD %(vbd)s plugged into wrong dev, remapping to %(dev)s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:668 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:197 +#, python-format +msgid "Destroying VBD for VDI %s ... " +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:671 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:200 +#, python-format +msgid "Destroying VBD for VDI %s done." 
+msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:683 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:211 +msgid "VBD.unplug successful first time." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:688 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:216 +msgid "VBD.unplug rejected: retrying..." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:692 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:220 +msgid "VBD.unplug successful eventually." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:695 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:223 +#, python-format +msgid "Ignoring XenAPI.Failure in VBD.unplug: %s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:704 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:66 +#, python-format +msgid "Ignoring XenAPI.Failure %s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:735 +#, python-format +msgid "" +"Writing partition table %(primary_first)d %(primary_last)d to %(dest)s..." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:747 +#, python-format +msgid "Writing partition table %s done." 
+msgstr "" + +#: ../nova/tests/test_rpc.py:89 +#, python-format +msgid "Nested received %(queue)s, %(value)s" +msgstr "" + +#: ../nova/tests/test_rpc.py:95 +#, python-format +msgid "Nested return %s" +msgstr "ネストした戻り値: %s" + +#: ../nova/tests/test_rpc.py:120 ../nova/tests/test_rpc.py:126 +#, python-format +msgid "Received %s" +msgstr "%s を受信。" + +#: ../nova/db/sqlalchemy/api.py:44 +msgid "Use of empty request context is deprecated" +msgstr "Request context を空とすることは非推奨です。" + +#: ../nova/db/sqlalchemy/api.py:133 +#, python-format +msgid "No service for id %s" +msgstr "id %s のserviceが存在しません。" + +#: ../nova/db/sqlalchemy/api.py:251 +#, python-format +msgid "No service for %(host)s, %(binary)s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:592 +msgid "No fixed ips defined" +msgstr "fixed ipが一つも定義されていません。" + +#: ../nova/db/sqlalchemy/api.py:608 +#, python-format +msgid "No floating ip for address %s" +msgstr "アドレス %s の floating ip が存在しません。" + +#: ../nova/db/sqlalchemy/api.py:629 +#, python-format +msgid "No address for instance %s" +msgstr "インスタンス %s に対するアドレスが見つかりません。" + +#: ../nova/db/sqlalchemy/api.py:961 +#, python-format +msgid "no keypair for user %(user_id)s, name %(name)s" +msgstr "ユーザ %(user_id)s 名前 %(name)s のキーペアがありません。" + +#: ../nova/db/sqlalchemy/api.py:1076 ../nova/db/sqlalchemy/api.py:1156 +#, python-format +msgid "No network for id %s" +msgstr "id %s に該当するnetwork が存在しません。" + +#: ../nova/db/sqlalchemy/api.py:1086 +msgid "No networks defined" +msgstr "ネットワークが定義されていません。" + +#: ../nova/db/sqlalchemy/api.py:1115 +#, python-format +msgid "No network for bridge %s" +msgstr "ブリッジ %s に該当する network が存在しません。" + +#: ../nova/db/sqlalchemy/api.py:1129 ../nova/db/sqlalchemy/api.py:1142 +#, python-format +msgid "No network for instance %s" +msgstr "instance %s に該当する network が存在しません。" + +#: ../nova/db/sqlalchemy/api.py:1277 +#, python-format +msgid "Token %s does not exist" +msgstr "トークン %s が存在しません。" + +#: ../nova/db/sqlalchemy/api.py:1302 +#, python-format +msgid "No quota 
for project_id %s" +msgstr "project_id %s に対するクオータが存在しません。" + +#: ../nova/db/sqlalchemy/api.py:1455 ../nova/db/sqlalchemy/api.py:1501 +#: ../nova/api/ec2/__init__.py:323 +#, python-format +msgid "Volume %s not found" +msgstr "ボリューム %s が見つかりません。" + +#: ../nova/db/sqlalchemy/api.py:1514 +#, python-format +msgid "No export device found for volume %s" +msgstr "ボリューム %s に関してエクスポートされているデバイスがありません。" + +#: ../nova/db/sqlalchemy/api.py:1527 +#, python-format +msgid "No target id found for volume %s" +msgstr "ボリューム %s に対する target idが存在しません。" + +#: ../nova/db/sqlalchemy/api.py:1572 +#, python-format +msgid "No security group with id %s" +msgstr "id %s のセキュリティグループが存在しません。" + +#: ../nova/db/sqlalchemy/api.py:1589 +#, python-format +msgid "No security group named %(group_name)s for project: %(project_id)s" +msgstr "プロジェクト %(project_id)s に対する名称 %(group_name)s のセキュリティグループが存在しません。" + +#: ../nova/db/sqlalchemy/api.py:1682 +#, python-format +msgid "No secuity group rule with id %s" +msgstr "id %s のセキュリティグループルールが存在しません。" + +#: ../nova/db/sqlalchemy/api.py:1756 +#, python-format +msgid "No user for id %s" +msgstr "id %s のユーザが存在しません。" + +#: ../nova/db/sqlalchemy/api.py:1772 +#, python-format +msgid "No user for access key %s" +msgstr "アクセスキー %s に該当するユーザが存在しません。" + +#: ../nova/db/sqlalchemy/api.py:1834 +#, python-format +msgid "No project with id %s" +msgstr "id %s のプロジェクトが存在しません。" + +#: ../nova/db/sqlalchemy/api.py:1979 +#, python-format +msgid "No console pool with id %(pool_id)s" +msgstr "Id %(pool_id)s のコンソールプールがありません。" + +#: ../nova/db/sqlalchemy/api.py:1996 +#, python-format +msgid "" +"No console pool of type %(console_type)s for compute host %(compute_host)s " +"on proxy host %(host)s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:2035 +#, python-format +msgid "No console for instance %(instance_id)s in pool %(pool_id)s" +msgstr "プール %(pool_id)s に %(instance_id)s のコンソールがありません。" + +#: ../nova/db/sqlalchemy/api.py:2057 +#, python-format +msgid "on instance %s" +msgstr "" + +#: 
../nova/db/sqlalchemy/api.py:2058 +#, python-format +msgid "No console with id %(console_id)s %(idesc)s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:2078 ../nova/db/sqlalchemy/api.py:2097 +#, python-format +msgid "No zone with id %(zone_id)s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:160 +#, python-format +msgid "Checking state of %s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:165 +#, python-format +msgid "Current state of %(name)s was %(state)s." +msgstr "" + +#: ../nova/virt/libvirt_conn.py:183 +#, python-format +msgid "Connecting to libvirt: %s" +msgstr "libvirt %s へ接続します。" + +#: ../nova/virt/libvirt_conn.py:196 +msgid "Connection to libvirt broke" +msgstr "libvirtへの接続が切れています。" + +#: ../nova/virt/libvirt_conn.py:258 +#, python-format +msgid "instance %(instance_name)s: deleting instance files %(target)s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:283 +#, python-format +msgid "Invalid device path %s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:313 +#, python-format +msgid "No disk at %s" +msgstr "%s にディスクが存在しません。" + +#: ../nova/virt/libvirt_conn.py:320 +msgid "Instance snapshotting is not supported for libvirtat this time" +msgstr "インスタンスのスナップショットは現在libvirtに対してはサポートされていません。" + +#: ../nova/virt/libvirt_conn.py:336 +#, python-format +msgid "instance %s: rebooted" +msgstr "インスタンス%s: 再起動しました。" + +#: ../nova/virt/libvirt_conn.py:339 +#, python-format +msgid "_wait_for_reboot failed: %s" +msgstr "_wait_for_reboot 失敗: %s" + +#: ../nova/virt/libvirt_conn.py:382 +#, python-format +msgid "instance %s: rescued" +msgstr "インスタンス %s: rescued" + +#: ../nova/virt/libvirt_conn.py:385 +#, python-format +msgid "_wait_for_rescue failed: %s" +msgstr "_wait_for_rescue 失敗: %s" + +#: ../nova/virt/libvirt_conn.py:411 +#, python-format +msgid "instance %s: is running" +msgstr "インスタンス %s を起動中です。" -#: nova/api/ec2/admin.py:184 nova/auth/manager.py:533 +#: ../nova/virt/libvirt_conn.py:422 #, python-format -msgid "Adding user %s to project %s" -msgstr "Adding user: ユーザ %s 
をプロジェクト %s に追加します。" +msgid "instance %s: booted" +msgstr "インスタンス %s: 起動しました。" -#: nova/api/ec2/admin.py:188 +#: ../nova/virt/libvirt_conn.py:425 ../nova/virt/xenapi/vmops.py:186 #, python-format -msgid "Removing user %s from project %s" -msgstr "Removing user: ユーザ %s をプロジェクト %s から削除します。" +msgid "instance %s: failed to boot" +msgstr "インスタンス %s の起動に失敗しました。" -#: nova/api/ec2/apirequest.py:95 +#: ../nova/virt/libvirt_conn.py:436 #, python-format -msgid "Unsupported API request: controller = %s,action = %s" -msgstr "サポートされていないAPIリクエストです。 controller = %s,action = %s" +msgid "virsh said: %r" +msgstr "virsh の出力: %r" + +#: ../nova/virt/libvirt_conn.py:440 +msgid "cool, it's a device" +msgstr "デバイスです。" + +#: ../nova/virt/libvirt_conn.py:448 +#, python-format +msgid "data: %(data)r, fpath: %(fpath)r" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:456 +#, python-format +msgid "Contents of file %(fpath)s: %(contents)r" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:489 +msgid "Unable to find an open port" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:563 +#, python-format +msgid "instance %s: Creating image" +msgstr "インスタンス %s のイメージを生成します。" + +#: ../nova/virt/libvirt_conn.py:646 +#, python-format +msgid "instance %(inst_name)s: injecting key into image %(img_id)s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:649 +#, python-format +msgid "instance %(inst_name)s: injecting net into image %(img_id)s" +msgstr "" + +#. This could be a windows image, or a vmdk format disk +#: ../nova/virt/libvirt_conn.py:657 +#, python-format +msgid "" +"instance %(inst_name)s: ignoring error injecting data into image %(img_id)s " +"(%(e)s)" +msgstr "" -#: nova/api/ec2/cloud.py:117 +#. TODO(termie): cache? 
+#: ../nova/virt/libvirt_conn.py:665 +#, python-format +msgid "instance %s: starting toXML method" +msgstr "インスタンス %s: toXML メソッドを開始。" + +#: ../nova/virt/libvirt_conn.py:732 +#, python-format +msgid "instance %s: finished toXML method" +msgstr "インスタンス %s: toXML メソッドを完了。" + +#: ../nova/virt/libvirt_conn.py:751 +msgid "diagnostics are not supported for libvirt" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:1225 +#, python-format +msgid "Attempted to unfilter instance %s which is not filtered" +msgstr "" + +#: ../nova/api/ec2/metadatarequesthandler.py:76 +#, python-format +msgid "Failed to get metadata for ip: %s" +msgstr "ip %s に対するメタデータの取得に失敗しました。" + +#: ../nova/auth/fakeldap.py:33 +msgid "Attempted to instantiate singleton" +msgstr "シングルトンをインスタンス化しようとしました。" + +#: ../nova/network/api.py:39 +#, python-format +msgid "Quota exceeeded for %s, tried to allocate address" +msgstr "アドレスを割り当てようとしましたが、%s のクオータを超えました。" + +#: ../nova/network/api.py:42 +msgid "Address quota exceeded. You cannot allocate any more addresses" +msgstr "アドレスのクオータを超えました。これ以上アドレスを割り当てることはできません。" + +#: ../nova/tests/test_volume.py:162 +#, python-format +msgid "Target %s allocated" +msgstr "ターゲット %s をアロケートしました。" + +#: ../nova/virt/images.py:70 +#, python-format +msgid "Finished retreving %(url)s -- placed in %(path)s" +msgstr "" + +#: ../nova/scheduler/driver.py:66 +msgid "Must implement a fallback schedule" +msgstr "予備の(fallback)スケジューラを実装する必要があります。" + +#: ../nova/console/manager.py:70 +msgid "Adding console" +msgstr "" + +#: ../nova/console/manager.py:90 +#, python-format +msgid "Tried to remove non-existant console %(console_id)s." +msgstr "" + +#: ../nova/api/direct.py:149 +msgid "not available" +msgstr "" + +#: ../nova/api/ec2/cloud.py:62 +#, python-format +msgid "The key_pair %s already exists" +msgstr "" + +#. 
TODO(vish): Do this with M2Crypto instead +#: ../nova/api/ec2/cloud.py:118 #, python-format msgid "Generating root CA: %s" msgstr "ルートCA %s を生成しています。" -#: nova/api/ec2/cloud.py:277 +#: ../nova/api/ec2/cloud.py:303 #, python-format msgid "Create key pair %s" msgstr "Create key pair: キーペア %s を作成します。" -#: nova/api/ec2/cloud.py:285 +#: ../nova/api/ec2/cloud.py:311 #, python-format msgid "Delete key pair %s" msgstr "Delete key pair: キーペア %s を削除します。" -#: nova/api/ec2/cloud.py:357 +#: ../nova/api/ec2/cloud.py:386 #, python-format msgid "%s is not a valid ipProtocol" msgstr "%s は適切なipProtocolではありません。" -#: nova/api/ec2/cloud.py:361 +#: ../nova/api/ec2/cloud.py:390 msgid "Invalid port range" msgstr "ポートの範囲が不正です。" -#: nova/api/ec2/cloud.py:392 +#: ../nova/api/ec2/cloud.py:421 #, python-format msgid "Revoke security group ingress %s" msgstr "Revoke security group ingress: セキュリティグループ許可 %s の取消" -#: nova/api/ec2/cloud.py:401 nova/api/ec2/cloud.py:414 +#: ../nova/api/ec2/cloud.py:430 ../nova/api/ec2/cloud.py:459 +msgid "Not enough parameters to build a valid rule." +msgstr "" + +#: ../nova/api/ec2/cloud.py:443 msgid "No rule for the specified parameters." 
msgstr "指定されたパラメータに該当するルールがありません。" -#: nova/api/ec2/cloud.py:421 +#: ../nova/api/ec2/cloud.py:450 #, python-format msgid "Authorize security group ingress %s" msgstr "Authorize security group ingress: セキュリティグループ許可 %s" -#: nova/api/ec2/cloud.py:432 +#: ../nova/api/ec2/cloud.py:464 #, python-format msgid "This rule already exists in group %s" msgstr "指定されたルールは既にグループ %s に存在しています。" -#: nova/api/ec2/cloud.py:460 +#: ../nova/api/ec2/cloud.py:492 #, python-format msgid "Create Security Group %s" msgstr "Create Security Group: セキュリティグループ %s を作成します。" -#: nova/api/ec2/cloud.py:463 +#: ../nova/api/ec2/cloud.py:495 #, python-format msgid "group %s already exists" msgstr "グループ %s は既に存在しています。" -#: nova/api/ec2/cloud.py:475 +#: ../nova/api/ec2/cloud.py:507 #, python-format msgid "Delete security group %s" msgstr "Delete security group: セキュリティグループ %s を削除します。" -#: nova/api/ec2/cloud.py:483 nova/compute/manager.py:452 -#, python-format -msgid "Get console output for instance %s" -msgstr "Get console output: インスタンス %s のコンソール出力を取得します。" - -#: nova/api/ec2/cloud.py:543 +#: ../nova/api/ec2/cloud.py:584 #, python-format msgid "Create volume of %s GB" msgstr "Create volume: %s GBのボリュームを作成します。" -#: nova/api/ec2/cloud.py:567 +#: ../nova/api/ec2/cloud.py:612 #, python-format -msgid "Attach volume %s to instacne %s at %s" -msgstr "Attach volume: ボリューム%s をインスタンス %s にデバイス %s でアタッチします。" +msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" +msgstr "" -#: nova/api/ec2/cloud.py:579 +#: ../nova/api/ec2/cloud.py:629 #, python-format msgid "Detach volume %s" msgstr "Detach volume: ボリューム %s をデタッチします" -#: nova/api/ec2/cloud.py:686 +#: ../nova/api/ec2/cloud.py:761 msgid "Allocate address" msgstr "Allocate address: アドレスを割り当てます。" -#: nova/api/ec2/cloud.py:691 +#: ../nova/api/ec2/cloud.py:766 #, python-format msgid "Release address %s" msgstr "Release address: アドレス %s を開放します。" -#: nova/api/ec2/cloud.py:696 +#: ../nova/api/ec2/cloud.py:771 #, python-format -msgid "Associate address 
%s to instance %s" -msgstr "Associate address: アドレス %s をインスタンス %s に関連付けます。" +msgid "Associate address %(public_ip)s to instance %(instance_id)s" +msgstr "" -#: nova/api/ec2/cloud.py:703 +#: ../nova/api/ec2/cloud.py:780 #, python-format msgid "Disassociate address %s" msgstr "Disassociate address: アドレス %s の関連付けを解除します。" -#: nova/api/ec2/cloud.py:730 +#: ../nova/api/ec2/cloud.py:807 msgid "Going to start terminating instances" msgstr "インスタンス終了処理を開始します。" -#: nova/api/ec2/cloud.py:738 +#: ../nova/api/ec2/cloud.py:815 #, python-format msgid "Reboot instance %r" msgstr "Reboot instance: インスタンス %r を再起動します。" -#: nova/api/ec2/cloud.py:775 +#: ../nova/api/ec2/cloud.py:867 #, python-format msgid "De-registering image %s" msgstr "De-registering image: イメージ %s を登録解除します。" -#: nova/api/ec2/cloud.py:783 +#: ../nova/api/ec2/cloud.py:875 #, python-format -msgid "Registered image %s with id %s" -msgstr "Registered image: イメージ %s をid %s で登録します。" +msgid "Registered image %(image_location)s with id %(image_id)s" +msgstr "" -#: nova/api/ec2/cloud.py:789 nova/api/ec2/cloud.py:804 +#: ../nova/api/ec2/cloud.py:882 ../nova/api/ec2/cloud.py:900 #, python-format msgid "attribute not supported: %s" msgstr "アトリビュート %s はサポートされていません。" -#: nova/api/ec2/cloud.py:794 +#: ../nova/api/ec2/cloud.py:890 #, python-format msgid "invalid id: %s" msgstr "id %s は不正です。" -#: nova/api/ec2/cloud.py:807 +#: ../nova/api/ec2/cloud.py:903 msgid "user or group not specified" msgstr "ユーザまたはグループが指定されていません。" -#: nova/api/ec2/cloud.py:809 +#: ../nova/api/ec2/cloud.py:905 msgid "only group \"all\" is supported" msgstr "グループ \"all\" のみサポートされています。" -#: nova/api/ec2/cloud.py:811 +#: ../nova/api/ec2/cloud.py:907 msgid "operation_type must be add or remove" msgstr "operation_type は add または remove の何れかである必要があります。" -#: nova/api/ec2/cloud.py:812 +#: ../nova/api/ec2/cloud.py:908 #, python-format msgid "Updating image %s publicity" msgstr "イメージ %s の公開設定を更新します。" -#: nova/api/ec2/metadatarequesthandler.py:75 +#: ../bin/nova-api.py:52 
#, python-format -msgid "Failed to get metadata for ip: %s" -msgstr "ip %s に対するメタデータの取得に失敗しました。" +msgid "Using paste.deploy config at: %s" +msgstr "" -#: nova/api/openstack/__init__.py:70 +#: ../bin/nova-api.py:57 #, python-format -msgid "Caught error: %s" -msgstr "エラー %s をキャッチしました。" +msgid "No paste configuration for app: %s" +msgstr "" -#: nova/api/openstack/__init__.py:86 -msgid "Including admin operations in API." -msgstr "管理用オペレーション(admin operation)をAPIに登録します。" +#: ../bin/nova-api.py:59 +#, python-format +msgid "" +"App Config: %(api)s\n" +"%(config)r" +msgstr "" -#: nova/api/openstack/servers.py:184 +#: ../bin/nova-api.py:64 #, python-format -msgid "Compute.api::lock %s" -msgstr "例外: Compute.api::lock %s" +msgid "Running %s API" +msgstr "" -#: nova/api/openstack/servers.py:199 +#: ../bin/nova-api.py:69 #, python-format -msgid "Compute.api::unlock %s" -msgstr "例外: Compute.api::unlock %s" +msgid "No known API applications configured in %s." +msgstr "" -#: nova/api/openstack/servers.py:213 +#: ../bin/nova-api.py:83 #, python-format -msgid "Compute.api::get_lock %s" -msgstr "例外: Compute.api::get_lock %s" +msgid "Starting nova-api node (version %s)" +msgstr "" -#: nova/api/openstack/servers.py:224 +#: ../bin/nova-api.py:89 #, python-format -msgid "Compute.api::pause %s" -msgstr "例外: Compute.api::pause %s" +msgid "No paste configuration found for: %s" +msgstr "" -#: nova/api/openstack/servers.py:235 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:84 #, python-format -msgid "Compute.api::unpause %s" -msgstr "例外: Compute.api::unpause %s" +msgid "Argument %(key)s value %(value)s is too short." +msgstr "" -#: nova/api/openstack/servers.py:246 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:89 #, python-format -msgid "compute.api::suspend %s" -msgstr "例外: compute.api::suspend %s" +msgid "Argument %(key)s value %(value)s contains invalid characters." 
+msgstr "" -#: nova/api/openstack/servers.py:257 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:94 #, python-format -msgid "compute.api::resume %s" -msgstr "例外: compute.api::resume %s" +msgid "Argument %(key)s value %(value)s starts with a hyphen." +msgstr "" -#: nova/auth/dbdriver.py:84 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:102 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:130 #, python-format -msgid "User %s already exists" -msgstr "ユーザー %s は既に存在しています。" +msgid "Argument %s is required." +msgstr "" -#: nova/auth/dbdriver.py:106 nova/auth/ldapdriver.py:207 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:117 #, python-format -msgid "Project can't be created because manager %s doesn't exist" -msgstr "マネージャ %s が存在しないためプロジェクトを作成できません。" +msgid "" +"Argument %(key)s may not take value %(value)s. Valid values are ['true', " +"'false']." +msgstr "" -#: nova/auth/dbdriver.py:135 nova/auth/ldapdriver.py:204 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:163 #, python-format -msgid "Project can't be created because project %s already exists" -msgstr "プロジェクト %s が既に存在するためプロジェクトを作成できません。" +msgid "" +"Created VDI %(vdi_ref)s (%(label)s, %(size)s, %(read_only)s) on %(sr_ref)s." 
+msgstr "" -#: nova/auth/dbdriver.py:157 nova/auth/ldapdriver.py:241 +#: ../nova/virt/xenapi/vmops.py:67 #, python-format -msgid "Project can't be modified because manager %s doesn't exist" -msgstr "マネージャ %s が存在しないためプロジェクトを更新できません。" +msgid "Attempted to create non-unique name %s" +msgstr "ユニークではないname %s を作成しようとしました。" -#: nova/auth/dbdriver.py:245 +#: ../nova/virt/xenapi/vmops.py:73 #, python-format -msgid "User \"%s\" not found" -msgstr "ユーザ \"%s\" が見つかりません。" +msgid "instance %(name)s: not enough free memory" +msgstr "" -#: nova/auth/dbdriver.py:248 +#: ../nova/virt/xenapi/vmops.py:148 #, python-format -msgid "Project \"%s\" not found" -msgstr "プロジェクト \"%s\" が見つかりません。" +msgid "Starting VM %s..." +msgstr "VM %s を開始します…" -#: nova/auth/fakeldap.py:33 -msgid "Attempted to instantiate singleton" -msgstr "シングルトンをインスタンス化しようとしました。" +#: ../nova/virt/xenapi/vmops.py:151 +#, python-format +msgid "Spawning VM %(instance_name)s created %(vm_ref)s." +msgstr "" -#: nova/auth/ldapdriver.py:181 +#: ../nova/virt/xenapi/vmops.py:162 #, python-format -msgid "LDAP object for %s doesn't exist" -msgstr "LDAPオブジェクト %s が存在しません。" +msgid "Invalid value for onset_files: '%s'" +msgstr "" -#: nova/auth/ldapdriver.py:218 +#: ../nova/virt/xenapi/vmops.py:167 #, python-format -msgid "Project can't be created because user %s doesn't exist" -msgstr "ユーザ %s が存在しないためプロジェクトを作成できません。" +msgid "Injecting file path: '%s'" +msgstr "" -#: nova/auth/ldapdriver.py:478 +#: ../nova/virt/xenapi/vmops.py:180 #, python-format -msgid "User %s is already a member of the group %s" -msgstr "ユーザ %s は既にグループ %s のメンバーです。" +msgid "Instance %s: booted" +msgstr "インスタンス%s: ブートしました。" -#: nova/auth/ldapdriver.py:507 +#: ../nova/virt/xenapi/vmops.py:232 #, python-format -msgid "" -"Attempted to remove the last member of a group. Deleting the group at %s " -"instead." -msgstr "グループの最後のメンバーを削除しようとしました。代わりにグループ %s を削除してください。" +msgid "Instance not present %s" +msgstr "インスタンス%s が存在しません。" -#: nova/auth/ldapdriver.py:528 +#. 
TODO(sirp): Add quiesce and VSS locking support when Windows support +#. is added +#: ../nova/virt/xenapi/vmops.py:261 #, python-format -msgid "Group at dn %s doesn't exist" -msgstr "dnが %s のグループは存在しません。" +msgid "Starting snapshot for VM %s" +msgstr "VM %s に対するスナップショットを開始します。" -#: nova/auth/manager.py:259 +#: ../nova/virt/xenapi/vmops.py:269 #, python-format -msgid "Looking up user: %r" -msgstr "ユーザ %r を検索します。" +msgid "Unable to Snapshot %(vm_ref)s: %(exc)s" +msgstr "" -#: nova/auth/manager.py:263 +#: ../nova/virt/xenapi/vmops.py:280 #, python-format -msgid "Failed authorization for access key %s" -msgstr "Failed authorization: アクセスキー %s の認証に失敗しました。" +msgid "Finished snapshot and upload for VM %s" +msgstr "VM %s のスナップショットとアップロードが完了しました。" -#: nova/auth/manager.py:264 +#: ../nova/virt/xenapi/vmops.py:356 #, python-format -msgid "No user found for access key %s" -msgstr "アクセスキー %s に対するユーザが見つかりませんでした。" +msgid "VM %(vm)s already halted, skipping shutdown..." +msgstr "" -#: nova/auth/manager.py:270 +#: ../nova/virt/xenapi/vmops.py:389 +msgid "Removing kernel/ramdisk files" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:399 +msgid "kernel/ramdisk files removed" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:561 #, python-format -msgid "Using project name = user name (%s)" -msgstr "ユーザ名 (%s) をプロジェクト名として使用します。" +msgid "" +"TIMEOUT: The call to %(method)s timed out. VM id=%(instance_id)s; " +"args=%(strargs)s" +msgstr "" -#: nova/auth/manager.py:275 +#: ../nova/virt/xenapi/vmops.py:564 #, python-format -msgid "failed authorization: no project named %s (user=%s)" -msgstr "Failed authorization: 認証に失敗しました。プロジェクト名 %s (ユーザ = %s) は存在しません。" +msgid "" +"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. 
VM " +"id=%(instance_id)s; args=%(strargs)s" +msgstr "" -#: nova/auth/manager.py:277 +#: ../nova/virt/xenapi/vmops.py:569 #, python-format -msgid "No project called %s could be found" -msgstr "プロジェクト %s は見つかりませんでした。" +msgid "" +"The call to %(method)s returned an error: %(e)s. VM id=%(instance_id)s; " +"args=%(strargs)s" +msgstr "" -#: nova/auth/manager.py:281 +#: ../nova/virt/xenapi/vmops.py:760 #, python-format -msgid "Failed authorization: user %s not admin and not member of project %s" +msgid "OpenSSL error: %s" msgstr "" -"Failed authorization: 認証に失敗しました: ユーザ %s は管理者ではなくかつプロジェクト %s のメンバーではありません。" -#: nova/auth/manager.py:283 +#: ../nova/tests/test_compute.py:148 #, python-format -msgid "User %s is not a member of project %s" -msgstr "ユーザ %s はプロジェクト %s のメンバーではありません。" +msgid "Running instances: %s" +msgstr "インスタンス %s は実行中です。" -#: nova/auth/manager.py:292 nova/auth/manager.py:303 +#: ../nova/tests/test_compute.py:154 #, python-format -msgid "Invalid signature for user %s" -msgstr "Invalid signature: ユーザ %s の署名が不正です。" +msgid "After terminating instances: %s" +msgstr "インスタンス %s を終了した後です。" -#: nova/auth/manager.py:293 nova/auth/manager.py:304 -msgid "Signature does not match" -msgstr "署名が一致しません。" +#: ../nova/cloudpipe/pipelib.py:45 +msgid "Template for script to run on cloudpipe instance boot" +msgstr "cloudpipeインスタンス起動時に実行するスクリプトのテンプレート" -#: nova/auth/manager.py:374 -msgid "Must specify project" -msgstr "プロジェクトを指定してください。" +#: ../nova/cloudpipe/pipelib.py:48 +msgid "Network to push into openvpn config" +msgstr "openvpnの設定に入れるネットワークの値" -#: nova/auth/manager.py:408 -#, python-format -msgid "The %s role can not be found" -msgstr "ロール %s が見つかりません。" +#: ../nova/cloudpipe/pipelib.py:51 +msgid "Netmask to push into openvpn config" +msgstr "openvpnの設定に入れるネットマスクの値" -#: nova/auth/manager.py:410 +#: ../nova/cloudpipe/pipelib.py:97 #, python-format -msgid "The %s role is global only" -msgstr "ロール %s はグローバルでのみ使用可能です。" +msgid "Launching VPN for %s" +msgstr "%s 用のVPNを起動します。" -#: 
nova/auth/manager.py:412 +#: ../nova/db/sqlalchemy/migration.py:35 +msgid "python-migrate is not installed. Exiting." +msgstr "" + +#: ../nova/image/s3.py:99 #, python-format -msgid "Adding role %s to user %s in project %s" -msgstr "Adding role: ロール %s をユーザ %s (プロジェクト %s の) に追加します。" +msgid "Image %s could not be found" +msgstr "イメージ %s が見つかりませんでした。" -#: nova/auth/manager.py:438 +#: ../nova/api/ec2/__init__.py:121 +msgid "Too many failed authentications." +msgstr "認証失敗の回数が多すぎます。" + +#: ../nova/api/ec2/__init__.py:131 #, python-format -msgid "Removing role %s from user %s on project %s" -msgstr "Removing role: ロール %s をユーザ %s (プロジェクト %s の)から削除します。" +msgid "" +"Access key %(access_key)s has had %(failures)d failed authentications and " +"will be locked out for %(lock_mins)d minutes." +msgstr "" -#: nova/auth/manager.py:505 +#: ../nova/api/ec2/__init__.py:169 ../nova/objectstore/handler.py:140 #, python-format -msgid "Created project %s with manager %s" -msgstr "Created project: プロジェクト %s (マネージャ %s)を作成します。" +msgid "Authentication Failure: %s" +msgstr "%s の認証に失敗しました。" -#: nova/auth/manager.py:523 +#: ../nova/api/ec2/__init__.py:182 #, python-format -msgid "modifying project %s" -msgstr "modifying project: プロジェクト %s を更新します。" +msgid "Authenticated Request For %(uname)s:%(pname)s)" +msgstr "" -#: nova/auth/manager.py:553 +#: ../nova/api/ec2/__init__.py:207 #, python-format -msgid "Remove user %s from project %s" -msgstr "Remove user: ユーザ %s をプロジェクト %s から削除します。" +msgid "action: %s" +msgstr "アクション(action): %s" -#: nova/auth/manager.py:581 +#: ../nova/api/ec2/__init__.py:209 #, python-format -msgid "Deleting project %s" -msgstr "Deleting project: プロジェクト %s を削除します。" +msgid "arg: %(key)s\t\tval: %(value)s" +msgstr "" -#: nova/auth/manager.py:637 +#: ../nova/api/ec2/__init__.py:281 #, python-format -msgid "Created user %s (admin: %r)" -msgstr "Created user: ユーザ %s (admin: %r) を作成しました。" +msgid "" +"Unauthorized request for controller=%(controller)s and action=%(action)s" +msgstr 
"" -#: nova/auth/manager.py:645 +#: ../nova/api/ec2/__init__.py:314 #, python-format -msgid "Deleting user %s" -msgstr "Deleting user: ユーザ %s を削除します。" +msgid "InstanceNotFound raised: %s" +msgstr "" -#: nova/auth/manager.py:655 +#: ../nova/api/ec2/__init__.py:320 #, python-format -msgid "Access Key change for user %s" -msgstr "Access Key change: ユーザ %s のアクセスキーを更新します。" +msgid "VolumeNotFound raised: %s" +msgstr "" -#: nova/auth/manager.py:657 +#: ../nova/api/ec2/__init__.py:326 #, python-format -msgid "Secret Key change for user %s" -msgstr "Secret Key change: ユーザ %s のシークレットキーを更新します。" +msgid "NotFound raised: %s" +msgstr "NotFound 発生: %s" -#: nova/auth/manager.py:659 +#: ../nova/api/ec2/__init__.py:329 #, python-format -msgid "Admin status set to %r for user %s" -msgstr "Admin status set: 管理者ステータス %r をユーザ %s に設定します。" +msgid "ApiError raised: %s" +msgstr "APIエラー発生: %s" -#: nova/auth/manager.py:708 +#: ../nova/api/ec2/__init__.py:338 #, python-format -msgid "No vpn data for project %s" -msgstr "プロジェクト %s に関するvpnデータがありません。" +msgid "Unexpected error raised: %s" +msgstr "予期しないエラー発生: %s" -#: nova/cloudpipe/pipelib.py:45 -msgid "Template for script to run on cloudpipe instance boot" -msgstr "cloudpipeインスタンス起動時に実行するスクリプトのテンプレート" +#: ../nova/api/ec2/__init__.py:343 +msgid "An unknown error has occurred. Please try your request again." 
+msgstr "未知のエラーが発生しました。再度リクエストを実行してください。" -#: nova/cloudpipe/pipelib.py:48 -msgid "Network to push into openvpn config" -msgstr "openvpnの設定に入れるネットワークの値" +#: ../nova/auth/dbdriver.py:84 +#, python-format +msgid "User %s already exists" +msgstr "ユーザー %s は既に存在しています。" -#: nova/cloudpipe/pipelib.py:51 -msgid "Netmask to push into openvpn config" -msgstr "openvpnの設定に入れるネットマスクの値" +#: ../nova/auth/dbdriver.py:106 ../nova/auth/ldapdriver.py:232 +#, python-format +msgid "Project can't be created because manager %s doesn't exist" +msgstr "マネージャ %s が存在しないためプロジェクトを作成できません。" -#: nova/cloudpipe/pipelib.py:97 +#: ../nova/auth/dbdriver.py:122 ../nova/auth/ldapdriver.py:243 #, python-format -msgid "Launching VPN for %s" -msgstr "%s 用のVPNを起動します。" +msgid "Project can't be created because user %s doesn't exist" +msgstr "ユーザ %s が存在しないためプロジェクトを作成できません。" -#: nova/compute/api.py:67 +#: ../nova/auth/dbdriver.py:135 ../nova/auth/ldapdriver.py:229 #, python-format -msgid "Instance %d was not found in get_network_topic" -msgstr "get_network_topicにおいてインスタンス %d が見つかりませんでした。" +msgid "Project can't be created because project %s already exists" +msgstr "プロジェクト %s が既に存在するためプロジェクトを作成できません。" -#: nova/compute/api.py:73 +#: ../nova/auth/dbdriver.py:157 ../nova/auth/ldapdriver.py:268 #, python-format -msgid "Instance %d has no host" -msgstr "インスタンス %d にホストが登録されていません。" +msgid "Project can't be modified because manager %s doesn't exist" +msgstr "マネージャ %s が存在しないためプロジェクトを更新できません。" -#: nova/compute/api.py:92 +#: ../nova/auth/dbdriver.py:245 #, python-format -msgid "Quota exceeeded for %s, tried to run %s instances" -msgstr "%s のクオータ上限を超えました。%s インスタンスを実行しようとしました。" +msgid "User \"%s\" not found" +msgstr "ユーザ \"%s\" が見つかりません。" -#: nova/compute/api.py:94 +#: ../nova/auth/dbdriver.py:248 #, python-format +msgid "Project \"%s\" not found" +msgstr "プロジェクト \"%s\" が見つかりません。" + +#: ../nova/virt/xenapi_conn.py:129 msgid "" -"Instance quota exceeded. You can only run %s more instances of this type." 
-msgstr "インスタンスのクオータを超えました。このタイプにおいてはあと %s インスタンスしか実行できません。" +"Must specify xenapi_connection_url, xenapi_connection_username (optionally), " +"and xenapi_connection_password to use connection_type=xenapi" +msgstr "" +"connection_type=xenapi を使用するには、以下の指定が必要です: xenapi_connection_url, " +"xenapi_connection_username (オプション), xenapi_connection_password" -#: nova/compute/api.py:109 -msgid "Creating a raw instance" -msgstr "raw instanceを生成します。" +#: ../nova/virt/xenapi_conn.py:311 +#, python-format +msgid "Task [%(name)s] %(task)s status: success %(result)s" +msgstr "" -#: nova/compute/api.py:156 +#: ../nova/virt/xenapi_conn.py:317 #, python-format -msgid "Going to run %s instances..." -msgstr "%s 個のインスタンスの起動を始めます…" +msgid "Task [%(name)s] %(task)s status: %(status)s %(error_info)s" +msgstr "" -#: nova/compute/api.py:180 +#: ../nova/virt/xenapi_conn.py:331 ../nova/virt/xenapi_conn.py:344 #, python-format -msgid "Casting to scheduler for %s/%s's instance %s" -msgstr "スケジューラに対して %s/%s のインスタンス %s を送信します。" +msgid "Got exception: %s" +msgstr "例外 %s が発生しました。" -#: nova/compute/api.py:279 +#: ../nova/compute/monitor.py:259 #, python-format -msgid "Going to try and terminate %s" -msgstr "%s を終了します。" +msgid "updating %s..." 
+msgstr "%s の情報の更新…" -#: nova/compute/api.py:283 +#: ../nova/compute/monitor.py:289 +msgid "unexpected error during update" +msgstr "更新の最中に予期しないエラーが発生しました。" + +#: ../nova/compute/monitor.py:356 #, python-format -msgid "Instance %d was not found during terminate" -msgstr "インスタンス %d が終了処理において見つかりませんでした。" +msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\"" +msgstr "" -#: nova/compute/api.py:288 +#: ../nova/compute/monitor.py:379 #, python-format -msgid "Instance %d is already being terminated" -msgstr "インスタンス %d は既に終了済みです。" +msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\"" +msgstr "" -#: nova/compute/api.py:450 +#: ../nova/compute/monitor.py:414 +msgid "unexpected exception getting connection" +msgstr "接続に際し予期しないエラーが発生しました。" + +#: ../nova/compute/monitor.py:429 #, python-format -msgid "Invalid device specified: %s. Example device: /dev/vdb" -msgstr "デバイスの指定 %s が不正です: デバイス指定の例: /dev/vdb" +msgid "Found instance: %s" +msgstr "インスタンス %s が見つかりました。" -#: nova/compute/api.py:465 -msgid "Volume isn't attached to anything!" -msgstr "ボリュームはどこにもアタッチされていません。" +#: ../nova/volume/san.py:67 +#, python-format +msgid "Could not find iSCSI export for volume %s" +msgstr "" -#: nova/compute/disk.py:71 +#: ../nova/api/ec2/apirequest.py:100 #, python-format -msgid "Input partition size not evenly divisible by sector size: %d / %d" -msgstr "インプットパーティションサイズがセクターサイズで割り切れません。 %d / %d" +msgid "" +"Unsupported API request: controller = %(controller)s, action = %(action)s" +msgstr "" -#: nova/compute/disk.py:75 +#: ../nova/api/openstack/__init__.py:55 #, python-format -msgid "Bytes for local storage not evenly divisible by sector size: %d / %d" -msgstr "ローカルストレージのバイト数がセクターサイズで割り切れません: %d / %d" +msgid "Caught error: %s" +msgstr "エラー %s をキャッチしました。" + +#: ../nova/api/openstack/__init__.py:76 +msgid "Including admin operations in API." 
+msgstr "管理用オペレーション(admin operation)をAPIに登録します。" -#: nova/compute/disk.py:128 +#: ../nova/console/xvp.py:99 +msgid "Rebuilding xvp conf" +msgstr "" + +#: ../nova/console/xvp.py:116 #, python-format -msgid "Could not attach image to loopback: %s" -msgstr "イメージをループバック %s にアタッチできません。" +msgid "Re-wrote %s" +msgstr "" + +#: ../nova/console/xvp.py:121 +msgid "Stopping xvp" +msgstr "" + +#: ../nova/console/xvp.py:134 +msgid "Starting xvp" +msgstr "" + +#: ../nova/console/xvp.py:141 +#, python-format +msgid "Error starting xvp: %s" +msgstr "" + +#: ../nova/console/xvp.py:144 +msgid "Restarting xvp" +msgstr "" + +#: ../nova/console/xvp.py:146 +msgid "xvp not running..." +msgstr "" + +#: ../bin/nova-manage.py:272 +msgid "" +"The above error may show that the database has not been created.\n" +"Please create a database using nova-manage sync db before running this " +"command." +msgstr "" + +#: ../bin/nova-manage.py:426 +msgid "" +"No more networks available. If this is a new installation, you need\n" +"to call something like this:\n" +"\n" +" nova-manage network create 10.0.0.0/8 10 64\n" +"\n" +msgstr "" + +#: ../bin/nova-manage.py:431 +msgid "" +"The above error may show that the certificate db has not been created.\n" +"Please create a database by running a nova-api server on this host." 
+msgstr "" + +#: ../bin/nova-manage.py:447 ../bin/nova-manage.py:536 +msgid "network" +msgstr "" + +#: ../bin/nova-manage.py:448 +msgid "IP address" +msgstr "" + +#: ../bin/nova-manage.py:449 +msgid "MAC address" +msgstr "" + +#: ../bin/nova-manage.py:450 +msgid "hostname" +msgstr "" -#: nova/compute/disk.py:136 +#: ../bin/nova-manage.py:451 +msgid "host" +msgstr "" + +#: ../bin/nova-manage.py:537 +msgid "netmask" +msgstr "" + +#: ../bin/nova-manage.py:538 +msgid "start address" +msgstr "" + +#: ../nova/virt/disk.py:69 #, python-format msgid "Failed to load partition: %s" msgstr "パーティション %s のロードに失敗しました。" -#: nova/compute/disk.py:158 +#: ../nova/virt/disk.py:91 #, python-format msgid "Failed to mount filesystem: %s" msgstr "ファイルシステム %s のマウントに失敗しました。" -#: nova/compute/instance_types.py:41 +#: ../nova/virt/disk.py:124 #, python-format -msgid "Unknown instance type: %s" -msgstr "%s は未知のインスタンスタイプです。" +msgid "nbd device %s did not show up" +msgstr "" -#: nova/compute/manager.py:69 +#: ../nova/virt/disk.py:128 #, python-format -msgid "check_instance_lock: decorating: |%s|" -msgstr "check_instance_lock: decorating: |%s|" +msgid "Could not attach image to loopback: %s" +msgstr "イメージをループバック %s にアタッチできません。" -#: nova/compute/manager.py:71 -#, python-format -msgid "check_instance_lock: arguments: |%s| |%s| |%s|" -msgstr "check_instance_lock: arguments: |%s| |%s| |%s|" +#: ../nova/virt/disk.py:151 +msgid "No free nbd devices" +msgstr "" -#: nova/compute/manager.py:75 +#: ../doc/ext/nova_todo.py:46 #, python-format -msgid "check_instance_lock: locked: |%s|" -msgstr "check_instance_lock: locked: |%s|" +msgid "%(filename)s, line %(line_info)d" +msgstr "" -#: nova/compute/manager.py:77 -#, python-format -msgid "check_instance_lock: admin: |%s|" -msgstr "check_instance_lock: admin: |%s|" +#. 
FIXME(chiradeep): implement this +#: ../nova/virt/hyperv.py:118 +msgid "In init host" +msgstr "In init host" -#: nova/compute/manager.py:82 +#: ../nova/virt/hyperv.py:131 #, python-format -msgid "check_instance_lock: executing: |%s|" -msgstr "check_instance_lock: executing: |%s|" +msgid "Attempt to create duplicate vm %s" +msgstr "VM %s を二重に作成しようとしました。" -#: nova/compute/manager.py:86 +#: ../nova/virt/hyperv.py:148 #, python-format -msgid "check_instance_lock: not executing |%s|" -msgstr "check_instance_lock: not executing |%s|" - -#: nova/compute/manager.py:157 -msgid "Instance has already been created" -msgstr "インスタンスは既に生成されています。" +msgid "Starting VM %s " +msgstr "VM %s を開始します。 " -#: nova/compute/manager.py:158 +#: ../nova/virt/hyperv.py:150 #, python-format -msgid "instance %s: starting..." -msgstr "インスタンス %s を開始します。" +msgid "Started VM %s " +msgstr "VM %s を開始しました。 " -#: nova/compute/manager.py:197 +#: ../nova/virt/hyperv.py:152 #, python-format -msgid "instance %s: Failed to spawn" -msgstr "インスタンス %s の起動に失敗しました。" +msgid "spawn vm failed: %s" +msgstr "vmの生成(spawn)に失敗しました: %s" -#: nova/compute/manager.py:211 nova/tests/test_cloud.py:228 +#: ../nova/virt/hyperv.py:169 #, python-format -msgid "Terminating instance %s" -msgstr "Terminating instance: インスタンス %s を終了します。" +msgid "Failed to create VM %s" +msgstr "VM %s の作成に失敗しました。" -#: nova/compute/manager.py:217 +#: ../nova/virt/hyperv.py:188 #, python-format -msgid "Disassociating address %s" -msgstr "アドレス %s の関連付けを解除(disassociate)しています。" +msgid "Set memory for vm %s..." +msgstr "vm %s のメモリを設定します。" -#: nova/compute/manager.py:230 +#: ../nova/virt/hyperv.py:198 #, python-format -msgid "Deallocating address %s" -msgstr "アドレス %s の割当を解除(deallocate)します。" +msgid "Set vcpus for vm %s..." 
+msgstr "vm %s のvcpus を設定します。" -#: nova/compute/manager.py:243 +#: ../nova/virt/hyperv.py:202 #, python-format -msgid "trying to destroy already destroyed instance: %s" -msgstr "既に消去済みのインスタンス%sを消去しようとしました。" +msgid "Creating disk for %(vm_name)s by attaching disk file %(vhdfile)s" +msgstr "" -#: nova/compute/manager.py:257 +#: ../nova/virt/hyperv.py:227 #, python-format -msgid "Rebooting instance %s" -msgstr "Rebooting instance: インスタンス %s を再起動します。" +msgid "Failed to add diskdrive to VM %s" +msgstr "VM %s へのディスクドライブの追加に失敗しました。" -#: nova/compute/manager.py:260 +#: ../nova/virt/hyperv.py:230 #, python-format -msgid "trying to reboot a non-running instance: %s (state: %s excepted: %s)" -msgstr "実行していないインスタンスの再起動を試みます。%s (状態: %s 期待する状態: %s)" +msgid "New disk drive path is %s" +msgstr "新しいドライブパスは %s です。" -#: nova/compute/manager.py:286 +#: ../nova/virt/hyperv.py:247 #, python-format -msgid "instance %s: snapshotting" -msgstr "snapshotting: インスタンス %s のスナップショットを取得します。" +msgid "Failed to add vhd file to VM %s" +msgstr "vhdファイルの VM %s への追加に失敗しました。" -#: nova/compute/manager.py:289 +#: ../nova/virt/hyperv.py:249 #, python-format -msgid "" -"trying to snapshot a non-running instance: %s (state: %s excepted: %s)" -msgstr "実行していないインスタンスのスナップショット取得を試みます。%s (状態: %s 期待する状態: %s)" +msgid "Created disk for %s" +msgstr "%s に diskを作成します。" -#: nova/compute/manager.py:301 +#: ../nova/virt/hyperv.py:253 #, python-format -msgid "instance %s: rescuing" -msgstr "Rescuing: インスタンス %s をレスキューします。" +msgid "Creating nic for %s " +msgstr "%s にNICを作成します。 " -#: nova/compute/manager.py:316 -#, python-format -msgid "instance %s: unrescuing" -msgstr "Unrescuing: インスタンス %s をアンレスキューします。" +#: ../nova/virt/hyperv.py:272 +msgid "Failed creating a port on the external vswitch" +msgstr "外部vswitchへのポート作成に失敗しました。" -#: nova/compute/manager.py:335 +#: ../nova/virt/hyperv.py:273 #, python-format -msgid "instance %s: pausing" -msgstr "pausing: インスタンス %s を一時停止します。" +msgid "Failed creating port for %s" +msgstr "ポート %s 
の作成に失敗しました。" -#: nova/compute/manager.py:352 +#: ../nova/virt/hyperv.py:276 #, python-format -msgid "instance %s: unpausing" -msgstr "unpausing: インスタンス %s の一時停止を解除します。" +msgid "Created switch port %(vm_name)s on switch %(ext_path)s" +msgstr "" -#: nova/compute/manager.py:369 +#: ../nova/virt/hyperv.py:286 #, python-format -msgid "instance %s: retrieving diagnostics" -msgstr "retrieving diagnostics: インスタンス %s の診断情報を取得します。" +msgid "Failed to add nic to VM %s" +msgstr "VM %s に対してNICの追加に失敗しました。" -#: nova/compute/manager.py:382 +#: ../nova/virt/hyperv.py:288 #, python-format -msgid "instance %s: suspending" -msgstr "suspending: インスタンス %s をサスペンドします。" +msgid "Created nic for %s " +msgstr "%s のNICを作成しました。 " -#: nova/compute/manager.py:401 +#: ../nova/virt/hyperv.py:321 #, python-format -msgid "instance %s: resuming" -msgstr "resuming: インスタンス %s をレジュームします。" +msgid "WMI job failed: %s" +msgstr "WMIジョブに失敗しました: %s" -#: nova/compute/manager.py:420 +#: ../nova/virt/hyperv.py:325 #, python-format -msgid "instance %s: locking" -msgstr "locking: インスタンス %s をロックします。" +msgid "WMI job succeeded: %(desc)s, Elapsed=%(elap)s " +msgstr "" -#: nova/compute/manager.py:432 +#: ../nova/virt/hyperv.py:361 #, python-format -msgid "instance %s: unlocking" -msgstr "unlocking: インスタンス %s のロックを解除します。" +msgid "Got request to destroy vm %s" +msgstr "destroy vm %s リクエストを受信しました。" -#: nova/compute/manager.py:442 +#: ../nova/virt/hyperv.py:386 #, python-format -msgid "instance %s: getting locked state" -msgstr "getting locked state: インスタンス %s のロックを取得しました。" +msgid "Failed to destroy vm %s" +msgstr "vm %s の削除に失敗しました。" -#: nova/compute/manager.py:462 +#: ../nova/virt/hyperv.py:393 #, python-format -msgid "instance %s: attaching volume %s to %s" -msgstr "attaching volume: インスタンス %s についてボリューム %s を %s にアタッチします。" +msgid "Del: disk %(vhdfile)s vm %(instance_name)s" +msgstr "" -#: nova/compute/manager.py:478 +#: ../nova/virt/hyperv.py:415 #, python-format -msgid "instance %s: attach failed %s, removing" -msgstr 
"インスタンス %s: %sのアタッチに失敗しました。リムーブします。" +msgid "" +"Got Info for vm %(instance_id)s: state=%(state)s, mem=%(memusage)s, " +"num_cpu=%(numprocs)s, cpu_time=%(uptime)s" +msgstr "" -#: nova/compute/manager.py:493 +#: ../nova/virt/hyperv.py:451 #, python-format -msgid "Detach volume %s from mountpoint %s on instance %s" -msgstr "Detach volume: ボリューム %s をマウントポイント %s (インスタンス%s)からデタッチします。" +msgid "Successfully changed vm state of %(vm_name)s to %(req_state)s" +msgstr "" -#: nova/compute/manager.py:497 +#: ../nova/virt/hyperv.py:454 #, python-format -msgid "Detaching volume from unknown instance %s" -msgstr "ボリュームを未知のインスタンス %s からデタッチします。" +msgid "Failed to change vm state of %(vm_name)s to %(req_state)s" +msgstr "" -#: nova/compute/monitor.py:259 +#: ../nova/compute/api.py:71 #, python-format -msgid "updating %s..." -msgstr "%s の情報の更新…" - -#: nova/compute/monitor.py:289 -msgid "unexpected error during update" -msgstr "更新の最中に予期しないエラーが発生しました。" +msgid "Instance %d was not found in get_network_topic" +msgstr "get_network_topicにおいてインスタンス %d が見つかりませんでした。" -#: nova/compute/monitor.py:355 +#: ../nova/compute/api.py:77 #, python-format -msgid "Cannot get blockstats for \"%s\" on \"%s\"" -msgstr "ブロックデバイス \"%s\" の統計を \"%s\" について取得できません。" +msgid "Instance %d has no host" +msgstr "インスタンス %d にホストが登録されていません。" -#: nova/compute/monitor.py:377 +#: ../nova/compute/api.py:97 #, python-format -msgid "Cannot get ifstats for \"%s\" on \"%s\"" -msgstr "インタフェース \"%s\" の統計を \"%s\" について取得できません。" - -#: nova/compute/monitor.py:412 -msgid "unexpected exception getting connection" -msgstr "接続に際し予期しないエラーが発生しました。" +msgid "Quota exceeeded for %(pid)s, tried to run %(min_count)s instances" +msgstr "" -#: nova/compute/monitor.py:427 +#: ../nova/compute/api.py:99 #, python-format -msgid "Found instance: %s" -msgstr "インスタンス %s が見つかりました。" +msgid "" +"Instance quota exceeded. You can only run %s more instances of this type." 
+msgstr "インスタンスのクオータを超えました。このタイプにおいてはあと %s インスタンスしか実行できません。" -#: nova/db/sqlalchemy/api.py:43 -msgid "Use of empty request context is deprecated" -msgstr "Request context を空とすることは非推奨です。" +#: ../nova/compute/api.py:112 +msgid "Creating a raw instance" +msgstr "raw instanceを生成します。" -#: nova/db/sqlalchemy/api.py:132 +#: ../nova/compute/api.py:160 #, python-format -msgid "No service for id %s" -msgstr "id %s のserviceが存在しません。" +msgid "Going to run %s instances..." +msgstr "%s 個のインスタンスの起動を始めます…" -#: nova/db/sqlalchemy/api.py:229 +#: ../nova/compute/api.py:187 #, python-format -msgid "No service for %s, %s" -msgstr "%s, %s のserviceが存在しません。" +msgid "Casting to scheduler for %(pid)s/%(uid)s's instance %(instance_id)s" +msgstr "" -#: nova/db/sqlalchemy/api.py:574 +#: ../nova/compute/api.py:292 #, python-format -msgid "No floating ip for address %s" -msgstr "アドレス %s の floating ip が存在しません。" +msgid "Going to try to terminate %s" +msgstr "" -#: nova/db/sqlalchemy/api.py:668 +#: ../nova/compute/api.py:296 #, python-format -msgid "No instance for id %s" -msgstr "id %s のinstanceが存在しません。" +msgid "Instance %d was not found during terminate" +msgstr "インスタンス %d が終了処理において見つかりませんでした。" -#: nova/db/sqlalchemy/api.py:758 nova/virt/libvirt_conn.py:598 -#: nova/virt/xenapi/volumeops.py:48 nova/virt/xenapi/volumeops.py:103 +#: ../nova/compute/api.py:301 #, python-format -msgid "Instance %s not found" -msgstr "インスタンス %s が見つかりません。" +msgid "Instance %d is already being terminated" +msgstr "インスタンス %d は既に終了済みです。" -#: nova/db/sqlalchemy/api.py:891 +#: ../nova/compute/api.py:481 #, python-format -msgid "no keypair for user %s, name %s" -msgstr "ユーザ %s, ネーム%s に該当するキーペアが存在しません。" +msgid "Invalid device specified: %s. 
Example device: /dev/vdb" +msgstr "デバイスの指定 %s が不正です: デバイス指定の例: /dev/vdb" -#: nova/db/sqlalchemy/api.py:1006 nova/db/sqlalchemy/api.py:1064 -#, python-format -msgid "No network for id %s" -msgstr "id %s に該当するnetwork が存在しません。" +#: ../nova/compute/api.py:496 +msgid "Volume isn't attached to anything!" +msgstr "ボリュームはどこにもアタッチされていません。" -#: nova/db/sqlalchemy/api.py:1036 +#: ../nova/rpc.py:98 #, python-format -msgid "No network for bridge %s" -msgstr "ブリッジ %s に該当する network が存在しません。" +msgid "" +"AMQP server on %(fl_host)s:%(fl_port)d is unreachable. Trying again in " +"%(fl_intv)d seconds." +msgstr "" -#: nova/db/sqlalchemy/api.py:1050 +#: ../nova/rpc.py:103 #, python-format -msgid "No network for instance %s" -msgstr "instance %s に該当する network が存在しません。" +msgid "Unable to connect to AMQP server after %d tries. Shutting down." +msgstr "AMQPサーバーに %d 回接続を試みましたが、接続できませんでした。シャットダウンします。" + +#: ../nova/rpc.py:122 +msgid "Reconnected to queue" +msgstr "キューに再接続しました。" + +#: ../nova/rpc.py:129 +msgid "Failed to fetch message from queue" +msgstr "キューからメッセージの取得に失敗しました。" -#: nova/db/sqlalchemy/api.py:1180 +#: ../nova/rpc.py:159 #, python-format -msgid "Token %s does not exist" -msgstr "トークン %s が存在しません。" +msgid "Initing the Adapter Consumer for %s" +msgstr "%sのアダプターコンシューマー(Adapter Consumer)を初期化しています。" -#: nova/db/sqlalchemy/api.py:1205 +#: ../nova/rpc.py:178 #, python-format -msgid "No quota for project_id %s" -msgstr "project_id %s に対するクオータが存在しません。" +msgid "received %s" +msgstr "受信: %s" -#: nova/db/sqlalchemy/api.py:1356 +#. NOTE(vish): we may not want to ack here, but that means that bad +#. messages stay in the queue indefinitely, so for now +#. we just log the message and send an error string +#. 
back to the caller +#: ../nova/rpc.py:191 #, python-format -msgid "No volume for id %s" -msgstr "id %s に該当するボリュームが存在しません。" +msgid "no method for message: %s" +msgstr "メッセージ %s に対するメソッドが存在しません。" -#: nova/db/sqlalchemy/api.py:1401 +#: ../nova/rpc.py:192 #, python-format -msgid "Volume %s not found" -msgstr "ボリューム %s が見つかりません。" +msgid "No method for message: %s" +msgstr "メッセージ %s に対するメソッドが存在しません。" -#: nova/db/sqlalchemy/api.py:1413 +#: ../nova/rpc.py:253 #, python-format -msgid "No export device found for volume %s" -msgstr "ボリューム %s に関してエクスポートされているデバイスがありません。" +msgid "Returning exception %s to caller" +msgstr "呼び出し元に 例外 %s を返却します。" -#: nova/db/sqlalchemy/api.py:1426 +#: ../nova/rpc.py:294 #, python-format -msgid "No target id found for volume %s" -msgstr "ボリューム %s に対する target idが存在しません。" +msgid "unpacked context: %s" +msgstr "context %s をアンパックしました。" -#: nova/db/sqlalchemy/api.py:1471 +#: ../nova/rpc.py:313 +msgid "Making asynchronous call..." +msgstr "非同期呼び出しを実行します…" + +#: ../nova/rpc.py:316 #, python-format -msgid "No security group with id %s" -msgstr "id %s のセキュリティグループが存在しません。" +msgid "MSG_ID is %s" +msgstr "MSG_IDは %s です。" -#: nova/db/sqlalchemy/api.py:1488 +#: ../nova/rpc.py:354 +msgid "Making asynchronous cast..." +msgstr "" + +#: ../nova/rpc.py:364 #, python-format -msgid "No security group named %s for project: %s" -msgstr "セキュリティグループ名 %s がプロジェクト %s に存在しません。" +msgid "response %s" +msgstr "応答 %s" -#: nova/db/sqlalchemy/api.py:1576 +#: ../nova/rpc.py:373 #, python-format -msgid "No secuity group rule with id %s" -msgstr "id %s のセキュリティグループルールが存在しません。" +msgid "topic is %s" +msgstr "topic は %s です。" -#: nova/db/sqlalchemy/api.py:1650 +#: ../nova/rpc.py:374 #, python-format -msgid "No user for id %s" -msgstr "id %s のユーザが存在しません。" +msgid "message %s" +msgstr "メッセージ %s" -#: nova/db/sqlalchemy/api.py:1666 +#: ../nova/volume/driver.py:78 #, python-format -msgid "No user for access key %s" -msgstr "アクセスキー %s に該当するユーザが存在しません。" +msgid "Recovering from a failed execute. 
Try number %s" +msgstr "実行失敗からリカバリーします。%s 回目のトライ。" -#: nova/db/sqlalchemy/api.py:1728 +#: ../nova/volume/driver.py:87 #, python-format -msgid "No project with id %s" -msgstr "id %s のプロジェクトが存在しません。" +msgid "volume group %s doesn't exist" +msgstr "ボリュームグループ%sが存在しません。" -#: nova/image/glance.py:78 +#: ../nova/volume/driver.py:220 #, python-format -msgid "Parallax returned HTTP error %d from request for /images" -msgstr "Parallax がHTTPエラー%d を /images に対するリクエストに対して返しました。" +msgid "FAKE AOE: %s" +msgstr "偽のAOE: %s" + +#: ../nova/volume/driver.py:233 +msgid "Skipping ensure_export. No iscsi_target " +msgstr "" -#: nova/image/glance.py:97 +#: ../nova/volume/driver.py:279 ../nova/volume/driver.py:288 +msgid "Skipping remove_export. No iscsi_target " +msgstr "" + +#: ../nova/volume/driver.py:347 #, python-format -msgid "Parallax returned HTTP error %d from request for /images/detail" -msgstr "Parallax がHTTPエラー %d を /images/detail に対するリクエストに対して返しました" +msgid "FAKE ISCSI: %s" +msgstr "偽のISCSI: %s" -#: nova/image/s3.py:82 +#: ../nova/volume/driver.py:359 #, python-format -msgid "Image %s could not be found" -msgstr "イメージ %s が見つかりませんでした。" +msgid "rbd has no pool %s" +msgstr "" -#: nova/network/api.py:39 +#: ../nova/volume/driver.py:414 #, python-format -msgid "Quota exceeeded for %s, tried to allocate address" -msgstr "アドレスを割り当てようとしましたが、%s のクオータを超えました。" +msgid "Sheepdog is not working: %s" +msgstr "" -#: nova/network/api.py:42 -msgid "Address quota exceeded. 
You cannot allocate any more addresses" -msgstr "アドレスのクオータを超えました。これ以上アドレスを割り当てることはできません。" +#: ../nova/volume/driver.py:416 +msgid "Sheepdog is not working" +msgstr "" -#: nova/network/linux_net.py:176 +#: ../nova/wsgi.py:68 #, python-format -msgid "Starting VLAN inteface %s" -msgstr "VLANインタフェース %s を開始します。" +msgid "Starting %(arg0)s on %(host)s:%(port)s" +msgstr "" -#: nova/network/linux_net.py:186 -#, python-format -msgid "Starting Bridge interface for %s" -msgstr "%s 用のブリッジインタフェースを開始します。" +#: ../nova/wsgi.py:147 +msgid "You must implement __call__" +msgstr "" -#: nova/network/linux_net.py:254 +#: ../bin/nova-instancemonitor.py:55 +msgid "Starting instance monitor" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:58 +msgid "leasing ip" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:73 +msgid "Adopted old lease or got a change of mac/hostname" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:80 +msgid "releasing ip" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:123 #, python-format -msgid "Hupping dnsmasq threw %s" -msgstr "dnsmasqに対してhupを送信しましたが %s が発生しました。" +msgid "" +"Called %(action)s for mac %(mac)s with ip %(ip)s and hostname %(hostname)s " +"on interface %(interface)s" +msgstr "" -#: nova/network/linux_net.py:256 +#: ../nova/virt/fake.py:239 #, python-format -msgid "Pid %d is stale, relaunching dnsmasq" -msgstr "Pid %d は無効です。dnsmasqを再実行します。" +msgid "Instance %s Not Found" +msgstr "インスタンス %s が見つかりません。" -#: nova/network/linux_net.py:334 +#: ../nova/network/manager.py:153 #, python-format -msgid "Killing dnsmasq threw %s" -msgstr "dnsmasq をkillしましたが、 %s が発生しました。" +msgid "Dissassociated %s stale fixed ip(s)" +msgstr "無効になった %s 個の fixed ip を割当解除しました。" -#: nova/network/manager.py:135 +#: ../nova/network/manager.py:157 msgid "setting network host" msgstr "ネットワークホストの設定をします。" -#: nova/network/manager.py:190 +#: ../nova/network/manager.py:212 #, python-format msgid "Leasing IP %s" msgstr "IP %s をリースします。" -#: nova/network/manager.py:194 +#: ../nova/network/manager.py:216 #, python-format 
msgid "IP %s leased that isn't associated" msgstr "IP %s がリースされましたが関連付けられていません。" -#: nova/network/manager.py:197 +#: ../nova/network/manager.py:220 #, python-format -msgid "IP %s leased to bad mac %s vs %s" -msgstr "IP %s が期待した mac %s ではなく %s にリースされました。" +msgid "IP %(address)s leased to bad mac %(inst_addr)s vs %(mac)s" +msgstr "" -#: nova/network/manager.py:205 +#: ../nova/network/manager.py:228 #, python-format msgid "IP %s leased that was already deallocated" msgstr "既に割当解除しているIP %s がリースされました。" -#: nova/network/manager.py:214 +#: ../nova/network/manager.py:233 +#, python-format +msgid "Releasing IP %s" +msgstr "" + +#: ../nova/network/manager.py:237 #, python-format msgid "IP %s released that isn't associated" msgstr "割り当てていないIP %s が開放されました。" -#: nova/network/manager.py:217 +#: ../nova/network/manager.py:241 #, python-format -msgid "IP %s released from bad mac %s vs %s" -msgstr "IP %s がmac %s ではない mac %s への割当から開放されました。" +msgid "IP %(address)s released from bad mac %(inst_addr)s vs %(mac)s" +msgstr "" -#: nova/network/manager.py:220 +#: ../nova/network/manager.py:244 #, python-format msgid "IP %s released that was not leased" msgstr "リースしていないIP %s が開放されました。" -#: nova/network/manager.py:442 +#: ../nova/network/manager.py:519 +msgid "" +"The sum between the number of networks and the vlan start cannot be greater " +"than 4094" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:57 #, python-format -msgid "Dissassociated %s stale fixed ip(s)" -msgstr "無効になった %s 個の fixed ip を割当解除しました。" +msgid "Introducing %s..." +msgstr "%s を introduce します…" + +#: ../nova/virt/xenapi/volume_utils.py:74 +#, python-format +msgid "Introduced %(label)s as %(sr_ref)s." 
+msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:78 +msgid "Unable to create Storage Repository" +msgstr "Storage Repository を作成できません。" + +#: ../nova/virt/xenapi/volume_utils.py:90 +#, python-format +msgid "Unable to find SR from VBD %s" +msgstr "VBD %s から SRを取得できません。" + +#: ../nova/virt/xenapi/volume_utils.py:96 +#, python-format +msgid "Forgetting SR %s ... " +msgstr "SR %s をforgetします。 " + +#: ../nova/virt/xenapi/volume_utils.py:101 +#, python-format +msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:107 +#, python-format +msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s" +msgstr "" -#: nova/objectstore/handler.py:106 +#: ../nova/virt/xenapi/volume_utils.py:111 +#, python-format +msgid "Forgetting SR %s done." +msgstr "SR %s のforgetが完了。" + +#: ../nova/virt/xenapi/volume_utils.py:113 +#, python-format +msgid "Ignoring exception %(exc)s when forgetting SR %(sr_ref)s" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:123 +#, python-format +msgid "Unable to introduce VDI on SR %s" +msgstr "SR %s のVDIのintroduceができません。" + +#: ../nova/virt/xenapi/volume_utils.py:128 +#, python-format +msgid "Unable to get record of VDI %s on" +msgstr "VDI %s のレコードを取得できません。" + +#: ../nova/virt/xenapi/volume_utils.py:146 +#, python-format +msgid "Unable to introduce VDI for SR %s" +msgstr "SR %s のVDIをintroduceできません。" + +#: ../nova/virt/xenapi/volume_utils.py:175 +#, python-format +msgid "Unable to obtain target information %(device_path)s, %(mountpoint)s" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:197 +#, python-format +msgid "Mountpoint cannot be translated: %s" +msgstr "マウントポイントを変換できません。 %s" + +#: ../nova/objectstore/image.py:262 +#, python-format +msgid "Failed to decrypt private key: %s" +msgstr "" + +#: ../nova/objectstore/image.py:269 +#, python-format +msgid "Failed to decrypt initialization vector: %s" +msgstr "" + +#: ../nova/objectstore/image.py:277 +#, python-format +msgid 
"Failed to decrypt image file %(image_file)s: %(err)s" +msgstr "" + +#: ../nova/objectstore/handler.py:106 #, python-format msgid "Unknown S3 value type %r" msgstr "未知のS3 value type %r です。" -#: nova/objectstore/handler.py:137 +#: ../nova/objectstore/handler.py:137 msgid "Authenticated request" msgstr "認証リクエスト" -#: nova/objectstore/handler.py:182 +#: ../nova/objectstore/handler.py:182 msgid "List of buckets requested" msgstr "List of buckets が呼ばれました。" -#: nova/objectstore/handler.py:209 +#: ../nova/objectstore/handler.py:209 #, python-format msgid "List keys for bucket %s" msgstr "バケット %s のキーの一覧" -#: nova/objectstore/handler.py:217 +#: ../nova/objectstore/handler.py:217 #, python-format msgid "Unauthorized attempt to access bucket %s" msgstr "Unauthorized attempt to access bucket: バケット %s に対するアクセスは許可されていません。" -#: nova/objectstore/handler.py:235 +#: ../nova/objectstore/handler.py:235 #, python-format msgid "Creating bucket %s" msgstr "バケットを作成します。 %s" -#: nova/objectstore/handler.py:245 +#: ../nova/objectstore/handler.py:245 #, python-format msgid "Deleting bucket %s" msgstr "バケットを削除します。 %s" -#: nova/objectstore/handler.py:249 +#: ../nova/objectstore/handler.py:249 #, python-format msgid "Unauthorized attempt to delete bucket %s" msgstr "Unauthorized attempt to delete bucket: バケット %s に対する削除は許可されていません。" -#: nova/objectstore/handler.py:271 +#: ../nova/objectstore/handler.py:273 +#, python-format +msgid "Getting object: %(bname)s / %(nm)s" +msgstr "" + +#: ../nova/objectstore/handler.py:276 #, python-format -msgid "Getting object: %s / %s" -msgstr "オブジェクトの取得: %s / %s" +msgid "Unauthorized attempt to get object %(nm)s from bucket %(bname)s" +msgstr "" -#: nova/objectstore/handler.py:274 +#: ../nova/objectstore/handler.py:296 #, python-format -msgid "Unauthorized attempt to get object %s from bucket %s" +msgid "Putting object: %(bname)s / %(nm)s" msgstr "" -"Unauthorized attempt to get object: オブジェクト %s のバケット %s からの取得は許可されていません。" -#: nova/objectstore/handler.py:292 +#: 
../nova/objectstore/handler.py:299 #, python-format -msgid "Putting object: %s / %s" -msgstr "オブジェクトの格納:: %s / %s" +msgid "Unauthorized attempt to upload object %(nm)s to bucket %(bname)s" +msgstr "" -#: nova/objectstore/handler.py:295 +#: ../nova/objectstore/handler.py:318 #, python-format -msgid "Unauthorized attempt to upload object %s to bucket %s" +msgid "Deleting object: %(bname)s / %(nm)s" msgstr "" -"Unauthorized attempt to upload: オブジェクト %s のバケット %s へのアップロードは許可されていません。" -#: nova/objectstore/handler.py:314 +#: ../nova/objectstore/handler.py:322 #, python-format -msgid "Deleting object: %s / %s" -msgstr "オブジェクトを削除しています。: %s / %s" +msgid "Unauthorized attempt to delete object %(nm)s from bucket %(bname)s" +msgstr "" -#: nova/objectstore/handler.py:393 +#: ../nova/objectstore/handler.py:396 #, python-format msgid "Not authorized to upload image: invalid directory %s" msgstr "" "Not authorized to upload image: イメージの格納は許可されていません。ディレクトリ %s は正しくありません。" -#: nova/objectstore/handler.py:401 +#: ../nova/objectstore/handler.py:404 #, python-format msgid "Not authorized to upload image: unauthorized bucket %s" msgstr "" "Not authorized to upload image: イメージの格納は許可されていません。バケット %s への格納は許可されていません。" -#: nova/objectstore/handler.py:406 +#: ../nova/objectstore/handler.py:409 #, python-format msgid "Starting image upload: %s" msgstr "イメージのアップロードを開始しました。 %s" -#: nova/objectstore/handler.py:420 +#: ../nova/objectstore/handler.py:423 #, python-format msgid "Not authorized to update attributes of image %s" msgstr "Not authorized to update attributes: イメージ %s のアトリビュートの更新は許可されていません。" -#: nova/objectstore/handler.py:428 +#: ../nova/objectstore/handler.py:431 #, python-format -msgid "Toggling publicity flag of image %s %r" -msgstr "Toggling publicity flag: イメージ %s の公開フラグを %r に更新します。" +msgid "Toggling publicity flag of image %(image_id)s %(newstatus)r" +msgstr "" -#: nova/objectstore/handler.py:433 +#. 
other attributes imply update +#: ../nova/objectstore/handler.py:436 #, python-format msgid "Updating user fields on image %s" msgstr "Updating user fields: イメージ %s のユーザフィールドを更新します。" -#: nova/objectstore/handler.py:447 +#: ../nova/objectstore/handler.py:450 #, python-format msgid "Unauthorized attempt to delete image %s" msgstr "Unauthorized attempt to delete image: イメージ %s の削除は許可されていません。" -#: nova/objectstore/handler.py:452 +#: ../nova/objectstore/handler.py:455 #, python-format msgid "Deleted image: %s" msgstr "イメージ %s を削除しました。" -#: nova/scheduler/chance.py:37 nova/scheduler/simple.py:73 -#: nova/scheduler/simple.py:106 nova/scheduler/simple.py:118 -msgid "No hosts found" -msgstr "適切なホストが見つかりません。" +#: ../nova/auth/manager.py:259 +#, python-format +msgid "Looking up user: %r" +msgstr "ユーザ %r を検索します。" -#: nova/scheduler/driver.py:66 -msgid "Must implement a fallback schedule" -msgstr "予備の(fallback)スケジューラを実装する必要があります。" +#: ../nova/auth/manager.py:263 +#, python-format +msgid "Failed authorization for access key %s" +msgstr "Failed authorization: アクセスキー %s の認証に失敗しました。" -#: nova/scheduler/manager.py:69 +#: ../nova/auth/manager.py:264 #, python-format -msgid "Casting to %s %s for %s" -msgstr "メッセージのcast: %s %s for %s" +msgid "No user found for access key %s" +msgstr "アクセスキー %s に対するユーザが見つかりませんでした。" -#: nova/scheduler/simple.py:63 -msgid "All hosts have too many cores" -msgstr "全てのホストにコア数の空きがありません。" +#: ../nova/auth/manager.py:270 +#, python-format +msgid "Using project name = user name (%s)" +msgstr "ユーザ名 (%s) をプロジェクト名として使用します。" -#: nova/scheduler/simple.py:95 -msgid "All hosts have too many gigabytes" -msgstr "全てのホストが利用可能な容量(gigabytes)に達しています。" +#: ../nova/auth/manager.py:277 +#, python-format +msgid "failed authorization: no project named %(pjid)s (user=%(uname)s)" +msgstr "" -#: nova/scheduler/simple.py:115 -msgid "All hosts have too many networks" -msgstr "全てのホストがネットワークの最大数に達しています。" +#: ../nova/auth/manager.py:279 +#, python-format +msgid "No project called %s could 
be found" +msgstr "プロジェクト %s は見つかりませんでした。" -#: nova/tests/test_cloud.py:198 -msgid "Can't test instances without a real virtual env." -msgstr "インスタンスのテストには実際の仮想環境が必要です。(fakeでは実行できません。)" +#: ../nova/auth/manager.py:287 +#, python-format +msgid "" +"Failed authorization: user %(uname)s not admin and not member of project " +"%(pjname)s" +msgstr "" -#: nova/tests/test_cloud.py:210 +#: ../nova/auth/manager.py:289 #, python-format -msgid "Need to watch instance %s until it's running..." -msgstr "インスタンス %s が実行するまで監視します…" +msgid "User %(uid)s is not a member of project %(pjid)s" +msgstr "" -#: nova/tests/test_compute.py:104 +#: ../nova/auth/manager.py:298 ../nova/auth/manager.py:309 #, python-format -msgid "Running instances: %s" -msgstr "インスタンス %s は実行中です。" +msgid "Invalid signature for user %s" +msgstr "Invalid signature: ユーザ %s の署名が不正です。" + +#: ../nova/auth/manager.py:299 ../nova/auth/manager.py:310 +msgid "Signature does not match" +msgstr "署名が一致しません。" -#: nova/tests/test_compute.py:110 +#: ../nova/auth/manager.py:380 +msgid "Must specify project" +msgstr "プロジェクトを指定してください。" + +#: ../nova/auth/manager.py:414 #, python-format -msgid "After terminating instances: %s" -msgstr "インスタンス %s を終了した後です。" +msgid "The %s role can not be found" +msgstr "ロール %s が見つかりません。" -#: nova/tests/test_rpc.py:89 +#: ../nova/auth/manager.py:416 #, python-format -msgid "Nested received %s, %s" -msgstr "ネスとした受信: %s, %s" +msgid "The %s role is global only" +msgstr "ロール %s はグローバルでのみ使用可能です。" -#: nova/tests/test_rpc.py:94 +#: ../nova/auth/manager.py:420 #, python-format -msgid "Nested return %s" -msgstr "ネストした戻り値: %s" +msgid "Adding role %(role)s to user %(uid)s in project %(pid)s" +msgstr "" -#: nova/tests/test_rpc.py:119 nova/tests/test_rpc.py:125 +#: ../nova/auth/manager.py:423 #, python-format -msgid "Received %s" -msgstr "%s を受信。" +msgid "Adding sitewide role %(role)s to user %(uid)s" +msgstr "" -#: nova/tests/test_volume.py:162 +#: ../nova/auth/manager.py:448 #, python-format -msgid "Target %s 
allocated" -msgstr "ターゲット %s をアロケートしました。" +msgid "Removing role %(role)s from user %(uid)s on project %(pid)s" +msgstr "" -#: nova/virt/connection.py:73 -msgid "Failed to open connection to the hypervisor" -msgstr "ハイパーバイザへの接続に失敗しました。" +#: ../nova/auth/manager.py:451 +#, python-format +msgid "Removing sitewide role %(role)s from user %(uid)s" +msgstr "" -#: nova/virt/fake.py:210 +#: ../nova/auth/manager.py:515 #, python-format -msgid "Instance %s Not Found" -msgstr "インスタンス %s が見つかりません。" +msgid "Created project %(name)s with manager %(manager_user)s" +msgstr "" -#: nova/virt/hyperv.py:118 -msgid "In init host" -msgstr "In init host" +#: ../nova/auth/manager.py:533 +#, python-format +msgid "modifying project %s" +msgstr "modifying project: プロジェクト %s を更新します。" -#: nova/virt/hyperv.py:131 +#: ../nova/auth/manager.py:545 #, python-format -msgid "Attempt to create duplicate vm %s" -msgstr "VM %s を二重に作成しようとしました。" +msgid "Adding user %(uid)s to project %(pid)s" +msgstr "" -#: nova/virt/hyperv.py:148 +#: ../nova/auth/manager.py:566 #, python-format -msgid "Starting VM %s " -msgstr "VM %s を開始します。 " +msgid "Remove user %(uid)s from project %(pid)s" +msgstr "" -#: nova/virt/hyperv.py:150 +#: ../nova/auth/manager.py:592 #, python-format -msgid "Started VM %s " -msgstr "VM %s を開始しました。 " +msgid "Deleting project %s" +msgstr "Deleting project: プロジェクト %s を削除します。" -#: nova/virt/hyperv.py:152 +#: ../nova/auth/manager.py:650 #, python-format -msgid "spawn vm failed: %s" -msgstr "vmの生成(spawn)に失敗しました: %s" +msgid "Created user %(rvname)s (admin: %(rvadmin)r)" +msgstr "" -#: nova/virt/hyperv.py:169 +#: ../nova/auth/manager.py:659 #, python-format -msgid "Failed to create VM %s" -msgstr "VM %s の作成に失敗しました。" +msgid "Deleting user %s" +msgstr "Deleting user: ユーザ %s を削除します。" -#: nova/virt/hyperv.py:171 nova/virt/xenapi/vm_utils.py:125 +#: ../nova/auth/manager.py:669 #, python-format -msgid "Created VM %s..." 
-msgstr "VM %s を作成します。" +msgid "Access Key change for user %s" +msgstr "Access Key change: ユーザ %s のアクセスキーを更新します。" -#: nova/virt/hyperv.py:188 +#: ../nova/auth/manager.py:671 #, python-format -msgid "Set memory for vm %s..." -msgstr "vm %s のメモリを設定します。" +msgid "Secret Key change for user %s" +msgstr "Secret Key change: ユーザ %s のシークレットキーを更新します。" -#: nova/virt/hyperv.py:198 +#: ../nova/auth/manager.py:673 #, python-format -msgid "Set vcpus for vm %s..." -msgstr "vm %s のvcpus を設定します。" +msgid "Admin status set to %(admin)r for user %(uid)s" +msgstr "" -#: nova/virt/hyperv.py:202 +#: ../nova/auth/manager.py:722 #, python-format -msgid "Creating disk for %s by attaching disk file %s" -msgstr "%s のディスクをディスクファイル %s をアタッチして作成します。" +msgid "No vpn data for project %s" +msgstr "プロジェクト %s に関するvpnデータがありません。" -#: nova/virt/hyperv.py:227 +#: ../nova/service.py:161 #, python-format -msgid "Failed to add diskdrive to VM %s" -msgstr "VM %s へのディスクドライブの追加に失敗しました。" +msgid "Starting %(topic)s node (version %(vcs_string)s)" +msgstr "" + +#: ../nova/service.py:174 +msgid "Service killed that has no database entry" +msgstr "データベースにエントリの存在しないサービスを終了します。" + +#: ../nova/service.py:195 +msgid "The service database object disappeared, Recreating it." +msgstr "サービスデータベースオブジェクトが消滅しました。再作成します。" -#: nova/virt/hyperv.py:230 +#: ../nova/service.py:207 +msgid "Recovered model server connection!" 
+msgstr "モデルサーバへの接続を復旧しました。" + +#: ../nova/service.py:213 +msgid "model server went away" +msgstr "モデルサーバが消滅しました。" + +#: ../nova/auth/ldapdriver.py:174 #, python-format -msgid "New disk drive path is %s" -msgstr "新しいドライブパスは %s です。" +msgid "LDAP user %s already exists" +msgstr "" -#: nova/virt/hyperv.py:247 +#: ../nova/auth/ldapdriver.py:205 #, python-format -msgid "Failed to add vhd file to VM %s" -msgstr "vhdファイルの VM %s への追加に失敗しました。" +msgid "LDAP object for %s doesn't exist" +msgstr "LDAPオブジェクト %s が存在しません。" -#: nova/virt/hyperv.py:249 +#: ../nova/auth/ldapdriver.py:348 #, python-format -msgid "Created disk for %s" -msgstr "%s に diskを作成します。" +msgid "User %s doesn't exist" +msgstr "" -#: nova/virt/hyperv.py:253 +#: ../nova/auth/ldapdriver.py:472 #, python-format -msgid "Creating nic for %s " -msgstr "%s にNICを作成します。 " +msgid "Group can't be created because group %s already exists" +msgstr "" -#: nova/virt/hyperv.py:272 -msgid "Failed creating a port on the external vswitch" -msgstr "外部vswitchへのポート作成に失敗しました。" +#: ../nova/auth/ldapdriver.py:478 +#, python-format +msgid "Group can't be created because user %s doesn't exist" +msgstr "" -#: nova/virt/hyperv.py:273 +#: ../nova/auth/ldapdriver.py:495 #, python-format -msgid "Failed creating port for %s" -msgstr "ポート %s の作成に失敗しました。" +msgid "User %s can't be searched in group because the user doesn't exist" +msgstr "" -#: nova/virt/hyperv.py:275 +#: ../nova/auth/ldapdriver.py:507 #, python-format -msgid "Created switch port %s on switch %s" -msgstr "スイッチポート %s をスイッチ %s に作成しました。" +msgid "User %s can't be added to the group because the user doesn't exist" +msgstr "" -#: nova/virt/hyperv.py:285 +#: ../nova/auth/ldapdriver.py:510 ../nova/auth/ldapdriver.py:521 #, python-format -msgid "Failed to add nic to VM %s" -msgstr "VM %s に対してNICの追加に失敗しました。" +msgid "The group at dn %s doesn't exist" +msgstr "" -#: nova/virt/hyperv.py:287 +#: ../nova/auth/ldapdriver.py:513 #, python-format -msgid "Created nic for %s " -msgstr "%s のNICを作成しました。 
" +msgid "User %(uid)s is already a member of the group %(group_dn)s" +msgstr "" -#: nova/virt/hyperv.py:320 +#: ../nova/auth/ldapdriver.py:524 #, python-format -msgid "WMI job failed: %s" -msgstr "WMIジョブに失敗しました: %s" +msgid "" +"User %s can't be removed from the group because the user doesn't exist" +msgstr "" -#: nova/virt/hyperv.py:322 +#: ../nova/auth/ldapdriver.py:528 #, python-format -msgid "WMI job succeeded: %s, Elapsed=%s " -msgstr "WMIジョブが成功しました: %s, 経過時間=%s " +msgid "User %s is not a member of the group" +msgstr "" -#: nova/virt/hyperv.py:358 +#: ../nova/auth/ldapdriver.py:542 #, python-format -msgid "Got request to destroy vm %s" -msgstr "destroy vm %s リクエストを受信しました。" +msgid "" +"Attempted to remove the last member of a group. Deleting the group at %s " +"instead." +msgstr "グループの最後のメンバーを削除しようとしました。代わりにグループ %s を削除してください。" -#: nova/virt/hyperv.py:383 +#: ../nova/auth/ldapdriver.py:549 #, python-format -msgid "Failed to destroy vm %s" -msgstr "vm %s の削除に失敗しました。" +msgid "User %s can't be removed from all because the user doesn't exist" +msgstr "" -#: nova/virt/hyperv.py:389 +#: ../nova/auth/ldapdriver.py:564 #, python-format -msgid "Del: disk %s vm %s" -msgstr "Del: 削除: disk %s vm %s" +msgid "Group at dn %s doesn't exist" +msgstr "dnが %s のグループは存在しません。" -#: nova/virt/hyperv.py:405 +#: ../nova/virt/xenapi/network_utils.py:40 #, python-format -msgid "" -"Got Info for vm %s: state=%s, mem=%s, num_cpu=%s, " -"cpu_time=%s" +msgid "Found non-unique network for bridge %s" +msgstr "ブリッジ %s に対してブリッジが複数存在します。" + +#: ../nova/virt/xenapi/network_utils.py:43 +#, python-format +msgid "Found no network for bridge %s" +msgstr "ブリッジ %s に対するネットワークが存在しません。" + +#: ../nova/api/ec2/admin.py:97 +#, python-format +msgid "Creating new user: %s" +msgstr "Creating new user: 新しいユーザ %s を作成します。" + +#: ../nova/api/ec2/admin.py:105 +#, python-format +msgid "Deleting user: %s" +msgstr "Deleting user: ユーザ %s を削除します。" + +#: ../nova/api/ec2/admin.py:127 +#, python-format +msgid "Adding role 
%(role)s to user %(user)s for project %(project)s" msgstr "" -"vm %s の情報の取得: state=%s, mem=%s, num_cpu=%s, cpu_time=%s" -#: nova/virt/hyperv.py:424 nova/virt/xenapi/vm_utils.py:301 +#: ../nova/api/ec2/admin.py:131 #, python-format -msgid "duplicate name found: %s" -msgstr "%s は重複しています。" +msgid "Adding sitewide role %(role)s to user %(user)s" +msgstr "" -#: nova/virt/hyperv.py:444 +#: ../nova/api/ec2/admin.py:137 #, python-format -msgid "Successfully changed vm state of %s to %s" -msgstr "vmの状態の %s から %s への変更に成功しました。" +msgid "Removing role %(role)s from user %(user)s for project %(project)s" +msgstr "" -#: nova/virt/hyperv.py:447 nova/virt/hyperv.py:449 +#: ../nova/api/ec2/admin.py:141 #, python-format -msgid "Failed to change vm state of %s to %s" -msgstr "VMの状態の %s から %s への変更に失敗しました。" +msgid "Removing sitewide role %(role)s from user %(user)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:146 ../nova/api/ec2/admin.py:223 +msgid "operation must be add or remove" +msgstr "operation は add または remove の何れかである必要があります。" -#: nova/virt/images.py:70 +#: ../nova/api/ec2/admin.py:159 #, python-format -msgid "Finished retreving %s -- placed in %s" -msgstr "%s を取得しました。格納先: %s" +msgid "Getting x509 for user: %(name)s on project: %(project)s" +msgstr "" -#: nova/virt/libvirt_conn.py:144 +#: ../nova/api/ec2/admin.py:177 #, python-format -msgid "Connecting to libvirt: %s" -msgstr "libvirt %s へ接続します。" +msgid "Create project %(name)s managed by %(manager_user)s" +msgstr "" -#: nova/virt/libvirt_conn.py:157 -msgid "Connection to libvirt broke" -msgstr "libvirtへの接続が切れています。" +#: ../nova/api/ec2/admin.py:190 +#, python-format +msgid "Modify project: %(name)s managed by %(manager_user)s" +msgstr "" -#: nova/virt/libvirt_conn.py:229 +#: ../nova/api/ec2/admin.py:200 #, python-format -msgid "instance %s: deleting instance files %s" -msgstr "インスタンス %s: インスタンスファイル %s を削除しています。" +msgid "Delete project: %s" +msgstr "Delete project: プロジェクト %s を削除しました。" -#: nova/virt/libvirt_conn.py:271 +#: 
../nova/api/ec2/admin.py:214 #, python-format -msgid "No disk at %s" -msgstr "%s にディスクが存在しません。" +msgid "Adding user %(user)s to project %(project)s" +msgstr "" -#: nova/virt/libvirt_conn.py:278 -msgid "Instance snapshotting is not supported for libvirtat this time" -msgstr "インスタンスのスナップショットは現在libvirtに対してはサポートされていません。" +#: ../nova/api/ec2/admin.py:218 +#, python-format +msgid "Removing user %(user)s from project %(project)s" +msgstr "" -#: nova/virt/libvirt_conn.py:294 #, python-format -msgid "instance %s: rebooted" -msgstr "インスタンス%s: 再起動しました。" +#~ msgid "" +#~ "%s\n" +#~ "Command: %s\n" +#~ "Exit code: %s\n" +#~ "Stdout: %r\n" +#~ "Stderr: %r" +#~ msgstr "" +#~ "%s\n" +#~ "コマンド: %s\n" +#~ "終了コード: %s\n" +#~ "標準出力: %r\n" +#~ "標準エラー出力: %r" -#: nova/virt/libvirt_conn.py:297 #, python-format -msgid "_wait_for_reboot failed: %s" -msgstr "_wait_for_reboot 失敗: %s" +#~ msgid "(%s) publish (key: %s) %s" +#~ msgstr "(%s) パブリッシュ (key: %s) %s" -#: nova/virt/libvirt_conn.py:340 #, python-format -msgid "instance %s: rescued" -msgstr "インスタンス %s: rescued" +#~ msgid "Binding %s to %s with key %s" +#~ msgstr "%s を %s にキー %s でバインドします。" -#: nova/virt/libvirt_conn.py:343 #, python-format -msgid "_wait_for_rescue failed: %s" -msgstr "_wait_for_rescue 失敗: %s" +#~ msgid "Getting from %s: %s" +#~ msgstr "%s から %s を取得" -#: nova/virt/libvirt_conn.py:370 #, python-format -msgid "instance %s: is running" -msgstr "インスタンス %s を起動中です。" +#~ msgid "AMQP server on %s:%d is unreachable. Trying again in %d seconds." +#~ msgstr "AMQPサーバ %s:%d に接続できません。 %d 秒後に再度試みます。" -#: nova/virt/libvirt_conn.py:381 #, python-format -msgid "instance %s: booted" -msgstr "インスタンス %s: 起動しました。" +#~ msgid "Starting %s node" +#~ msgstr "ノード %s を開始します。" -#: nova/virt/libvirt_conn.py:384 nova/virt/xenapi/vmops.py:116 #, python-format -msgid "instance %s: failed to boot" -msgstr "インスタンス %s の起動に失敗しました。" +#~ msgid "Data store %s is unreachable. Trying again in %d seconds." 
+#~ msgstr "データストア %s に接続できません。 %d 秒後に再接続します。" -#: nova/virt/libvirt_conn.py:395 #, python-format -msgid "virsh said: %r" -msgstr "virsh の出力: %r" +#~ msgid "Couldn't get IP, using 127.0.0.1 %s" +#~ msgstr "IPを取得できません。127.0.0.1 を %s として使います。" -#: nova/virt/libvirt_conn.py:399 -msgid "cool, it's a device" -msgstr "デバイスです。" +#, python-format +#~ msgid "" +#~ "Access key %s has had %d failed authentications and will be locked out for " +#~ "%d minutes." +#~ msgstr "アクセスキー %s は %d 回認証に失敗したため、%d 分間ロックされます。" -#: nova/virt/libvirt_conn.py:407 #, python-format -msgid "data: %r, fpath: %r" -msgstr "データ:%r ファイルパス: %r" +#~ msgid "Authenticated Request For %s:%s)" +#~ msgstr "リクエストを認証しました: %s:%s" -#: nova/virt/libvirt_conn.py:415 #, python-format -msgid "Contents of file %s: %r" -msgstr "ファイル %s の中身: %r" +#~ msgid "arg: %s\t\tval: %s" +#~ msgstr "引数(arg): %s\t値(val): %s" -#: nova/virt/libvirt_conn.py:449 #, python-format -msgid "instance %s: Creating image" -msgstr "インスタンス %s のイメージを生成します。" +#~ msgid "Unauthorized request for controller=%s and action=%s" +#~ msgstr "許可されていないリクエスト: controller=%s, action %sです。" -#: nova/virt/libvirt_conn.py:505 #, python-format -msgid "instance %s: injecting key into image %s" -msgstr "インスタンス %s にキー %s をインジェクトします。" +#~ msgid "Adding role %s to user %s for project %s" +#~ msgstr "Adding role: ロール %s をユーザ %s、プロジェクト %s に追加します。" -#: nova/virt/libvirt_conn.py:508 #, python-format -msgid "instance %s: injecting net into image %s" -msgstr "インスタンス %s のネットワーク設定をイメージ %s にインジェクトします。" +#~ msgid "Adding sitewide role %s to user %s" +#~ msgstr "Adding sitewide role: サイトワイドのロール %s をユーザ %s に追加します。" -#: nova/virt/libvirt_conn.py:516 #, python-format -msgid "instance %s: ignoring error injecting data into image %s (%s)" -msgstr "インスタンス %s: データをイメージ %s にインジェクトする際にエラーが発生しました。(%s)" +#~ msgid "Removing role %s from user %s for project %s" +#~ msgstr "Removing role: ロール %s をユーザ %s プロジェクト %s から削除します。" -#: nova/virt/libvirt_conn.py:544 nova/virt/libvirt_conn.py:547 #, 
python-format -msgid "instance %s: starting toXML method" -msgstr "インスタンス %s: toXML メソッドを開始。" +#~ msgid "Removing sitewide role %s from user %s" +#~ msgstr "Removing sitewide role: サイトワイドのロール %s をユーザ %s から削除します。" -#: nova/virt/libvirt_conn.py:589 #, python-format -msgid "instance %s: finished toXML method" -msgstr "インスタンス %s: toXML メソッドを完了。" +#~ msgid "Getting x509 for user: %s on project: %s" +#~ msgstr "Getting X509: x509の取得: ユーザ %s, プロジェクト %s" -#: nova/virt/xenapi_conn.py:113 -msgid "" -"Must specify xenapi_connection_url, xenapi_connection_username (optionally), " -"and xenapi_connection_password to use connection_type=xenapi" -msgstr "" -"connection_type=xenapi を使用するには、以下の指定が必要です: xenapi_connection_url, " -"xenapi_connection_username (オプション), xenapi_connection_password" +#, python-format +#~ msgid "Create project %s managed by %s" +#~ msgstr "Create project: プロジェクト %s (%s により管理される)を作成します。" -#: nova/virt/xenapi_conn.py:263 #, python-format -msgid "Task [%s] %s status: success %s" -msgstr "タスク [%s] %s ステータス: success %s" +#~ msgid "Adding user %s to project %s" +#~ msgstr "Adding user: ユーザ %s をプロジェクト %s に追加します。" -#: nova/virt/xenapi_conn.py:271 #, python-format -msgid "Task [%s] %s status: %s %s" -msgstr "タスク [%s] %s ステータス: %s %s" +#~ msgid "Removing user %s from project %s" +#~ msgstr "Removing user: ユーザ %s をプロジェクト %s から削除します。" -#: nova/virt/xenapi_conn.py:287 nova/virt/xenapi_conn.py:300 #, python-format -msgid "Got exception: %s" -msgstr "例外 %s が発生しました。" +#~ msgid "Unsupported API request: controller = %s,action = %s" +#~ msgstr "サポートされていないAPIリクエストです。 controller = %s,action = %s" -#: nova/virt/xenapi/fake.py:72 #, python-format -msgid "%s: _db_content => %s" -msgstr "%s: _db_content => %s" +#~ msgid "Attach volume %s to instacne %s at %s" +#~ msgstr "Attach volume: ボリューム%s をインスタンス %s にデバイス %s でアタッチします。" -#: nova/virt/xenapi/fake.py:247 nova/virt/xenapi/fake.py:338 -#: nova/virt/xenapi/fake.py:356 nova/virt/xenapi/fake.py:404 -msgid "Raising NotImplemented" 
-msgstr "NotImplemented 例外を発生させます。" +#, python-format +#~ msgid "Associate address %s to instance %s" +#~ msgstr "Associate address: アドレス %s をインスタンス %s に関連付けます。" -#: nova/virt/xenapi/fake.py:249 #, python-format -msgid "xenapi.fake does not have an implementation for %s" -msgstr "xenapi.fake には %s が実装されていません。" +#~ msgid "Registered image %s with id %s" +#~ msgstr "Registered image: イメージ %s をid %s で登録します。" -#: nova/virt/xenapi/fake.py:283 #, python-format -msgid "Calling %s %s" -msgstr "呼び出し: %s %s" +#~ msgid "User %s is already a member of the group %s" +#~ msgstr "ユーザ %s は既にグループ %s のメンバーです。" -#: nova/virt/xenapi/fake.py:288 #, python-format -msgid "Calling getter %s" -msgstr "getter %s をコールします。" +#~ msgid "failed authorization: no project named %s (user=%s)" +#~ msgstr "Failed authorization: 認証に失敗しました。プロジェクト名 %s (ユーザ = %s) は存在しません。" -#: nova/virt/xenapi/fake.py:340 #, python-format -msgid "" -"xenapi.fake does not have an implementation for %s or it has been called " -"with the wrong number of arguments" -msgstr "xenapi.fake に %s に関する実装がないか、引数の数が誤っています。" +#~ msgid "Failed authorization: user %s not admin and not member of project %s" +#~ msgstr "" +#~ "Failed authorization: 認証に失敗しました: ユーザ %s は管理者ではなくかつプロジェクト %s のメンバーではありません。" -#: nova/virt/xenapi/network_utils.py:40 #, python-format -msgid "Found non-unique network for bridge %s" -msgstr "ブリッジ %s に対してブリッジが複数存在します。" +#~ msgid "User %s is not a member of project %s" +#~ msgstr "ユーザ %s はプロジェクト %s のメンバーではありません。" -#: nova/virt/xenapi/network_utils.py:43 #, python-format -msgid "Found no network for bridge %s" -msgstr "ブリッジ %s に対するネットワークが存在しません。" +#~ msgid "Adding role %s to user %s in project %s" +#~ msgstr "Adding role: ロール %s をユーザ %s (プロジェクト %s の) に追加します。" -#: nova/virt/xenapi/vm_utils.py:127 #, python-format -msgid "Created VM %s as %s." 
-msgstr "VM %s を %s として作成しました。" +#~ msgid "Removing role %s from user %s on project %s" +#~ msgstr "Removing role: ロール %s をユーザ %s (プロジェクト %s の)から削除します。" -#: nova/virt/xenapi/vm_utils.py:147 #, python-format -msgid "Creating VBD for VM %s, VDI %s ... " -msgstr "VM %s, VDI %s のVBDを作成します… " +#~ msgid "Created project %s with manager %s" +#~ msgstr "Created project: プロジェクト %s (マネージャ %s)を作成します。" -#: nova/virt/xenapi/vm_utils.py:149 #, python-format -msgid "Created VBD %s for VM %s, VDI %s." -msgstr "VBD %s を VM %s, VDI %s に対して作成しました。" +#~ msgid "Remove user %s from project %s" +#~ msgstr "Remove user: ユーザ %s をプロジェクト %s から削除します。" -#: nova/virt/xenapi/vm_utils.py:165 #, python-format -msgid "VBD not found in instance %s" -msgstr "インスタンス %s のVBDが見つかりません。" +#~ msgid "Created user %s (admin: %r)" +#~ msgstr "Created user: ユーザ %s (admin: %r) を作成しました。" -#: nova/virt/xenapi/vm_utils.py:175 #, python-format -msgid "Unable to unplug VBD %s" -msgstr "VBD %s の unplug に失敗しました。" +#~ msgid "Admin status set to %r for user %s" +#~ msgstr "Admin status set: 管理者ステータス %r をユーザ %s に設定します。" -#: nova/virt/xenapi/vm_utils.py:187 #, python-format -msgid "Unable to destroy VBD %s" -msgstr "VBD %s の削除に失敗しました。" +#~ msgid "Quota exceeeded for %s, tried to run %s instances" +#~ msgstr "%s のクオータ上限を超えました。%s インスタンスを実行しようとしました。" -#: nova/virt/xenapi/vm_utils.py:202 #, python-format -msgid "Creating VIF for VM %s, network %s." -msgstr "VM %s, ネットワーク %s を作成します。" +#~ msgid "Casting to scheduler for %s/%s's instance %s" +#~ msgstr "スケジューラに対して %s/%s のインスタンス %s を送信します。" -#: nova/virt/xenapi/vm_utils.py:205 #, python-format -msgid "Created VIF %s for VM %s, network %s." -msgstr "VIF %s を VM %s, ネットワーク %s に作成しました。" +#~ msgid "Going to try and terminate %s" +#~ msgstr "%s を終了します。" -#: nova/virt/xenapi/vm_utils.py:216 #, python-format -msgid "Snapshotting VM %s with label '%s'..." 
-msgstr "VM %s のスナップショットをラベル '%s' で作成します。" +#~ msgid "Input partition size not evenly divisible by sector size: %d / %d" +#~ msgstr "インプットパーティションサイズがセクターサイズで割り切れません。 %d / %d" -#: nova/virt/xenapi/vm_utils.py:229 #, python-format -msgid "Created snapshot %s from VM %s." -msgstr "スナップショット %s を VM %s について作成しました。" +#~ msgid "Bytes for local storage not evenly divisible by sector size: %d / %d" +#~ msgstr "ローカルストレージのバイト数がセクターサイズで割り切れません: %d / %d" -#: nova/virt/xenapi/vm_utils.py:243 #, python-format -msgid "Asking xapi to upload %s as '%s'" -msgstr "xapiに対して %s を '%s' としてアップロードするように指示します。" +#~ msgid "check_instance_lock: arguments: |%s| |%s| |%s|" +#~ msgstr "check_instance_lock: arguments: |%s| |%s| |%s|" -#: nova/virt/xenapi/vm_utils.py:261 #, python-format -msgid "Asking xapi to fetch %s as %s" -msgstr "xapi に対して %s を %s として取得するように指示します。" +#~ msgid "Disassociating address %s" +#~ msgstr "アドレス %s の関連付けを解除(disassociate)しています。" -#: nova/virt/xenapi/vm_utils.py:279 #, python-format -msgid "Looking up vdi %s for PV kernel" -msgstr "PV kernelのvdi %s を取得します。" +#~ msgid "trying to reboot a non-running instance: %s (state: %s excepted: %s)" +#~ msgstr "実行していないインスタンスの再起動を試みます。%s (状態: %s 期待する状態: %s)" -#: nova/virt/xenapi/vm_utils.py:290 #, python-format -msgid "PV Kernel in VDI:%d" -msgstr "VDIのPV Kernel: %d" +#~ msgid "" +#~ "trying to snapshot a non-running instance: %s (state: %s excepted: %s)" +#~ msgstr "実行していないインスタンスのスナップショット取得を試みます。%s (状態: %s 期待する状態: %s)" -#: nova/virt/xenapi/vm_utils.py:318 #, python-format -msgid "VDI %s is still available" -msgstr "VDI %s は依然として存在しています。" +#~ msgid "instance %s: attaching volume %s to %s" +#~ msgstr "attaching volume: インスタンス %s についてボリューム %s を %s にアタッチします。" -#: nova/virt/xenapi/vm_utils.py:331 #, python-format -msgid "(VM_UTILS) xenserver vm state -> |%s|" -msgstr "(VM_UTILS) xenserver の vm state -> |%s|" +#~ msgid "instance %s: attach failed %s, removing" +#~ msgstr "インスタンス %s: %sのアタッチに失敗しました。リムーブします。" -#: 
nova/virt/xenapi/vm_utils.py:333 #, python-format -msgid "(VM_UTILS) xenapi power_state -> |%s|" -msgstr "(VM_UTILS) xenapi の power_state -> |%s|" +#~ msgid "Detach volume %s from mountpoint %s on instance %s" +#~ msgstr "Detach volume: ボリューム %s をマウントポイント %s (インスタンス%s)からデタッチします。" -#: nova/virt/xenapi/vm_utils.py:390 #, python-format -msgid "VHD %s has parent %s" -msgstr "VHD %s のペアレントは %s です。" +#~ msgid "Cannot get blockstats for \"%s\" on \"%s\"" +#~ msgstr "ブロックデバイス \"%s\" の統計を \"%s\" について取得できません。" -#: nova/virt/xenapi/vm_utils.py:407 #, python-format -msgid "Re-scanning SR %s" -msgstr "SR %s を再スキャンします。" +#~ msgid "Cannot get ifstats for \"%s\" on \"%s\"" +#~ msgstr "インタフェース \"%s\" の統計を \"%s\" について取得できません。" -#: nova/virt/xenapi/vm_utils.py:431 #, python-format -msgid "Parent %s doesn't match original parent %s, waiting for coalesce..." -msgstr "ペアレント %s がオリジナルのペアレント %s と一致しません。合致するのを待ちます…" +#~ msgid "No service for %s, %s" +#~ msgstr "%s, %s のserviceが存在しません。" -#: nova/virt/xenapi/vm_utils.py:448 #, python-format -msgid "No VDIs found for VM %s" -msgstr "VM %s にVDIが存在しません。" +#~ msgid "No instance for id %s" +#~ msgstr "id %s のinstanceが存在しません。" -#: nova/virt/xenapi/vm_utils.py:452 #, python-format -msgid "Unexpected number of VDIs (%s) found for VM %s" -msgstr "予期しない数 (%s) のVDIがVM %s に存在します。" +#~ msgid "no keypair for user %s, name %s" +#~ msgstr "ユーザ %s, ネーム%s に該当するキーペアが存在しません。" -#: nova/virt/xenapi/vmops.py:62 #, python-format -msgid "Attempted to create non-unique name %s" -msgstr "ユニークではないname %s を作成しようとしました。" +#~ msgid "No volume for id %s" +#~ msgstr "id %s に該当するボリュームが存在しません。" -#: nova/virt/xenapi/vmops.py:99 #, python-format -msgid "Starting VM %s..." -msgstr "VM %s を開始します…" +#~ msgid "No security group named %s for project: %s" +#~ msgstr "セキュリティグループ名 %s がプロジェクト %s に存在しません。" -#: nova/virt/xenapi/vmops.py:101 #, python-format -msgid "Spawning VM %s created %s." 
-msgstr "VM %s の生成(spawning) により %s を作成しました。" +#~ msgid "Parallax returned HTTP error %d from request for /images" +#~ msgstr "Parallax がHTTPエラー%d を /images に対するリクエストに対して返しました。" -#: nova/virt/xenapi/vmops.py:112 #, python-format -msgid "Instance %s: booted" -msgstr "インスタンス%s: ブートしました。" +#~ msgid "Parallax returned HTTP error %d from request for /images/detail" +#~ msgstr "Parallax がHTTPエラー %d を /images/detail に対するリクエストに対して返しました" -#: nova/virt/xenapi/vmops.py:137 #, python-format -msgid "Instance not present %s" -msgstr "インスタンス%s が存在しません。" +#~ msgid "IP %s leased to bad mac %s vs %s" +#~ msgstr "IP %s が期待した mac %s ではなく %s にリースされました。" -#: nova/virt/xenapi/vmops.py:166 #, python-format -msgid "Starting snapshot for VM %s" -msgstr "VM %s に対するスナップショットを開始します。" +#~ msgid "IP %s released from bad mac %s vs %s" +#~ msgstr "IP %s がmac %s ではない mac %s への割当から開放されました。" -#: nova/virt/xenapi/vmops.py:174 #, python-format -msgid "Unable to Snapshot %s: %s" -msgstr "%s のスナップショットに失敗しました: %s" +#~ msgid "Getting object: %s / %s" +#~ msgstr "オブジェクトの取得: %s / %s" -#: nova/virt/xenapi/vmops.py:184 #, python-format -msgid "Finished snapshot and upload for VM %s" -msgstr "VM %s のスナップショットとアップロードが完了しました。" +#~ msgid "Unauthorized attempt to get object %s from bucket %s" +#~ msgstr "" +#~ "Unauthorized attempt to get object: オブジェクト %s のバケット %s からの取得は許可されていません。" -#: nova/virt/xenapi/vmops.py:252 #, python-format -msgid "suspend: instance not present %s" -msgstr "suspend: インスタンス %s は存在しません。" +#~ msgid "Putting object: %s / %s" +#~ msgstr "オブジェクトの格納:: %s / %s" -#: nova/virt/xenapi/vmops.py:262 #, python-format -msgid "resume: instance not present %s" -msgstr "resume: インスタンス %s は存在しません。" +#~ msgid "Unauthorized attempt to upload object %s to bucket %s" +#~ msgstr "" +#~ "Unauthorized attempt to upload: オブジェクト %s のバケット %s へのアップロードは許可されていません。" -#: nova/virt/xenapi/vmops.py:271 #, python-format -msgid "Instance not found %s" -msgstr "インスタンス %s が見つかりません。" +#~ msgid "Deleting object: %s / %s" +#~ msgstr 
"オブジェクトを削除しています。: %s / %s" -#: nova/virt/xenapi/volume_utils.py:57 #, python-format -msgid "Introducing %s..." -msgstr "%s を introduce します…" +#~ msgid "Toggling publicity flag of image %s %r" +#~ msgstr "Toggling publicity flag: イメージ %s の公開フラグを %r に更新します。" -#: nova/virt/xenapi/volume_utils.py:74 #, python-format -msgid "Introduced %s as %s." -msgstr "%s を %s として introduce しました。" +#~ msgid "Casting to %s %s for %s" +#~ msgstr "メッセージのcast: %s %s for %s" -#: nova/virt/xenapi/volume_utils.py:78 -msgid "Unable to create Storage Repository" -msgstr "Storage Repository を作成できません。" +#, python-format +#~ msgid "Nested received %s, %s" +#~ msgstr "ネスとした受信: %s, %s" -#: nova/virt/xenapi/volume_utils.py:90 #, python-format -msgid "Unable to find SR from VBD %s" -msgstr "VBD %s から SRを取得できません。" +#~ msgid "Creating disk for %s by attaching disk file %s" +#~ msgstr "%s のディスクをディスクファイル %s をアタッチして作成します。" -#: nova/virt/xenapi/volume_utils.py:96 #, python-format -msgid "Forgetting SR %s ... " -msgstr "SR %s をforgetします。 " +#~ msgid "Created switch port %s on switch %s" +#~ msgstr "スイッチポート %s をスイッチ %s に作成しました。" -#: nova/virt/xenapi/volume_utils.py:101 #, python-format -msgid "Ignoring exception %s when getting PBDs for %s" -msgstr "例外 %s が %s のPBDを取得する際に発生しましたが無視します。" +#~ msgid "WMI job succeeded: %s, Elapsed=%s " +#~ msgstr "WMIジョブが成功しました: %s, 経過時間=%s " -#: nova/virt/xenapi/volume_utils.py:107 #, python-format -msgid "Ignoring exception %s when unplugging PBD %s" -msgstr "例外 %s が %s のPBDをunplugする際に発生しましたが無視します。" +#~ msgid "Del: disk %s vm %s" +#~ msgstr "Del: 削除: disk %s vm %s" -#: nova/virt/xenapi/volume_utils.py:111 #, python-format -msgid "Forgetting SR %s done." 
-msgstr "SR %s のforgetが完了。" +#~ msgid "" +#~ "Got Info for vm %s: state=%s, mem=%s, num_cpu=%s, " +#~ "cpu_time=%s" +#~ msgstr "" +#~ "vm %s の情報の取得: state=%s, mem=%s, num_cpu=%s, cpu_time=%s" -#: nova/virt/xenapi/volume_utils.py:113 #, python-format -msgid "Ignoring exception %s when forgetting SR %s" -msgstr "例外 %s がSR %s をforgetする際に発生しましたが無視します。" +#~ msgid "Successfully changed vm state of %s to %s" +#~ msgstr "vmの状態の %s から %s への変更に成功しました。" -#: nova/virt/xenapi/volume_utils.py:123 #, python-format -msgid "Unable to introduce VDI on SR %s" -msgstr "SR %s のVDIのintroduceができません。" +#~ msgid "Failed to change vm state of %s to %s" +#~ msgstr "VMの状態の %s から %s への変更に失敗しました。" -#: nova/virt/xenapi/volume_utils.py:128 #, python-format -msgid "Unable to get record of VDI %s on" -msgstr "VDI %s のレコードを取得できません。" +#~ msgid "Finished retreving %s -- placed in %s" +#~ msgstr "%s を取得しました。格納先: %s" -#: nova/virt/xenapi/volume_utils.py:146 #, python-format -msgid "Unable to introduce VDI for SR %s" -msgstr "SR %s のVDIをintroduceできません。" +#~ msgid "instance %s: deleting instance files %s" +#~ msgstr "インスタンス %s: インスタンスファイル %s を削除しています。" -#: nova/virt/xenapi/volume_utils.py:175 #, python-format -msgid "Unable to obtain target information %s, %s" -msgstr "ターゲットの情報を取得できません。 %s, %s" +#~ msgid "data: %r, fpath: %r" +#~ msgstr "データ:%r ファイルパス: %r" -#: nova/virt/xenapi/volume_utils.py:197 #, python-format -msgid "Mountpoint cannot be translated: %s" -msgstr "マウントポイントを変換できません。 %s" +#~ msgid "Contents of file %s: %r" +#~ msgstr "ファイル %s の中身: %r" -#: nova/virt/xenapi/volumeops.py:51 #, python-format -msgid "Attach_volume: %s, %s, %s" -msgstr "Attach_volume: ボリュームのアタッチ: %s, %s, %s" +#~ msgid "instance %s: injecting key into image %s" +#~ msgstr "インスタンス %s にキー %s をインジェクトします。" -#: nova/virt/xenapi/volumeops.py:69 #, python-format -msgid "Unable to create VDI on SR %s for instance %s" -msgstr "SR %s にインスタンス %s のVDIを作成できません。" +#~ msgid "instance %s: injecting net into image %s" +#~ msgstr "インスタンス %s 
のネットワーク設定をイメージ %s にインジェクトします。" -#: nova/virt/xenapi/volumeops.py:81 #, python-format -msgid "Unable to use SR %s for instance %s" -msgstr "SR %s をインスタンス %s に対して利用できません。" +#~ msgid "instance %s: ignoring error injecting data into image %s (%s)" +#~ msgstr "インスタンス %s: データをイメージ %s にインジェクトする際にエラーが発生しました。(%s)" -#: nova/virt/xenapi/volumeops.py:93 #, python-format -msgid "Unable to attach volume to instance %s" -msgstr "インスタンス %s にボリュームをアタッチできません。" +#~ msgid "Task [%s] %s status: success %s" +#~ msgstr "タスク [%s] %s ステータス: success %s" -#: nova/virt/xenapi/volumeops.py:95 #, python-format -msgid "Mountpoint %s attached to instance %s" -msgstr "マウントポイント %s をインスタンス %s にアタッチしました。" +#~ msgid "Task [%s] %s status: %s %s" +#~ msgstr "タスク [%s] %s ステータス: %s %s" -#: nova/virt/xenapi/volumeops.py:106 #, python-format -msgid "Detach_volume: %s, %s" -msgstr "Detach_volume: ボリュームのデタッチ: %s, %s" +#~ msgid "%s: _db_content => %s" +#~ msgstr "%s: _db_content => %s" -#: nova/virt/xenapi/volumeops.py:113 #, python-format -msgid "Unable to locate volume %s" -msgstr "ボリューム %s の存在が確認できません。" +#~ msgid "Calling %s %s" +#~ msgstr "呼び出し: %s %s" -#: nova/virt/xenapi/volumeops.py:121 #, python-format -msgid "Unable to detach volume %s" -msgstr "ボリューム %s のデタッチができません。" +#~ msgid "Created VM %s as %s." +#~ msgstr "VM %s を %s として作成しました。" -#: nova/virt/xenapi/volumeops.py:128 #, python-format -msgid "Mountpoint %s detached from instance %s" -msgstr "マウントポイント %s をインスタンス %s からデタッチしました。" +#~ msgid "Creating VBD for VM %s, VDI %s ... " +#~ msgstr "VM %s, VDI %s のVBDを作成します… " -#: nova/volume/api.py:44 #, python-format -msgid "Quota exceeeded for %s, tried to create %sG volume" -msgstr "%sのクオータを超えています。サイズ %sG のボリュームの作成を行おうとしました。" +#~ msgid "Created VBD %s for VM %s, VDI %s." +#~ msgstr "VBD %s を VM %s, VDI %s に対して作成しました。" -#: nova/volume/api.py:46 #, python-format -msgid "Volume quota exceeded. 
You cannot create a volume of size %s" -msgstr "ボリュームのクオータを超えています。%sの大きさのボリュームは作成できません。" +#~ msgid "Creating VIF for VM %s, network %s." +#~ msgstr "VM %s, ネットワーク %s を作成します。" -#: nova/volume/api.py:70 nova/volume/api.py:95 -msgid "Volume status must be available" -msgstr "ボリュームのステータス(status)が available でなければなりません。" +#, python-format +#~ msgid "Created VIF %s for VM %s, network %s." +#~ msgstr "VIF %s を VM %s, ネットワーク %s に作成しました。" -#: nova/volume/api.py:97 -msgid "Volume is already attached" -msgstr "ボリュームは既にアタッチされています(attached)。" +#, python-format +#~ msgid "Snapshotting VM %s with label '%s'..." +#~ msgstr "VM %s のスナップショットをラベル '%s' で作成します。" -#: nova/volume/api.py:103 -msgid "Volume is already detached" -msgstr "ボリュームは既にデタッチされています(detached)。" +#, python-format +#~ msgid "Created snapshot %s from VM %s." +#~ msgstr "スナップショット %s を VM %s について作成しました。" -#: nova/volume/driver.py:76 #, python-format -msgid "Recovering from a failed execute. Try number %s" -msgstr "実行失敗からリカバリーします。%s 回目のトライ。" +#~ msgid "Asking xapi to upload %s as '%s'" +#~ msgstr "xapiに対して %s を '%s' としてアップロードするように指示します。" -#: nova/volume/driver.py:85 #, python-format -msgid "volume group %s doesn't exist" -msgstr "ボリュームグループ%sが存在しません。" +#~ msgid "Asking xapi to fetch %s as %s" +#~ msgstr "xapi に対して %s を %s として取得するように指示します。" -#: nova/volume/driver.py:210 #, python-format -msgid "FAKE AOE: %s" -msgstr "偽のAOE: %s" +#~ msgid "PV Kernel in VDI:%d" +#~ msgstr "VDIのPV Kernel: %d" -#: nova/volume/driver.py:315 #, python-format -msgid "FAKE ISCSI: %s" -msgstr "偽のISCSI: %s" +#~ msgid "VHD %s has parent %s" +#~ msgstr "VHD %s のペアレントは %s です。" -#: nova/volume/manager.py:85 #, python-format -msgid "Re-exporting %s volumes" -msgstr "%s 個のボリュームを再エクスポートします。" +#~ msgid "Parent %s doesn't match original parent %s, waiting for coalesce..." 
+#~ msgstr "ペアレント %s がオリジナルのペアレント %s と一致しません。合致するのを待ちます…" -#: nova/volume/manager.py:93 #, python-format -msgid "volume %s: creating" -msgstr "ボリューム%sを作成します。" +#~ msgid "Unexpected number of VDIs (%s) found for VM %s" +#~ msgstr "予期しない数 (%s) のVDIがVM %s に存在します。" -#: nova/volume/manager.py:102 #, python-format -msgid "volume %s: creating lv of size %sG" -msgstr "ボリューム%sの%sGのlv (論理ボリューム) を作成します。" +#~ msgid "Spawning VM %s created %s." +#~ msgstr "VM %s の生成(spawning) により %s を作成しました。" -#: nova/volume/manager.py:106 #, python-format -msgid "volume %s: creating export" -msgstr "ボリューム %s をエクスポートします。" +#~ msgid "Unable to Snapshot %s: %s" +#~ msgstr "%s のスナップショットに失敗しました: %s" -#: nova/volume/manager.py:113 #, python-format -msgid "volume %s: created successfully" -msgstr "ボリューム %s の作成に成功しました。" +#~ msgid "suspend: instance not present %s" +#~ msgstr "suspend: インスタンス %s は存在しません。" -#: nova/volume/manager.py:121 -msgid "Volume is still attached" -msgstr "ボリュームはアタッチされたままです。" +#, python-format +#~ msgid "resume: instance not present %s" +#~ msgstr "resume: インスタンス %s は存在しません。" -#: nova/volume/manager.py:123 -msgid "Volume is not local to this node" -msgstr "ボリュームはこのノードのローカルではありません。" +#, python-format +#~ msgid "Instance not found %s" +#~ msgstr "インスタンス %s が見つかりません。" -#: nova/volume/manager.py:124 #, python-format -msgid "volume %s: removing export" -msgstr "ボリューム %s のエクスポートを解除します。" +#~ msgid "Introduced %s as %s." 
+#~ msgstr "%s を %s として introduce しました。" -#: nova/volume/manager.py:126 #, python-format -msgid "volume %s: deleting" -msgstr "ボリューム %s を削除します。" +#~ msgid "Ignoring exception %s when getting PBDs for %s" +#~ msgstr "例外 %s が %s のPBDを取得する際に発生しましたが無視します。" -#: nova/volume/manager.py:129 #, python-format -msgid "volume %s: deleted successfully" -msgstr "ボリューム %s の削除に成功しました。" +#~ msgid "Ignoring exception %s when unplugging PBD %s" +#~ msgstr "例外 %s が %s のPBDをunplugする際に発生しましたが無視します。" + +#, python-format +#~ msgid "Ignoring exception %s when forgetting SR %s" +#~ msgstr "例外 %s がSR %s をforgetする際に発生しましたが無視します。" + +#, python-format +#~ msgid "Unable to obtain target information %s, %s" +#~ msgstr "ターゲットの情報を取得できません。 %s, %s" + +#, python-format +#~ msgid "Attach_volume: %s, %s, %s" +#~ msgstr "Attach_volume: ボリュームのアタッチ: %s, %s, %s" + +#, python-format +#~ msgid "Unable to create VDI on SR %s for instance %s" +#~ msgstr "SR %s にインスタンス %s のVDIを作成できません。" + +#, python-format +#~ msgid "Unable to use SR %s for instance %s" +#~ msgstr "SR %s をインスタンス %s に対して利用できません。" + +#, python-format +#~ msgid "Mountpoint %s attached to instance %s" +#~ msgstr "マウントポイント %s をインスタンス %s にアタッチしました。" + +#, python-format +#~ msgid "Detach_volume: %s, %s" +#~ msgstr "Detach_volume: ボリュームのデタッチ: %s, %s" + +#, python-format +#~ msgid "Mountpoint %s detached from instance %s" +#~ msgstr "マウントポイント %s をインスタンス %s からデタッチしました。" + +#, python-format +#~ msgid "Quota exceeeded for %s, tried to create %sG volume" +#~ msgstr "%sのクオータを超えています。サイズ %sG のボリュームの作成を行おうとしました。" + +#, python-format +#~ msgid "Volume quota exceeded. 
You cannot create a volume of size %s" +#~ msgstr "ボリュームのクオータを超えています。%sの大きさのボリュームは作成できません。" + +#, python-format +#~ msgid "volume %s: creating lv of size %sG" +#~ msgstr "ボリューム%sの%sGのlv (論理ボリューム) を作成します。" diff --git a/po/pt_BR.po b/po/pt_BR.po index e57f7304a..887c32597 100644 --- a/po/pt_BR.po +++ b/po/pt_BR.po @@ -7,2142 +7,3021 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n" -"POT-Creation-Date: 2011-01-10 11:25-0800\n" -"PO-Revision-Date: 2011-02-03 20:32+0000\n" -"Last-Translator: André Gondim <andregondim@ubuntu.com>\n" +"POT-Creation-Date: 2011-02-21 10:03-0500\n" +"PO-Revision-Date: 2011-03-24 14:51+0000\n" +"Last-Translator: msinhore <msinhore@gmail.com>\n" "Language-Team: Brazilian Portuguese <pt_BR@li.org>\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Launchpad-Export-Date: 2011-02-05 05:36+0000\n" -"X-Generator: Launchpad (build 12177)\n" +"X-Launchpad-Export-Date: 2011-03-25 05:22+0000\n" +"X-Generator: Launchpad (build 12559)\n" -#: nova/crypto.py:46 -msgid "Filename of root CA" -msgstr "Nome do arquivo da CA raiz" +#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55 +#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110 +#: ../nova/scheduler/simple.py:122 +msgid "No hosts found" +msgstr "Hosts não encontrados" -#: nova/crypto.py:49 -msgid "Filename of private key" -msgstr "Nome do arquivo da chave privada" +#: ../nova/exception.py:33 +msgid "Unexpected error while running command." +msgstr "Erro inesperado ao executar o comando." 
-#: nova/crypto.py:51 -msgid "Filename of root Certificate Revokation List" -msgstr "Nome de arquivo da Lista de Revogação de Certificados" +#: ../nova/exception.py:36 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" -#: nova/crypto.py:53 -msgid "Where we keep our keys" -msgstr "Aonde armazenamos nossas chaves" +#: ../nova/exception.py:107 +msgid "DB exception wrapped" +msgstr "" -#: nova/crypto.py:55 -msgid "Where we keep our root CA" -msgstr "Aonde mantemos nosso CA raiz" +#. exc_type, exc_value, exc_traceback = sys.exc_info() +#: ../nova/exception.py:120 +msgid "Uncaught exception" +msgstr "Exceção não capturada" -#: nova/crypto.py:57 -msgid "Should we use a CA for each project?" -msgstr "Devemos usar um CA para cada projeto?" +#: ../nova/volume/api.py:45 +#, python-format +msgid "Quota exceeeded for %(pid)s, tried to create %(size)sG volume" +msgstr "Cota excedida para %(pid)s, tentando criar volume com %(size)sG" -#: nova/crypto.py:61 +#: ../nova/volume/api.py:47 #, python-format -msgid "Subject for certificate for users, %s for project, user, timestamp" +msgid "Volume quota exceeded. You cannot create a volume of size %sG" msgstr "" -"Sujeito do certificado para usuários, %s para projeto, usuário, timestamp" +"Cota excedida para o volume. 
Você não pode criar um volume com o tamanho %sG" -#: nova/crypto.py:66 -#, python-format -msgid "Subject for certificate for projects, %s for project, timestamp" -msgstr "Sujeito do certificado para projetos, %s para projeto, timestamp" +#: ../nova/volume/api.py:71 ../nova/volume/api.py:96 +msgid "Volume status must be available" +msgstr "O status do volume parece estar disponível" -#: nova/crypto.py:71 -#, python-format -msgid "Subject for certificate for vpns, %s for project, timestamp" -msgstr "Sujeito do certificado para vpns, %s para projeto, timestamp" +#: ../nova/volume/api.py:98 +msgid "Volume is already attached" +msgstr "O Volume já está anexado" + +#: ../nova/volume/api.py:104 +msgid "Volume is already detached" +msgstr "O Volume já está desanexado" -#: nova/crypto.py:258 +#: ../nova/api/openstack/servers.py:72 +msgid "Failed to read private ip" +msgstr "Falha ao ler o IP privado" + +#: ../nova/api/openstack/servers.py:79 +msgid "Failed to read public ip(s)" +msgstr "Falha ao ler o(s) IP(s) público(s)" + +#: ../nova/api/openstack/servers.py:152 #, python-format -msgid "Flags path: %s" -msgstr "Caminho da sinalização: %s" +msgid "%(param)s property not found for image %(_image_id)s" +msgstr "%(param)s propriedade não foi encontrada para %(_image_id)s" -#: nova/exception.py:33 -msgid "Unexpected error while running command." -msgstr "Erro inesperado ao executar o comando." 
+#: ../nova/api/openstack/servers.py:168 +msgid "No keypairs defined" +msgstr "Os keypairs não foram definidos" -#: nova/exception.py:36 +#: ../nova/api/openstack/servers.py:238 #, python-format -msgid "" -"%s\n" -"Command: %s\n" -"Exit code: %s\n" -"Stdout: %r\n" -"Stderr: %r" -msgstr "" -"%s\n" -"Comando: %s\n" -"Código de retorno: %s\n" -"Stdout: %r\n" -"Stderr: %r" - -#: nova/exception.py:86 -msgid "Uncaught exception" -msgstr "Exceção não capturada" +msgid "Compute.api::lock %s" +msgstr "Compute.api::lock %s" -#: nova/fakerabbit.py:48 +#: ../nova/api/openstack/servers.py:253 #, python-format -msgid "(%s) publish (key: %s) %s" -msgstr "(%s) publicar (key: %s) %s" +msgid "Compute.api::unlock %s" +msgstr "Compute.api::unlock %s" -#: nova/fakerabbit.py:53 +#: ../nova/api/openstack/servers.py:267 #, python-format -msgid "Publishing to route %s" -msgstr "Publicando para rota %s" +msgid "Compute.api::get_lock %s" +msgstr "Compute.api::get_lock %s" -#: nova/fakerabbit.py:83 +#: ../nova/api/openstack/servers.py:281 #, python-format -msgid "Declaring queue %s" -msgstr "Declarando fila %s" +msgid "Compute.api::reset_network %s" +msgstr "Compute.api::reset_network %s" -#: nova/fakerabbit.py:89 +#: ../nova/api/openstack/servers.py:292 #, python-format -msgid "Declaring exchange %s" -msgstr "Declarando troca %s" +msgid "Compute.api::pause %s" +msgstr "Compute.api::pause %s" -#: nova/fakerabbit.py:95 +#: ../nova/api/openstack/servers.py:303 #, python-format -msgid "Binding %s to %s with key %s" -msgstr "Atribuindo %s para %s com chave %s" +msgid "Compute.api::unpause %s" +msgstr "Compute.api::unpause %s" -#: nova/fakerabbit.py:120 +#: ../nova/api/openstack/servers.py:314 #, python-format -msgid "Getting from %s: %s" -msgstr "Obtendo de %s: %s" +msgid "compute.api::suspend %s" +msgstr "compute.api::suspend %s" -#: nova/rpc.py:92 +#: ../nova/api/openstack/servers.py:325 #, python-format -msgid "AMQP server on %s:%d is unreachable. Trying again in %d seconds." 
-msgstr "" -"Servidor AMQP em %s:%d inatingível. Tentando novamente em %d segundos." +msgid "compute.api::resume %s" +msgstr "compute.api::resume %s" -#: nova/rpc.py:99 +#: ../nova/twistd.py:157 +msgid "Wrong number of arguments." +msgstr "Número errado de argumentos." + +#: ../nova/twistd.py:209 #, python-format -msgid "Unable to connect to AMQP server after %d tries. Shutting down." +msgid "pidfile %s does not exist. Daemon not running?\n" msgstr "" -"Não foi possível conectar ao servidor AMQP após %d tentativas. Desligando." +"Arquivo do id do processo (pidfile) %s não existe. O Daemon está parado?\n" -#: nova/rpc.py:118 -msgid "Reconnected to queue" -msgstr "Reconectado à fila" +#: ../nova/twistd.py:221 +msgid "No such process" +msgstr "Processo inexistente" -#: nova/rpc.py:125 -msgid "Failed to fetch message from queue" -msgstr "Falha ao obter mensagem da fila" +#: ../nova/twistd.py:230 ../nova/service.py:224 +#, python-format +msgid "Serving %s" +msgstr "Servindo %s" -#: nova/rpc.py:155 +#: ../nova/twistd.py:262 ../nova/service.py:225 +msgid "Full set of FLAGS:" +msgstr "Conjunto completo de FLAGS:" + +#: ../nova/twistd.py:266 #, python-format -msgid "Initing the Adapter Consumer for %s" -msgstr "Iniciando o Adaptador Consumidor para %s" +msgid "Starting %s" +msgstr "Iniciando %s" -#: nova/rpc.py:170 +#: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101 +#: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741 +#: ../nova/api/ec2/__init__.py:317 #, python-format -msgid "received %s" -msgstr "recebido %s" +msgid "Instance %s not found" +msgstr "Instancia %s não encontrada" -#: nova/rpc.py:183 +#. 
NOTE: No Resource Pool concept so far +#: ../nova/virt/xenapi/volumeops.py:51 #, python-format -msgid "no method for message: %s" -msgstr "sem método para mensagem: %s" +msgid "Attach_volume: %(instance_name)s, %(device_path)s, %(mountpoint)s" +msgstr "Attach_volume: %(instance_name)s, %(device_path)s, %(mountpoint)s" -#: nova/rpc.py:184 +#: ../nova/virt/xenapi/volumeops.py:69 #, python-format -msgid "No method for message: %s" -msgstr "Sem método para mensagem: %s" +msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" +msgstr "" +"Não é possível criar o VDI no SR %(sr_ref)s para a instância " +"%(instance_name)s" -#: nova/rpc.py:245 +#: ../nova/virt/xenapi/volumeops.py:80 #, python-format -msgid "Returning exception %s to caller" -msgstr "Retornando exceção %s ao método de origem" +msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s" +msgstr "" +"Não é possível usar o SR %(sr_ref)s para a instância %(instance_name)s" -#: nova/rpc.py:286 +#: ../nova/virt/xenapi/volumeops.py:91 #, python-format -msgid "unpacked context: %s" -msgstr "conteúdo descompactado: %s" +msgid "Unable to attach volume to instance %s" +msgstr "Não é possível anexar o volume na instância %s" -#: nova/rpc.py:305 -msgid "Making asynchronous call..." -msgstr "Fazendo chamada assíncrona..." +#: ../nova/virt/xenapi/volumeops.py:93 +#, python-format +msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" +msgstr "" +"Ponto de montagem %(mountpoint)s conectada à instância %(instance_name)s" -#: nova/rpc.py:308 +#. 
Detach VBD from VM +#: ../nova/virt/xenapi/volumeops.py:104 #, python-format -msgid "MSG_ID is %s" -msgstr "MSG_ID é %s" +msgid "Detach_volume: %(instance_name)s, %(mountpoint)s" +msgstr "Detach_volume: %(instance_name)s, %(mountpoint)s" -#: nova/rpc.py:356 +#: ../nova/virt/xenapi/volumeops.py:112 #, python-format -msgid "response %s" -msgstr "resposta %s" +msgid "Unable to locate volume %s" +msgstr "Não é possível localizar o volume %s" -#: nova/rpc.py:365 +#: ../nova/virt/xenapi/volumeops.py:120 #, python-format -msgid "topic is %s" -msgstr "topico é %s" +msgid "Unable to detach volume %s" +msgstr "Não é possível desconectar o volume %s" -#: nova/rpc.py:366 +#: ../nova/virt/xenapi/volumeops.py:127 #, python-format -msgid "message %s" -msgstr "mensagem %s" +msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" +msgstr "" +"Ponto de montagem %(mountpoint)s desanexada da instância %(instance_name)s" -#: nova/service.py:157 +#: ../nova/compute/instance_types.py:41 #, python-format -msgid "Starting %s node" -msgstr "Iniciando nó %s" +msgid "Unknown instance type: %s" +msgstr "Tipo de instância desconhecido: %s" -#: nova/service.py:169 -msgid "Service killed that has no database entry" -msgstr "Encerrado serviço que não tem entrada na base de dados" +#: ../nova/crypto.py:46 +msgid "Filename of root CA" +msgstr "Nome do arquivo da CA raiz" -#: nova/service.py:190 -msgid "The service database object disappeared, Recreating it." -msgstr "O objeto da base de dados do serviço desapareceu, Recriando." +#: ../nova/crypto.py:49 +msgid "Filename of private key" +msgstr "Nome do arquivo da chave privada" -#: nova/service.py:202 -msgid "Recovered model server connection!" -msgstr "Recuperada conexão servidor de modelo." 
+#: ../nova/crypto.py:51 +msgid "Filename of root Certificate Revokation List" +msgstr "Nome de arquivo da Lista de Revogação de Certificados" -#: nova/service.py:208 -msgid "model server went away" -msgstr "servidor de modelo perdido" +#: ../nova/crypto.py:53 +msgid "Where we keep our keys" +msgstr "Aonde armazenamos nossas chaves" + +#: ../nova/crypto.py:55 +msgid "Where we keep our root CA" +msgstr "Onde mantemos nosso CA raiz" + +#: ../nova/crypto.py:57 +msgid "Should we use a CA for each project?" +msgstr "Devemos usar um CA para cada projeto?" -#: nova/service.py:217 nova/db/sqlalchemy/__init__.py:43 +#: ../nova/crypto.py:61 #, python-format -msgid "Data store %s is unreachable. Trying again in %d seconds." +msgid "Subject for certificate for users, %s for project, user, timestamp" msgstr "" -"Repositório de dados %s não pode ser atingido. Tentando novamente em %d " -"segundos." +"Assunto do certificado para usuários, %s para projeto, usuário, timestamp" -#: nova/service.py:232 nova/twistd.py:232 +#: ../nova/crypto.py:66 #, python-format -msgid "Serving %s" -msgstr "Servindo %s" +msgid "Subject for certificate for projects, %s for project, timestamp" +msgstr "Assunto do certificado para projetos, %s para projeto, timestamp" -#: nova/service.py:234 nova/twistd.py:264 -msgid "Full set of FLAGS:" -msgstr "Conjunto completo de FLAGS:" +#: ../nova/crypto.py:71 +#, python-format +msgid "Subject for certificate for vpns, %s for project, timestamp" +msgstr "Assunto do certificado para vpns, %s para projeto, timestamp" -#: nova/twistd.py:211 +#: ../nova/crypto.py:258 #, python-format -msgid "pidfile %s does not exist. 
Daemon not running?\n" +msgid "Flags path: %s" +msgstr "Localização dos sinalizadores: %s" + +#: ../nova/scheduler/manager.py:69 +#, python-format +msgid "Casting to %(topic)s %(host)s for %(method)s" +msgstr "Moldagem para %(topic)s %(host)s para %(method)s" + +#: ../nova/compute/manager.py:78 +#, python-format +msgid "check_instance_lock: decorating: |%s|" +msgstr "check_instance_lock: decorating: |%s|" + +#: ../nova/compute/manager.py:80 +#, python-format +msgid "" +"check_instance_lock: arguments: |%(self)s| |%(context)s| |%(instance_id)s|" msgstr "" -"Arquivo de id de processo (pidfile) %s não existe. Daemon não está " -"executando?\n" +"check_instance_lock: argumentos: |%(self)s| |%(context)s| |%(instance_id)s|" -#: nova/twistd.py:268 +#: ../nova/compute/manager.py:84 #, python-format -msgid "Starting %s" -msgstr "Iniciando %s" +msgid "check_instance_lock: locked: |%s|" +msgstr "check_instance_lock: locked: |%s|" -#: nova/utils.py:53 +#: ../nova/compute/manager.py:86 #, python-format -msgid "Inner Exception: %s" -msgstr "Exceção interna: %s" +msgid "check_instance_lock: admin: |%s|" +msgstr "check_instance_lock: admin: |%s|" -#: nova/utils.py:54 +#: ../nova/compute/manager.py:91 #, python-format -msgid "Class %s cannot be found" -msgstr "Classe %s não pode ser encontrada" +msgid "check_instance_lock: executing: |%s|" +msgstr "check_instance_lock: executando: |%s|" -#: nova/utils.py:113 +#: ../nova/compute/manager.py:95 #, python-format -msgid "Fetching %s" -msgstr "Obtendo %s" +msgid "check_instance_lock: not executing |%s|" +msgstr "check_instance_lock: not executando |%s|" -#: nova/utils.py:125 +#: ../nova/compute/manager.py:179 +msgid "Instance has already been created" +msgstr "A instância já foi criada" + +#: ../nova/compute/manager.py:180 #, python-format -msgid "Running cmd (subprocess): %s" -msgstr "Executando comando (subprocesso): %s" +msgid "instance %s: starting..." +msgstr "instância %s: iniciando..." -#: nova/utils.py:138 +#. 
pylint: disable=W0702 +#: ../nova/compute/manager.py:219 #, python-format -msgid "Result was %s" -msgstr "Resultado foi %s" +msgid "instance %s: Failed to spawn" +msgstr "instância %s: falha na geração" -#: nova/utils.py:171 +#: ../nova/compute/manager.py:233 ../nova/tests/test_cloud.py:286 #, python-format -msgid "debug in callback: %s" -msgstr "debug em callback: %s" +msgid "Terminating instance %s" +msgstr "Terminando a instância %s" -#: nova/utils.py:176 +#: ../nova/compute/manager.py:255 #, python-format -msgid "Running %s" -msgstr "Executando %s" +msgid "Deallocating address %s" +msgstr "Desalocando o endereço %s" -#: nova/utils.py:207 +#: ../nova/compute/manager.py:268 #, python-format -msgid "Couldn't get IP, using 127.0.0.1 %s" -msgstr "Não foi possível obter IP, usando 127.0.0.1 %s" +msgid "trying to destroy already destroyed instance: %s" +msgstr "tentando destruir instância já destruida: %s" -#: nova/utils.py:289 +#: ../nova/compute/manager.py:282 #, python-format -msgid "Invalid backend: %s" -msgstr "Backend inválido: %s" +msgid "Rebooting instance %s" +msgstr "Reiniciando a instância %s" -#: nova/utils.py:300 +#: ../nova/compute/manager.py:287 #, python-format -msgid "backend %s" -msgstr "backend %s" +msgid "" +"trying to reboot a non-running instance: %(instance_id)s (state: %(state)s " +"expected: %(running)s)" +msgstr "" +"tentando reiniciar uma instancia com estado diferente de running: " +"%(instance_id)s (estado: %(state)s esperado: %(running)s)" -#: nova/api/ec2/__init__.py:133 -msgid "Too many failed authentications." -msgstr "Muitas falhas de autenticação." +#: ../nova/compute/manager.py:311 +#, python-format +msgid "instance %s: snapshotting" +msgstr "instância %s: fazendo um snapshot" -#: nova/api/ec2/__init__.py:142 +#: ../nova/compute/manager.py:316 #, python-format msgid "" -"Access key %s has had %d failed authentications and will be locked out for " -"%d minutes." 
+"trying to snapshot a non-running instance: %(instance_id)s (state: %(state)s " +"expected: %(running)s)" msgstr "" -"Chave de acesso %s tem %d falhas de autenticação e vai ser bloqueada por %d " -"minutos." +"tentando fazer um snapshot de uma instância com estado diferente de running: " +" %(instance_id)s (estado: %(state)s esperado: %(running)s)" -#: nova/api/ec2/__init__.py:179 nova/objectstore/handler.py:140 +#: ../nova/compute/manager.py:332 #, python-format -msgid "Authentication Failure: %s" -msgstr "Falha de Autenticação: %s" +msgid "" +"trying to reset the password on a non-running instance: %(instance_id)s " +"(state: %(instance_state)s expected: %(expected_state)s)" +msgstr "" +"tentando limpar a senha de uma instância com estado diferente de running: " +"%(instance_id)s (estado: %(instance_state)s esperado: %(expected_state)s)" -#: nova/api/ec2/__init__.py:190 +#: ../nova/compute/manager.py:335 #, python-format -msgid "Authenticated Request For %s:%s)" -msgstr "Pedido de Autenticação Para: %s:%s" +msgid "instance %s: setting admin password" +msgstr "instância %s: configurando a senha do administrador" -#: nova/api/ec2/__init__.py:227 +#: ../nova/compute/manager.py:353 #, python-format -msgid "action: %s" -msgstr "ação: %s" +msgid "" +"trying to inject a file into a non-running instance: %(instance_id)s (state: " +"%(instance_state)s expected: %(expected_state)s)" +msgstr "" +"tentando injetar o arquivo dentro de uma instância com estado diferente de " +"running: %(instance_id)s (estado: %(instance_state)s esperado: " +"%(expected_state)s)" -#: nova/api/ec2/__init__.py:229 +#: ../nova/compute/manager.py:362 #, python-format -msgid "arg: %s\t\tval: %s" -msgstr "argumento: %s\t\tvalor: %s" +msgid "instance %(nm)s: injecting file to %(plain_path)s" +msgstr "instância %(nm)s: injetando um arquivo para %(plain_path)s" -#: nova/api/ec2/__init__.py:301 +#: ../nova/compute/manager.py:372 #, python-format -msgid "Unauthorized request for controller=%s and 
action=%s" -msgstr "Requisição não autorizada para controlador=%s e ação=%s" +msgid "instance %s: rescuing" +msgstr "instância %s: resgatando" -#: nova/api/ec2/__init__.py:339 +#: ../nova/compute/manager.py:387 #, python-format -msgid "NotFound raised: %s" -msgstr "NotFound lançado: %s" +msgid "instance %s: unrescuing" +msgstr "instância %s: desfazendo o resgate" -#: nova/api/ec2/__init__.py:342 +#: ../nova/compute/manager.py:406 #, python-format -msgid "ApiError raised: %s" -msgstr "ApiError lançado: %s" +msgid "instance %s: pausing" +msgstr "instância %s: pausando" -#: nova/api/ec2/__init__.py:349 +#: ../nova/compute/manager.py:423 #, python-format -msgid "Unexpected error raised: %s" -msgstr "Erro inexperado lançado: %s" +msgid "instance %s: unpausing" +msgstr "instância %s: saindo do pause" -#: nova/api/ec2/__init__.py:354 -msgid "An unknown error has occurred. Please try your request again." -msgstr "" -"Ocorreu um erro desconhecido. Por favor tente sua requisição novamente." +#: ../nova/compute/manager.py:440 +#, python-format +msgid "instance %s: retrieving diagnostics" +msgstr "instância %s: recuperando os diagnósticos" -#: nova/api/ec2/admin.py:84 +#: ../nova/compute/manager.py:453 #, python-format -msgid "Creating new user: %s" -msgstr "Criando novo usuário: %s" +msgid "instance %s: suspending" +msgstr "instância %s: suspendendo" -#: nova/api/ec2/admin.py:92 +#: ../nova/compute/manager.py:472 #, python-format -msgid "Deleting user: %s" -msgstr "Excluindo usuário: %s" +msgid "instance %s: resuming" +msgstr "" -#: nova/api/ec2/admin.py:114 +#: ../nova/compute/manager.py:491 #, python-format -msgid "Adding role %s to user %s for project %s" -msgstr "Adicionando papel %s ao usuário %s para o projeto %s" +msgid "instance %s: locking" +msgstr "instância %s: bloqueando" -#: nova/api/ec2/admin.py:117 nova/auth/manager.py:415 +#: ../nova/compute/manager.py:503 #, python-format -msgid "Adding sitewide role %s to user %s" -msgstr "Adicionando papel em todo site %s 
ao usuário %s" +msgid "instance %s: unlocking" +msgstr "instância %s: desbloqueando" -#: nova/api/ec2/admin.py:122 +#: ../nova/compute/manager.py:513 #, python-format -msgid "Removing role %s from user %s for project %s" -msgstr "Removendo papel %s do usuário %s para o projeto %s" +msgid "instance %s: getting locked state" +msgstr "" -#: nova/api/ec2/admin.py:125 nova/auth/manager.py:441 +#: ../nova/compute/manager.py:526 #, python-format -msgid "Removing sitewide role %s from user %s" -msgstr "Removendo papel %s em todo site do usuário %s" +msgid "instance %s: reset network" +msgstr "" -#: nova/api/ec2/admin.py:129 nova/api/ec2/admin.py:192 -msgid "operation must be add or remove" -msgstr "operações devem ser adicionar e excluir" +#: ../nova/compute/manager.py:535 ../nova/api/ec2/cloud.py:515 +#, python-format +msgid "Get console output for instance %s" +msgstr "Obter saída do console para instância %s" -#: nova/api/ec2/admin.py:142 +#: ../nova/compute/manager.py:543 #, python-format -msgid "Getting x509 for user: %s on project: %s" -msgstr "Obtendo x509 para usuário: %s do projeto: %s" +msgid "instance %s: getting ajax console" +msgstr "instância %s: obtendo console ajax" -#: nova/api/ec2/admin.py:159 +#: ../nova/compute/manager.py:553 #, python-format -msgid "Create project %s managed by %s" -msgstr "Criar projeto %s gerenciado por %s" +msgid "" +"instance %(instance_id)s: attaching volume %(volume_id)s to %(mountpoint)s" +msgstr "" -#: nova/api/ec2/admin.py:170 +#. pylint: disable=W0702 +#. NOTE(vish): The inline callback eats the exception info so we +#. log the traceback here and reraise the same +#. ecxception below. 
+#: ../nova/compute/manager.py:569 #, python-format -msgid "Delete project: %s" -msgstr "Excluir projeto: %s" +msgid "instance %(instance_id)s: attach failed %(mountpoint)s, removing" +msgstr "" -#: nova/api/ec2/admin.py:184 nova/auth/manager.py:533 +#: ../nova/compute/manager.py:585 #, python-format -msgid "Adding user %s to project %s" -msgstr "Adicionando usuário %s ao projeto %s" +msgid "" +"Detach volume %(volume_id)s from mountpoint %(mp)s on instance " +"%(instance_id)s" +msgstr "" + +#: ../nova/compute/manager.py:588 +#, python-format +msgid "Detaching volume from unknown instance %s" +msgstr "" -#: nova/api/ec2/admin.py:188 +#: ../nova/scheduler/simple.py:53 #, python-format -msgid "Removing user %s from project %s" -msgstr "Excluindo usuário %s do projeto %s" +msgid "Host %s is not alive" +msgstr "Host %s não está ativo" -#: nova/api/ec2/apirequest.py:95 +#: ../nova/scheduler/simple.py:65 +msgid "All hosts have too many cores" +msgstr "" + +#: ../nova/scheduler/simple.py:87 #, python-format -msgid "Unsupported API request: controller = %s,action = %s" -msgstr "Requisição de API não suportada: controlador = %s,ação = %s" +msgid "Host %s not available" +msgstr "O host %s não está disponível" + +#: ../nova/scheduler/simple.py:99 +msgid "All hosts have too many gigabytes" +msgstr "Todos os hosts tem muitos gigabytes" + +#: ../nova/scheduler/simple.py:119 +msgid "All hosts have too many networks" +msgstr "Todos os hosts tem muitas interfaces de rede" -#: nova/api/ec2/cloud.py:117 +#: ../nova/volume/manager.py:85 #, python-format -msgid "Generating root CA: %s" -msgstr "Gerando CA raiz: %s" +msgid "Re-exporting %s volumes" +msgstr "Re-exportando %s volumes" -#: nova/api/ec2/cloud.py:277 +#: ../nova/volume/manager.py:90 #, python-format -msgid "Create key pair %s" -msgstr "Criar par de chaves %s" +msgid "volume %s: skipping export" +msgstr "volume %s: ignorando export" -#: nova/api/ec2/cloud.py:285 +#: ../nova/volume/manager.py:96 #, python-format -msgid "Delete 
key pair %s" -msgstr "Remover par de chaves %s" +msgid "volume %s: creating" +msgstr "volume %s: criando" -#: nova/api/ec2/cloud.py:357 +#: ../nova/volume/manager.py:108 #, python-format -msgid "%s is not a valid ipProtocol" -msgstr "%s não é um ipProtocol válido" +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "volume %(vol_name)s: criando lv com tamanho %(vol_size)sG" -#: nova/api/ec2/cloud.py:361 -msgid "Invalid port range" -msgstr "Intervalo de porta inválido" +#: ../nova/volume/manager.py:112 +#, python-format +msgid "volume %s: creating export" +msgstr "volume %s: criando o export" -#: nova/api/ec2/cloud.py:392 +#: ../nova/volume/manager.py:123 #, python-format -msgid "Revoke security group ingress %s" -msgstr "Revogado entrada do grupo de segurança %s" +msgid "volume %s: created successfully" +msgstr "volume %s: criado com sucesso" -#: nova/api/ec2/cloud.py:401 nova/api/ec2/cloud.py:414 -msgid "No rule for the specified parameters." -msgstr "Não existe regra para os parâmetros especificados" +#: ../nova/volume/manager.py:131 +msgid "Volume is still attached" +msgstr "O volume continua atachado" + +#: ../nova/volume/manager.py:133 +msgid "Volume is not local to this node" +msgstr "O volume não pertence à este node" -#: nova/api/ec2/cloud.py:421 +#: ../nova/volume/manager.py:136 #, python-format -msgid "Authorize security group ingress %s" -msgstr "Autorizada entrada do grupo de segurança %s" +msgid "volume %s: removing export" +msgstr "volume %s: removendo export" -#: nova/api/ec2/cloud.py:432 +#: ../nova/volume/manager.py:138 #, python-format -msgid "This rule already exists in group %s" -msgstr "Esta regra já existe no grupo %s" +msgid "volume %s: deleting" +msgstr "volume %s: removendo" -#: nova/api/ec2/cloud.py:460 +#: ../nova/volume/manager.py:147 #, python-format -msgid "Create Security Group %s" -msgstr "Criar Grupo de Segurança %s" +msgid "volume %s: deleted successfully" +msgstr "volume %s: remoção realizada com sucesso" -#: 
nova/api/ec2/cloud.py:463 +#: ../nova/virt/xenapi/fake.py:74 #, python-format -msgid "group %s already exists" -msgstr "group %s já existe" +msgid "%(text)s: _db_content => %(content)s" +msgstr "%(text)s: _db_content => %(content)s" -#: nova/api/ec2/cloud.py:475 +#: ../nova/virt/xenapi/fake.py:304 ../nova/virt/xenapi/fake.py:404 +#: ../nova/virt/xenapi/fake.py:422 ../nova/virt/xenapi/fake.py:478 +msgid "Raising NotImplemented" +msgstr "Aumento não implementado" + +#: ../nova/virt/xenapi/fake.py:306 #, python-format -msgid "Delete security group %s" -msgstr "Excluir grupo de segurança %s" +msgid "xenapi.fake does not have an implementation for %s" +msgstr "xenapi.fake não tem uma implementação para %s" -#: nova/api/ec2/cloud.py:483 nova/compute/manager.py:452 +#: ../nova/virt/xenapi/fake.py:341 #, python-format -msgid "Get console output for instance %s" -msgstr "Obter saída do console para instância %s" +msgid "Calling %(localname)s %(impl)s" +msgstr "Chamando %(localname)s %(impl)s" -#: nova/api/ec2/cloud.py:543 +#: ../nova/virt/xenapi/fake.py:346 #, python-format -msgid "Create volume of %s GB" -msgstr "Criar volume de %s GB" +msgid "Calling getter %s" +msgstr "Chamando o pai %s" -#: nova/api/ec2/cloud.py:567 +#: ../nova/virt/xenapi/fake.py:406 #, python-format -msgid "Attach volume %s to instacne %s at %s" -msgstr "Anexar volume %s para instância %s em %s" +msgid "" +"xenapi.fake does not have an implementation for %s or it has been called " +"with the wrong number of arguments" +msgstr "" +"xenapi.fake não tem implementação para %s ou isto foi chamado com um número " +"de argumentos inválidos" + +#: ../nova/tests/test_cloud.py:256 +msgid "Can't test instances without a real virtual env." +msgstr "Não é possível testar instâncias sem um ambiente virtual real" -#: nova/api/ec2/cloud.py:579 +#: ../nova/tests/test_cloud.py:268 #, python-format -msgid "Detach volume %s" -msgstr "Desanexar volume %s" +msgid "Need to watch instance %s until it's running..." 
+msgstr "É necessário assistir a instância %s até ela estar rodando..." -#: nova/api/ec2/cloud.py:686 -msgid "Allocate address" -msgstr "Alocar endereço" +#: ../nova/virt/connection.py:73 +msgid "Failed to open connection to the hypervisor" +msgstr "Falha ao abrir a conexão com o hypervisor" -#: nova/api/ec2/cloud.py:691 +#: ../nova/network/linux_net.py:187 #, python-format -msgid "Release address %s" -msgstr "Liberar endereço %s" +msgid "Starting VLAN inteface %s" +msgstr "Iniciando a VLAN %s" -#: nova/api/ec2/cloud.py:696 +#: ../nova/network/linux_net.py:208 #, python-format -msgid "Associate address %s to instance %s" -msgstr "Atribuir endereço %s à instância %s" +msgid "Starting Bridge interface for %s" +msgstr "Iniciando a Bridge para %s" -#: nova/api/ec2/cloud.py:703 +#. pylint: disable=W0703 +#: ../nova/network/linux_net.py:314 #, python-format -msgid "Disassociate address %s" -msgstr "Desatribuir endereço %s" +msgid "Hupping dnsmasq threw %s" +msgstr "" -#: nova/api/ec2/cloud.py:730 -msgid "Going to start terminating instances" -msgstr "Começando a terminar instâncias" +#: ../nova/network/linux_net.py:316 +#, python-format +msgid "Pid %d is stale, relaunching dnsmasq" +msgstr "Pid %d está ultrapassado, reiniciando dnsmasq" -#: nova/api/ec2/cloud.py:738 +#. pylint: disable=W0703 +#: ../nova/network/linux_net.py:358 #, python-format -msgid "Reboot instance %r" -msgstr "Reiniciar instância %r" +msgid "killing radvd threw %s" +msgstr "" -#: nova/api/ec2/cloud.py:775 +#: ../nova/network/linux_net.py:360 #, python-format -msgid "De-registering image %s" -msgstr "Removendo o registro da imagem %s" +msgid "Pid %d is stale, relaunching radvd" +msgstr "Pid %d está ultrapassado, reiniciando radvd" -#: nova/api/ec2/cloud.py:783 +#. 
pylint: disable=W0703 +#: ../nova/network/linux_net.py:449 #, python-format -msgid "Registered image %s with id %s" -msgstr "Registrada imagem %s com id %s" +msgid "Killing dnsmasq threw %s" +msgstr "" -#: nova/api/ec2/cloud.py:789 nova/api/ec2/cloud.py:804 +#: ../nova/utils.py:58 #, python-format -msgid "attribute not supported: %s" -msgstr "atributo não suportado: %s" +msgid "Inner Exception: %s" +msgstr "Exceção interna: %s" -#: nova/api/ec2/cloud.py:794 +#: ../nova/utils.py:59 #, python-format -msgid "invalid id: %s" -msgstr "id inválido: %s" +msgid "Class %s cannot be found" +msgstr "Classe %s não pode ser encontrada" -#: nova/api/ec2/cloud.py:807 -msgid "user or group not specified" -msgstr "usuário ou grupo não especificado" +#: ../nova/utils.py:118 +#, python-format +msgid "Fetching %s" +msgstr "Obtendo %s" -#: nova/api/ec2/cloud.py:809 -msgid "only group \"all\" is supported" -msgstr "apenas o grupo \"all\" é suportado" +#: ../nova/utils.py:130 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "Executando comando (subprocesso): %s" -#: nova/api/ec2/cloud.py:811 -msgid "operation_type must be add or remove" -msgstr "operation_type deve ser add ou remove" +#: ../nova/utils.py:143 ../nova/utils.py:183 +#, python-format +msgid "Result was %s" +msgstr "Resultado foi %s" -#: nova/api/ec2/cloud.py:812 +#: ../nova/utils.py:159 #, python-format -msgid "Updating image %s publicity" -msgstr "Atualizando publicidade da imagem %s" +msgid "Running cmd (SSH): %s" +msgstr "Rodando o comando (SSH): %s" -#: nova/api/ec2/metadatarequesthandler.py:75 +#: ../nova/utils.py:217 #, python-format -msgid "Failed to get metadata for ip: %s" -msgstr "Falha ao obter metadados para o ip: %s" +msgid "debug in callback: %s" +msgstr "debug em callback: %s" -#: nova/api/openstack/__init__.py:70 +#: ../nova/utils.py:222 #, python-format -msgid "Caught error: %s" -msgstr "Capturado o erro: %s" +msgid "Running %s" +msgstr "Executando %s" -#: nova/api/openstack/__init__.py:86 
-msgid "Including admin operations in API." -msgstr "Incluindo operações administrativas na API." +#: ../nova/utils.py:262 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "Endereço para Link Local não encontrado: %s" -#: nova/api/openstack/servers.py:184 +#: ../nova/utils.py:265 #, python-format -msgid "Compute.api::lock %s" -msgstr "Compute.api::lock %s" +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "" +"Não foi possível atribuir um IP para o Link Local de %(interface)s :%(ex)s" -#: nova/api/openstack/servers.py:199 +#: ../nova/utils.py:363 #, python-format -msgid "Compute.api::unlock %s" -msgstr "Compute.api::unlock %s" +msgid "Invalid backend: %s" +msgstr "Backend inválido: %s" -#: nova/api/openstack/servers.py:213 +#: ../nova/utils.py:374 #, python-format -msgid "Compute.api::get_lock %s" -msgstr "Compute.api::get_lock %s" +msgid "backend %s" +msgstr "backend %s" -#: nova/api/openstack/servers.py:224 +#: ../nova/fakerabbit.py:49 #, python-format -msgid "Compute.api::pause %s" -msgstr "Compute.api::pause %s" +msgid "(%(nm)s) publish (key: %(routing_key)s) %(message)s" +msgstr "(%(nm)s) publicar (key: %(routing_key)s) %(message)s" -#: nova/api/openstack/servers.py:235 +#: ../nova/fakerabbit.py:54 #, python-format -msgid "Compute.api::unpause %s" -msgstr "Compute.api::unpause %s" +msgid "Publishing to route %s" +msgstr "Publicando para rota %s" -#: nova/api/openstack/servers.py:246 +#: ../nova/fakerabbit.py:84 #, python-format -msgid "compute.api::suspend %s" -msgstr "compute.api::suspend %s" +msgid "Declaring queue %s" +msgstr "Declarando fila %s" -#: nova/api/openstack/servers.py:257 +#: ../nova/fakerabbit.py:90 #, python-format -msgid "compute.api::resume %s" -msgstr "compute.api::resume %s" +msgid "Declaring exchange %s" +msgstr "Declarando troca %s" -#: nova/auth/dbdriver.py:84 +#: ../nova/fakerabbit.py:96 #, python-format -msgid "User %s already exists" -msgstr "Usuário %s já existe" +msgid "Binding %(queue)s to 
%(exchange)s with key %(routing_key)s" +msgstr "Ligação %(queue)s para %(exchange)s com chave %(routing_key)s" -#: nova/auth/dbdriver.py:106 nova/auth/ldapdriver.py:207 +#: ../nova/fakerabbit.py:121 #, python-format -msgid "Project can't be created because manager %s doesn't exist" -msgstr "Projeto não pode ser criado porque o gerente %s não existe." +msgid "Getting from %(queue)s: %(message)s" +msgstr "" -#: nova/auth/dbdriver.py:135 nova/auth/ldapdriver.py:204 +#: ../nova/virt/xenapi/vm_utils.py:135 ../nova/virt/hyperv.py:171 #, python-format -msgid "Project can't be created because project %s already exists" -msgstr "Projeto não pode ser criado porque o projeto %s já existe." +msgid "Created VM %s..." +msgstr "VM %s criada..." -#: nova/auth/dbdriver.py:157 nova/auth/ldapdriver.py:241 +#: ../nova/virt/xenapi/vm_utils.py:138 #, python-format -msgid "Project can't be modified because manager %s doesn't exist" -msgstr "Projeto não pode ser modificado porque o gerente %s não existe." +msgid "Created VM %(instance_name)s as %(vm_ref)s." +msgstr "VM %(instance_name)s criada como %(vm_ref)s." -#: nova/auth/dbdriver.py:245 +#: ../nova/virt/xenapi/vm_utils.py:168 #, python-format -msgid "User \"%s\" not found" -msgstr "Usuário \"%s\" não encontrado" +msgid "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " +msgstr "Criando VBD para VM %(vm_ref)s, VDI %(vdi_ref)s ... " -#: nova/auth/dbdriver.py:248 +#: ../nova/virt/xenapi/vm_utils.py:171 #, python-format -msgid "Project \"%s\" not found" -msgstr "Projeto \"%s\" não encontrado" +msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." +msgstr "VBD %(vbd_ref)s criado para VM %(vm_ref)s, VDI %(vdi_ref)s." 
-#: nova/auth/fakeldap.py:33 -msgid "Attempted to instantiate singleton" -msgstr "Tentativa de instanciar singleton" +#: ../nova/virt/xenapi/vm_utils.py:187 +#, python-format +msgid "VBD not found in instance %s" +msgstr "O VBD não foi encontrado na instância %s" -#: nova/auth/ldapdriver.py:181 +#: ../nova/virt/xenapi/vm_utils.py:197 #, python-format -msgid "LDAP object for %s doesn't exist" -msgstr "Objeto LDAP para %s não existe" +msgid "Unable to unplug VBD %s" +msgstr "Não é possível desconectar o VBD %s" -#: nova/auth/ldapdriver.py:218 +#: ../nova/virt/xenapi/vm_utils.py:209 #, python-format -msgid "Project can't be created because user %s doesn't exist" -msgstr "Projeto não pode ser criado porque o usuário %s não existe" +msgid "Unable to destroy VBD %s" +msgstr "Não é possível destruir o VBD %s" + +#: ../nova/virt/xenapi/vm_utils.py:224 +#, python-format +msgid "Creating VIF for VM %(vm_ref)s, network %(network_ref)s." +msgstr "Criando a VIF para VM %(vm_ref)s, rede %(network_ref)s." -#: nova/auth/ldapdriver.py:478 +#: ../nova/virt/xenapi/vm_utils.py:227 #, python-format -msgid "User %s is already a member of the group %s" -msgstr "Usuário %s já pertence ao grupo %s" +msgid "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s." +msgstr "VIF %(vif_ref)s criada para VM %(vm_ref)s, rede %(network_ref)s." -#: nova/auth/ldapdriver.py:507 +#: ../nova/virt/xenapi/vm_utils.py:246 #, python-format msgid "" -"Attempted to remove the last member of a group. Deleting the group at %s " -"instead." +"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s) on " +"%(sr_ref)s." msgstr "" -"Tentatica de remover o último membto de um grupo. Ao invés disso excluindo o " -"grupo %s." +"VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s) no SR " +"%(sr_ref)s criada com sucesso." -#: nova/auth/ldapdriver.py:528 +#. TODO(sirp): Add quiesce and VSS locking support when Windows support +#. 
is added +#: ../nova/virt/xenapi/vm_utils.py:258 #, python-format -msgid "Group at dn %s doesn't exist" -msgstr "Grupo no dn %s não existe" +msgid "Snapshotting VM %(vm_ref)s with label '%(label)s'..." +msgstr "Fazendo um snapshot da VM %(vm_ref)s com rótulo '%(label)s'..." -#: nova/auth/manager.py:259 +#: ../nova/virt/xenapi/vm_utils.py:272 #, python-format -msgid "Looking up user: %r" -msgstr "Procurando usuário: %r" +msgid "Created snapshot %(template_vm_ref)s from VM %(vm_ref)s." +msgstr "Snapshot %(template_vm_ref)s criado a partir da VM %(vm_ref)s." -#: nova/auth/manager.py:263 +#: ../nova/virt/xenapi/vm_utils.py:286 #, python-format -msgid "Failed authorization for access key %s" -msgstr "Falha de autorização para chave de acesso %s" +msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s" +msgstr "" +"Solicitando à xapi para realizar upload da imagem %(vdi_uuids)s com ID " +"%(image_id)s" -#: nova/auth/manager.py:264 +#: ../nova/virt/xenapi/vm_utils.py:327 #, python-format -msgid "No user found for access key %s" -msgstr "Nenhum usuário encontrado para chave de acesso %s" +msgid "Size for image %(image)s:%(virtual_size)d" +msgstr "Tamanho da imagem %(image)s:%(virtual_size)d" -#: nova/auth/manager.py:270 +#: ../nova/virt/xenapi/vm_utils.py:332 #, python-format -msgid "Using project name = user name (%s)" -msgstr "Usando nome do projeto = nome do usuário (%s)" +msgid "Glance image %s" +msgstr "" -#: nova/auth/manager.py:275 +#. we need to invoke a plugin for copying VDI's +#. 
content into proper path +#: ../nova/virt/xenapi/vm_utils.py:342 #, python-format -msgid "failed authorization: no project named %s (user=%s)" -msgstr "falha de autorização: nenhum projeto de nome %s (usuário=%s)" +msgid "Copying VDI %s to /boot/guest on dom0" +msgstr "Copiando o VDI %s de /boot/guest no dom0" -#: nova/auth/manager.py:277 +#: ../nova/virt/xenapi/vm_utils.py:352 #, python-format -msgid "No project called %s could be found" -msgstr "Nenhum projeto chamado %s pode ser encontrado." +msgid "Kernel/Ramdisk VDI %s destroyed" +msgstr "Kernel/Ramdisk %s destruidos" -#: nova/auth/manager.py:281 +#: ../nova/virt/xenapi/vm_utils.py:361 #, python-format -msgid "Failed authorization: user %s not admin and not member of project %s" +msgid "Asking xapi to fetch %(url)s as %(access)s" msgstr "" -"Falha de autorização: usuário %s não é administrador nem membro do projeto %s" -#: nova/auth/manager.py:283 +#: ../nova/virt/xenapi/vm_utils.py:386 ../nova/virt/xenapi/vm_utils.py:402 #, python-format -msgid "User %s is not a member of project %s" -msgstr "Usuário %s não é membro do projeto %s" +msgid "Looking up vdi %s for PV kernel" +msgstr "Verificando o vdi %s para kernel PV" -#: nova/auth/manager.py:292 nova/auth/manager.py:303 +#: ../nova/virt/xenapi/vm_utils.py:397 #, python-format -msgid "Invalid signature for user %s" -msgstr "Assinatura inválida para usuário %s" +msgid "PV Kernel in VDI:%s" +msgstr "Kernel PV no VDI: %s" -#: nova/auth/manager.py:293 nova/auth/manager.py:304 -msgid "Signature does not match" -msgstr "Assinatura não confere" - -#: nova/auth/manager.py:374 -msgid "Must specify project" -msgstr "Deve especificar projeto" +#: ../nova/virt/xenapi/vm_utils.py:405 +#, python-format +msgid "Running pygrub against %s" +msgstr "" -#: nova/auth/manager.py:408 +#: ../nova/virt/xenapi/vm_utils.py:411 #, python-format -msgid "The %s role can not be found" -msgstr "O papel %s não foi encontrado" +msgid "Found Xen kernel %s" +msgstr "Kernel Xen encontrado: %s" + 
+#: ../nova/virt/xenapi/vm_utils.py:413 +msgid "No Xen kernel found. Booting HVM." +msgstr "Kernel Xen não encontrado. Iniciando como HVM." -#: nova/auth/manager.py:410 +#: ../nova/virt/xenapi/vm_utils.py:425 ../nova/virt/hyperv.py:431 #, python-format -msgid "The %s role is global only" -msgstr "O papel %s é apenas global" +msgid "duplicate name found: %s" +msgstr "nome duplicado encontrado: %s" -#: nova/auth/manager.py:412 +#: ../nova/virt/xenapi/vm_utils.py:442 #, python-format -msgid "Adding role %s to user %s in project %s" -msgstr "Adicionando papel %s ao usuário %s no projeto %s" +msgid "VDI %s is still available" +msgstr "O VDI %s continua disponível" -#: nova/auth/manager.py:438 +#: ../nova/virt/xenapi/vm_utils.py:463 #, python-format -msgid "Removing role %s from user %s on project %s" -msgstr "Removendo papel %s do usuário %s no projeto %s" +msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgstr "(VM_UTILS) xenserver vm state -> |%s|" -#: nova/auth/manager.py:505 +#: ../nova/virt/xenapi/vm_utils.py:465 #, python-format -msgid "Created project %s with manager %s" -msgstr "Criado projeto %s com gerente %s" +msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgstr "(VM_UTILS) xenapi power_state -> |%s|" -#: nova/auth/manager.py:523 +#: ../nova/virt/xenapi/vm_utils.py:525 #, python-format -msgid "modifying project %s" -msgstr "modificando projeto %s" +msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s" +msgstr "" -#: nova/auth/manager.py:553 +#: ../nova/virt/xenapi/vm_utils.py:542 #, python-format -msgid "Remove user %s from project %s" -msgstr "Remover usuário %s do projeto %s" +msgid "Re-scanning SR %s" +msgstr "" -#: nova/auth/manager.py:581 +#: ../nova/virt/xenapi/vm_utils.py:567 #, python-format -msgid "Deleting project %s" -msgstr "Excluindo projeto %s" +msgid "" +"VHD coalesce attempts exceeded (%(counter)d > %(max_attempts)d), giving up..." 
+msgstr "" -#: nova/auth/manager.py:637 +#: ../nova/virt/xenapi/vm_utils.py:574 #, python-format -msgid "Created user %s (admin: %r)" -msgstr "Criado usuário %s (administrador: %r)" +msgid "" +"Parent %(parent_uuid)s doesn't match original parent " +"%(original_parent_uuid)s, waiting for coalesce..." +msgstr "" -#: nova/auth/manager.py:645 +#: ../nova/virt/xenapi/vm_utils.py:590 #, python-format -msgid "Deleting user %s" -msgstr "Apagando usuário %s" +msgid "No VDIs found for VM %s" +msgstr "" -#: nova/auth/manager.py:655 +#: ../nova/virt/xenapi/vm_utils.py:594 #, python-format -msgid "Access Key change for user %s" +msgid "Unexpected number of VDIs (%(num_vdis)s) found for VM %(vm_ref)s" msgstr "" -#: nova/auth/manager.py:657 +#: ../nova/virt/xenapi/vm_utils.py:653 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:188 #, python-format -msgid "Secret Key change for user %s" +msgid "Creating VBD for VDI %s ... " msgstr "" -#: nova/auth/manager.py:659 +#: ../nova/virt/xenapi/vm_utils.py:655 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:190 #, python-format -msgid "Admin status set to %r for user %s" +msgid "Creating VBD for VDI %s done." msgstr "" -#: nova/auth/manager.py:708 +#: ../nova/virt/xenapi/vm_utils.py:657 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:192 #, python-format -msgid "No vpn data for project %s" +msgid "Plugging VBD %s ... " msgstr "" -#: nova/cloudpipe/pipelib.py:45 -msgid "Template for script to run on cloudpipe instance boot" +#: ../nova/virt/xenapi/vm_utils.py:659 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:194 +#, python-format +msgid "Plugging VBD %s done." 
msgstr "" -#: nova/cloudpipe/pipelib.py:48 -msgid "Network to push into openvpn config" +#: ../nova/virt/xenapi/vm_utils.py:661 +#, python-format +msgid "VBD %(vbd)s plugged as %(orig_dev)s" msgstr "" -#: nova/cloudpipe/pipelib.py:51 -msgid "Netmask to push into openvpn config" +#: ../nova/virt/xenapi/vm_utils.py:664 +#, python-format +msgid "VBD %(vbd)s plugged into wrong dev, remapping to %(dev)s" msgstr "" -#: nova/cloudpipe/pipelib.py:97 +#: ../nova/virt/xenapi/vm_utils.py:668 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:197 #, python-format -msgid "Launching VPN for %s" -msgstr "Executando VPN para %s" +msgid "Destroying VBD for VDI %s ... " +msgstr "" -#: nova/compute/api.py:67 +#: ../nova/virt/xenapi/vm_utils.py:671 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:200 #, python-format -msgid "Instance %d was not found in get_network_topic" +msgid "Destroying VBD for VDI %s done." msgstr "" -#: nova/compute/api.py:73 +#: ../nova/virt/xenapi/vm_utils.py:683 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:211 +msgid "VBD.unplug successful first time." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:688 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:216 +msgid "VBD.unplug rejected: retrying..." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:692 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:220 +msgid "VBD.unplug successful eventually." 
+msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:695 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:223 #, python-format -msgid "Instance %d has no host" +msgid "Ignoring XenAPI.Failure in VBD.unplug: %s" msgstr "" -#: nova/compute/api.py:92 +#: ../nova/virt/xenapi/vm_utils.py:704 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:66 #, python-format -msgid "Quota exceeeded for %s, tried to run %s instances" +msgid "Ignoring XenAPI.Failure %s" msgstr "" -#: nova/compute/api.py:94 +#: ../nova/virt/xenapi/vm_utils.py:735 #, python-format msgid "" -"Instance quota exceeded. You can only run %s more instances of this type." +"Writing partition table %(primary_first)d %(primary_last)d to %(dest)s..." msgstr "" -#: nova/compute/api.py:109 -msgid "Creating a raw instance" +#: ../nova/virt/xenapi/vm_utils.py:747 +#, python-format +msgid "Writing partition table %s done." msgstr "" -#: nova/compute/api.py:156 +#: ../nova/tests/test_rpc.py:89 #, python-format -msgid "Going to run %s instances..." 
+msgid "Nested received %(queue)s, %(value)s" msgstr "" -#: nova/compute/api.py:180 +#: ../nova/tests/test_rpc.py:95 #, python-format -msgid "Casting to scheduler for %s/%s's instance %s" +msgid "Nested return %s" msgstr "" -#: nova/compute/api.py:279 +#: ../nova/tests/test_rpc.py:120 ../nova/tests/test_rpc.py:126 #, python-format -msgid "Going to try and terminate %s" +msgid "Received %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:44 +msgid "Use of empty request context is deprecated" msgstr "" -#: nova/compute/api.py:283 +#: ../nova/db/sqlalchemy/api.py:133 #, python-format -msgid "Instance %d was not found during terminate" +msgid "No service for id %s" msgstr "" -#: nova/compute/api.py:288 +#: ../nova/db/sqlalchemy/api.py:251 #, python-format -msgid "Instance %d is already being terminated" +msgid "No service for %(host)s, %(binary)s" msgstr "" -#: nova/compute/api.py:450 +#: ../nova/db/sqlalchemy/api.py:592 +msgid "No fixed ips defined" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:608 #, python-format -msgid "Invalid device specified: %s. Example device: /dev/vdb" +msgid "No floating ip for address %s" msgstr "" -#: nova/compute/api.py:465 -msgid "Volume isn't attached to anything!" 
+#: ../nova/db/sqlalchemy/api.py:629 +#, python-format +msgid "No address for instance %s" msgstr "" -#: nova/compute/disk.py:71 +#: ../nova/db/sqlalchemy/api.py:961 #, python-format -msgid "Input partition size not evenly divisible by sector size: %d / %d" +msgid "no keypair for user %(user_id)s, name %(name)s" msgstr "" -#: nova/compute/disk.py:75 +#: ../nova/db/sqlalchemy/api.py:1076 ../nova/db/sqlalchemy/api.py:1156 #, python-format -msgid "Bytes for local storage not evenly divisible by sector size: %d / %d" +msgid "No network for id %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1086 +msgid "No networks defined" msgstr "" -#: nova/compute/disk.py:128 +#: ../nova/db/sqlalchemy/api.py:1115 #, python-format -msgid "Could not attach image to loopback: %s" +msgid "No network for bridge %s" msgstr "" -#: nova/compute/disk.py:136 +#: ../nova/db/sqlalchemy/api.py:1129 ../nova/db/sqlalchemy/api.py:1142 #, python-format -msgid "Failed to load partition: %s" +msgid "No network for instance %s" msgstr "" -#: nova/compute/disk.py:158 +#: ../nova/db/sqlalchemy/api.py:1277 #, python-format -msgid "Failed to mount filesystem: %s" +msgid "Token %s does not exist" msgstr "" -#: nova/compute/instance_types.py:41 +#: ../nova/db/sqlalchemy/api.py:1302 #, python-format -msgid "Unknown instance type: %s" +msgid "No quota for project_id %s" msgstr "" -#: nova/compute/manager.py:69 +#: ../nova/db/sqlalchemy/api.py:1455 ../nova/db/sqlalchemy/api.py:1501 +#: ../nova/api/ec2/__init__.py:323 #, python-format -msgid "check_instance_lock: decorating: |%s|" +msgid "Volume %s not found" msgstr "" -#: nova/compute/manager.py:71 +#: ../nova/db/sqlalchemy/api.py:1514 #, python-format -msgid "check_instance_lock: arguments: |%s| |%s| |%s|" +msgid "No export device found for volume %s" msgstr "" -#: nova/compute/manager.py:75 +#: ../nova/db/sqlalchemy/api.py:1527 #, python-format -msgid "check_instance_lock: locked: |%s|" +msgid "No target id found for volume %s" msgstr "" -#: 
nova/compute/manager.py:77 +#: ../nova/db/sqlalchemy/api.py:1572 #, python-format -msgid "check_instance_lock: admin: |%s|" +msgid "No security group with id %s" msgstr "" -#: nova/compute/manager.py:82 +#: ../nova/db/sqlalchemy/api.py:1589 #, python-format -msgid "check_instance_lock: executing: |%s|" +msgid "No security group named %(group_name)s for project: %(project_id)s" msgstr "" -#: nova/compute/manager.py:86 +#: ../nova/db/sqlalchemy/api.py:1682 #, python-format -msgid "check_instance_lock: not executing |%s|" +msgid "No secuity group rule with id %s" msgstr "" -#: nova/compute/manager.py:157 -msgid "Instance has already been created" +#: ../nova/db/sqlalchemy/api.py:1756 +#, python-format +msgid "No user for id %s" msgstr "" -#: nova/compute/manager.py:158 +#: ../nova/db/sqlalchemy/api.py:1772 #, python-format -msgid "instance %s: starting..." +msgid "No user for access key %s" msgstr "" -#: nova/compute/manager.py:197 +#: ../nova/db/sqlalchemy/api.py:1834 #, python-format -msgid "instance %s: Failed to spawn" +msgid "No project with id %s" msgstr "" -#: nova/compute/manager.py:211 nova/tests/test_cloud.py:228 +#: ../nova/db/sqlalchemy/api.py:1979 #, python-format -msgid "Terminating instance %s" +msgid "No console pool with id %(pool_id)s" msgstr "" -#: nova/compute/manager.py:217 +#: ../nova/db/sqlalchemy/api.py:1996 #, python-format -msgid "Disassociating address %s" +msgid "" +"No console pool of type %(console_type)s for compute host %(compute_host)s " +"on proxy host %(host)s" msgstr "" -#: nova/compute/manager.py:230 +#: ../nova/db/sqlalchemy/api.py:2035 #, python-format -msgid "Deallocating address %s" +msgid "No console for instance %(instance_id)s in pool %(pool_id)s" msgstr "" -#: nova/compute/manager.py:243 +#: ../nova/db/sqlalchemy/api.py:2057 #, python-format -msgid "trying to destroy already destroyed instance: %s" +msgid "on instance %s" msgstr "" -#: nova/compute/manager.py:257 +#: ../nova/db/sqlalchemy/api.py:2058 #, python-format -msgid 
"Rebooting instance %s" +msgid "No console with id %(console_id)s %(idesc)s" msgstr "" -#: nova/compute/manager.py:260 +#: ../nova/db/sqlalchemy/api.py:2078 ../nova/db/sqlalchemy/api.py:2097 #, python-format -msgid "trying to reboot a non-running instance: %s (state: %s excepted: %s)" +msgid "No zone with id %(zone_id)s" msgstr "" -#: nova/compute/manager.py:286 +#: ../nova/virt/libvirt_conn.py:160 #, python-format -msgid "instance %s: snapshotting" +msgid "Checking state of %s" msgstr "" -#: nova/compute/manager.py:289 +#: ../nova/virt/libvirt_conn.py:165 #, python-format -msgid "" -"trying to snapshot a non-running instance: %s (state: %s excepted: %s)" +msgid "Current state of %(name)s was %(state)s." msgstr "" -#: nova/compute/manager.py:301 +#: ../nova/virt/libvirt_conn.py:183 #, python-format -msgid "instance %s: rescuing" +msgid "Connecting to libvirt: %s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:196 +msgid "Connection to libvirt broke" msgstr "" -#: nova/compute/manager.py:316 +#: ../nova/virt/libvirt_conn.py:258 #, python-format -msgid "instance %s: unrescuing" +msgid "instance %(instance_name)s: deleting instance files %(target)s" msgstr "" -#: nova/compute/manager.py:335 +#: ../nova/virt/libvirt_conn.py:283 #, python-format -msgid "instance %s: pausing" +msgid "Invalid device path %s" msgstr "" -#: nova/compute/manager.py:352 +#: ../nova/virt/libvirt_conn.py:313 #, python-format -msgid "instance %s: unpausing" +msgid "No disk at %s" msgstr "" -#: nova/compute/manager.py:369 +#: ../nova/virt/libvirt_conn.py:320 +msgid "Instance snapshotting is not supported for libvirtat this time" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:336 #, python-format -msgid "instance %s: retrieving diagnostics" +msgid "instance %s: rebooted" msgstr "" -#: nova/compute/manager.py:382 +#: ../nova/virt/libvirt_conn.py:339 #, python-format -msgid "instance %s: suspending" +msgid "_wait_for_reboot failed: %s" msgstr "" -#: nova/compute/manager.py:401 +#: 
../nova/virt/libvirt_conn.py:382 #, python-format -msgid "instance %s: resuming" +msgid "instance %s: rescued" msgstr "" -#: nova/compute/manager.py:420 +#: ../nova/virt/libvirt_conn.py:385 #, python-format -msgid "instance %s: locking" +msgid "_wait_for_rescue failed: %s" msgstr "" -#: nova/compute/manager.py:432 +#: ../nova/virt/libvirt_conn.py:411 #, python-format -msgid "instance %s: unlocking" +msgid "instance %s: is running" msgstr "" -#: nova/compute/manager.py:442 +#: ../nova/virt/libvirt_conn.py:422 #, python-format -msgid "instance %s: getting locked state" +msgid "instance %s: booted" msgstr "" -#: nova/compute/manager.py:462 +#: ../nova/virt/libvirt_conn.py:425 ../nova/virt/xenapi/vmops.py:186 #, python-format -msgid "instance %s: attaching volume %s to %s" +msgid "instance %s: failed to boot" msgstr "" -#: nova/compute/manager.py:478 +#: ../nova/virt/libvirt_conn.py:436 #, python-format -msgid "instance %s: attach failed %s, removing" +msgid "virsh said: %r" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:440 +msgid "cool, it's a device" msgstr "" -#: nova/compute/manager.py:493 +#: ../nova/virt/libvirt_conn.py:448 #, python-format -msgid "Detach volume %s from mountpoint %s on instance %s" +msgid "data: %(data)r, fpath: %(fpath)r" msgstr "" -#: nova/compute/manager.py:497 +#: ../nova/virt/libvirt_conn.py:456 #, python-format -msgid "Detaching volume from unknown instance %s" +msgid "Contents of file %(fpath)s: %(contents)r" msgstr "" -#: nova/compute/monitor.py:259 +#: ../nova/virt/libvirt_conn.py:489 +msgid "Unable to find an open port" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:563 #, python-format -msgid "updating %s..." 
+msgid "instance %s: Creating image" msgstr "" -#: nova/compute/monitor.py:289 -msgid "unexpected error during update" +#: ../nova/virt/libvirt_conn.py:646 +#, python-format +msgid "instance %(inst_name)s: injecting key into image %(img_id)s" msgstr "" -#: nova/compute/monitor.py:355 +#: ../nova/virt/libvirt_conn.py:649 #, python-format -msgid "Cannot get blockstats for \"%s\" on \"%s\"" +msgid "instance %(inst_name)s: injecting net into image %(img_id)s" msgstr "" -#: nova/compute/monitor.py:377 +#. This could be a windows image, or a vmdk format disk +#: ../nova/virt/libvirt_conn.py:657 #, python-format -msgid "Cannot get ifstats for \"%s\" on \"%s\"" +msgid "" +"instance %(inst_name)s: ignoring error injecting data into image %(img_id)s " +"(%(e)s)" msgstr "" -#: nova/compute/monitor.py:412 -msgid "unexpected exception getting connection" +#. TODO(termie): cache? +#: ../nova/virt/libvirt_conn.py:665 +#, python-format +msgid "instance %s: starting toXML method" msgstr "" -#: nova/compute/monitor.py:427 +#: ../nova/virt/libvirt_conn.py:732 #, python-format -msgid "Found instance: %s" +msgid "instance %s: finished toXML method" msgstr "" -#: nova/db/sqlalchemy/api.py:43 -msgid "Use of empty request context is deprecated" +#: ../nova/virt/libvirt_conn.py:751 +msgid "diagnostics are not supported for libvirt" msgstr "" -#: nova/db/sqlalchemy/api.py:132 +#: ../nova/virt/libvirt_conn.py:1225 #, python-format -msgid "No service for id %s" +msgid "Attempted to unfilter instance %s which is not filtered" msgstr "" -#: nova/db/sqlalchemy/api.py:229 +#: ../nova/api/ec2/metadatarequesthandler.py:76 #, python-format -msgid "No service for %s, %s" +msgid "Failed to get metadata for ip: %s" +msgstr "Falha ao obter metadados para o ip: %s" + +#: ../nova/auth/fakeldap.py:33 +msgid "Attempted to instantiate singleton" +msgstr "Tentativa de instanciar singleton" + +#: ../nova/network/api.py:39 +#, python-format +msgid "Quota exceeeded for %s, tried to allocate address" msgstr "" 
-#: nova/db/sqlalchemy/api.py:574 +#: ../nova/network/api.py:42 +msgid "Address quota exceeded. You cannot allocate any more addresses" +msgstr "" + +#: ../nova/tests/test_volume.py:162 #, python-format -msgid "No floating ip for address %s" +msgid "Target %s allocated" msgstr "" -#: nova/db/sqlalchemy/api.py:668 +#: ../nova/virt/images.py:70 #, python-format -msgid "No instance for id %s" +msgid "Finished retreving %(url)s -- placed in %(path)s" msgstr "" -#: nova/db/sqlalchemy/api.py:758 nova/virt/libvirt_conn.py:598 -#: nova/virt/xenapi/volumeops.py:48 nova/virt/xenapi/volumeops.py:103 +#: ../nova/scheduler/driver.py:66 +msgid "Must implement a fallback schedule" +msgstr "" + +#: ../nova/console/manager.py:70 +msgid "Adding console" +msgstr "" + +#: ../nova/console/manager.py:90 #, python-format -msgid "Instance %s not found" +msgid "Tried to remove non-existant console %(console_id)s." +msgstr "" + +#: ../nova/api/direct.py:149 +msgid "not available" msgstr "" -#: nova/db/sqlalchemy/api.py:891 +#: ../nova/api/ec2/cloud.py:62 #, python-format -msgid "no keypair for user %s, name %s" +msgid "The key_pair %s already exists" msgstr "" -#: nova/db/sqlalchemy/api.py:1006 nova/db/sqlalchemy/api.py:1064 +#. 
TODO(vish): Do this with M2Crypto instead +#: ../nova/api/ec2/cloud.py:118 #, python-format -msgid "No network for id %s" +msgid "Generating root CA: %s" +msgstr "Gerando CA raiz: %s" + +#: ../nova/api/ec2/cloud.py:303 +#, python-format +msgid "Create key pair %s" +msgstr "Criar par de chaves %s" + +#: ../nova/api/ec2/cloud.py:311 +#, python-format +msgid "Delete key pair %s" +msgstr "Remover par de chaves %s" + +#: ../nova/api/ec2/cloud.py:386 +#, python-format +msgid "%s is not a valid ipProtocol" +msgstr "%s não é um ipProtocol válido" + +#: ../nova/api/ec2/cloud.py:390 +msgid "Invalid port range" +msgstr "Intervalo de porta inválido" + +#: ../nova/api/ec2/cloud.py:421 +#, python-format +msgid "Revoke security group ingress %s" +msgstr "Revogado entrada do grupo de segurança %s" + +#: ../nova/api/ec2/cloud.py:430 ../nova/api/ec2/cloud.py:459 +msgid "Not enough parameters to build a valid rule." msgstr "" -#: nova/db/sqlalchemy/api.py:1036 +#: ../nova/api/ec2/cloud.py:443 +msgid "No rule for the specified parameters." 
+msgstr "Não existe regra para os parâmetros especificados" + +#: ../nova/api/ec2/cloud.py:450 #, python-format -msgid "No network for bridge %s" +msgid "Authorize security group ingress %s" +msgstr "Autorizada entrada do grupo de segurança %s" + +#: ../nova/api/ec2/cloud.py:464 +#, python-format +msgid "This rule already exists in group %s" +msgstr "Esta regra já existe no grupo %s" + +#: ../nova/api/ec2/cloud.py:492 +#, python-format +msgid "Create Security Group %s" +msgstr "Criar Grupo de Segurança %s" + +#: ../nova/api/ec2/cloud.py:495 +#, python-format +msgid "group %s already exists" +msgstr "group %s já existe" + +#: ../nova/api/ec2/cloud.py:507 +#, python-format +msgid "Delete security group %s" +msgstr "Excluir grupo de segurança %s" + +#: ../nova/api/ec2/cloud.py:584 +#, python-format +msgid "Create volume of %s GB" +msgstr "Criar volume de %s GB" + +#: ../nova/api/ec2/cloud.py:612 +#, python-format +msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" msgstr "" -#: nova/db/sqlalchemy/api.py:1050 +#: ../nova/api/ec2/cloud.py:629 #, python-format -msgid "No network for instance %s" +msgid "Detach volume %s" +msgstr "Desanexar volume %s" + +#: ../nova/api/ec2/cloud.py:761 +msgid "Allocate address" +msgstr "Alocar endereço" + +#: ../nova/api/ec2/cloud.py:766 +#, python-format +msgid "Release address %s" +msgstr "Liberar endereço %s" + +#: ../nova/api/ec2/cloud.py:771 +#, python-format +msgid "Associate address %(public_ip)s to instance %(instance_id)s" msgstr "" -#: nova/db/sqlalchemy/api.py:1180 +#: ../nova/api/ec2/cloud.py:780 #, python-format -msgid "Token %s does not exist" +msgid "Disassociate address %s" +msgstr "Desatribuir endereço %s" + +#: ../nova/api/ec2/cloud.py:807 +msgid "Going to start terminating instances" +msgstr "Começando a terminar instâncias" + +#: ../nova/api/ec2/cloud.py:815 +#, python-format +msgid "Reboot instance %r" +msgstr "Reiniciar instância %r" + +#: ../nova/api/ec2/cloud.py:867 +#, python-format 
+msgid "De-registering image %s" +msgstr "Removendo o registro da imagem %s" + +#: ../nova/api/ec2/cloud.py:875 +#, python-format +msgid "Registered image %(image_location)s with id %(image_id)s" msgstr "" -#: nova/db/sqlalchemy/api.py:1205 +#: ../nova/api/ec2/cloud.py:882 ../nova/api/ec2/cloud.py:900 #, python-format -msgid "No quota for project_id %s" +msgid "attribute not supported: %s" +msgstr "atributo não suportado: %s" + +#: ../nova/api/ec2/cloud.py:890 +#, python-format +msgid "invalid id: %s" +msgstr "id inválido: %s" + +#: ../nova/api/ec2/cloud.py:903 +msgid "user or group not specified" +msgstr "usuário ou grupo não especificado" + +#: ../nova/api/ec2/cloud.py:905 +msgid "only group \"all\" is supported" +msgstr "apenas o grupo \"all\" é suportado" + +#: ../nova/api/ec2/cloud.py:907 +msgid "operation_type must be add or remove" +msgstr "operation_type deve ser add ou remove" + +#: ../nova/api/ec2/cloud.py:908 +#, python-format +msgid "Updating image %s publicity" +msgstr "Atualizando publicidade da imagem %s" + +#: ../bin/nova-api.py:52 +#, python-format +msgid "Using paste.deploy config at: %s" msgstr "" -#: nova/db/sqlalchemy/api.py:1356 +#: ../bin/nova-api.py:57 #, python-format -msgid "No volume for id %s" +msgid "No paste configuration for app: %s" msgstr "" -#: nova/db/sqlalchemy/api.py:1401 +#: ../bin/nova-api.py:59 #, python-format -msgid "Volume %s not found" +msgid "" +"App Config: %(api)s\n" +"%(config)r" msgstr "" -#: nova/db/sqlalchemy/api.py:1413 +#: ../bin/nova-api.py:64 #, python-format -msgid "No export device found for volume %s" +msgid "Running %s API" msgstr "" -#: nova/db/sqlalchemy/api.py:1426 +#: ../bin/nova-api.py:69 #, python-format -msgid "No target id found for volume %s" +msgid "No known API applications configured in %s." 
msgstr "" -#: nova/db/sqlalchemy/api.py:1471 +#: ../bin/nova-api.py:83 #, python-format -msgid "No security group with id %s" +msgid "Starting nova-api node (version %s)" msgstr "" -#: nova/db/sqlalchemy/api.py:1488 +#: ../bin/nova-api.py:89 #, python-format -msgid "No security group named %s for project: %s" +msgid "No paste configuration found for: %s" msgstr "" -#: nova/db/sqlalchemy/api.py:1576 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:84 #, python-format -msgid "No secuity group rule with id %s" +msgid "Argument %(key)s value %(value)s is too short." msgstr "" -#: nova/db/sqlalchemy/api.py:1650 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:89 #, python-format -msgid "No user for id %s" +msgid "Argument %(key)s value %(value)s contains invalid characters." msgstr "" -#: nova/db/sqlalchemy/api.py:1666 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:94 #, python-format -msgid "No user for access key %s" +msgid "Argument %(key)s value %(value)s starts with a hyphen." msgstr "" -#: nova/db/sqlalchemy/api.py:1728 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:102 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:130 #, python-format -msgid "No project with id %s" +msgid "Argument %s is required." msgstr "" -#: nova/image/glance.py:78 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:117 #, python-format -msgid "Parallax returned HTTP error %d from request for /images" +msgid "" +"Argument %(key)s may not take value %(value)s. Valid values are ['true', " +"'false']." msgstr "" -#: nova/image/glance.py:97 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:163 #, python-format -msgid "Parallax returned HTTP error %d from request for /images/detail" +msgid "" +"Created VDI %(vdi_ref)s (%(label)s, %(size)s, %(read_only)s) on %(sr_ref)s." 
msgstr "" -#: nova/image/s3.py:82 +#: ../nova/virt/xenapi/vmops.py:67 #, python-format -msgid "Image %s could not be found" +msgid "Attempted to create non-unique name %s" msgstr "" -#: nova/network/api.py:39 +#: ../nova/virt/xenapi/vmops.py:73 #, python-format -msgid "Quota exceeeded for %s, tried to allocate address" +msgid "instance %(name)s: not enough free memory" msgstr "" -#: nova/network/api.py:42 -msgid "Address quota exceeded. You cannot allocate any more addresses" +#: ../nova/virt/xenapi/vmops.py:148 +#, python-format +msgid "Starting VM %s..." msgstr "" -#: nova/network/linux_net.py:176 +#: ../nova/virt/xenapi/vmops.py:151 #, python-format -msgid "Starting VLAN inteface %s" +msgid "Spawning VM %(instance_name)s created %(vm_ref)s." msgstr "" -#: nova/network/linux_net.py:186 +#: ../nova/virt/xenapi/vmops.py:162 #, python-format -msgid "Starting Bridge interface for %s" +msgid "Invalid value for onset_files: '%s'" msgstr "" -#: nova/network/linux_net.py:254 +#: ../nova/virt/xenapi/vmops.py:167 #, python-format -msgid "Hupping dnsmasq threw %s" +msgid "Injecting file path: '%s'" msgstr "" -#: nova/network/linux_net.py:256 +#: ../nova/virt/xenapi/vmops.py:180 #, python-format -msgid "Pid %d is stale, relaunching dnsmasq" +msgid "Instance %s: booted" msgstr "" -#: nova/network/linux_net.py:334 +#: ../nova/virt/xenapi/vmops.py:232 #, python-format -msgid "Killing dnsmasq threw %s" +msgid "Instance not present %s" msgstr "" -#: nova/network/manager.py:135 -msgid "setting network host" +#. TODO(sirp): Add quiesce and VSS locking support when Windows support +#. 
is added +#: ../nova/virt/xenapi/vmops.py:261 +#, python-format +msgid "Starting snapshot for VM %s" msgstr "" -#: nova/network/manager.py:190 +#: ../nova/virt/xenapi/vmops.py:269 #, python-format -msgid "Leasing IP %s" +msgid "Unable to Snapshot %(vm_ref)s: %(exc)s" msgstr "" -#: nova/network/manager.py:194 +#: ../nova/virt/xenapi/vmops.py:280 #, python-format -msgid "IP %s leased that isn't associated" +msgid "Finished snapshot and upload for VM %s" msgstr "" -#: nova/network/manager.py:197 +#: ../nova/virt/xenapi/vmops.py:356 #, python-format -msgid "IP %s leased to bad mac %s vs %s" +msgid "VM %(vm)s already halted, skipping shutdown..." +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:389 +msgid "Removing kernel/ramdisk files" msgstr "" -#: nova/network/manager.py:205 +#: ../nova/virt/xenapi/vmops.py:399 +msgid "kernel/ramdisk files removed" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:561 #, python-format -msgid "IP %s leased that was already deallocated" +msgid "" +"TIMEOUT: The call to %(method)s timed out. VM id=%(instance_id)s; " +"args=%(strargs)s" msgstr "" -#: nova/network/manager.py:214 +#: ../nova/virt/xenapi/vmops.py:564 #, python-format -msgid "IP %s released that isn't associated" +msgid "" +"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. VM " +"id=%(instance_id)s; args=%(strargs)s" msgstr "" -#: nova/network/manager.py:217 +#: ../nova/virt/xenapi/vmops.py:569 #, python-format -msgid "IP %s released from bad mac %s vs %s" +msgid "" +"The call to %(method)s returned an error: %(e)s. 
VM id=%(instance_id)s; " +"args=%(strargs)s" msgstr "" -#: nova/network/manager.py:220 +#: ../nova/virt/xenapi/vmops.py:760 #, python-format -msgid "IP %s released that was not leased" +msgid "OpenSSL error: %s" msgstr "" -#: nova/network/manager.py:442 +#: ../nova/tests/test_compute.py:148 #, python-format -msgid "Dissassociated %s stale fixed ip(s)" +msgid "Running instances: %s" msgstr "" -#: nova/objectstore/handler.py:106 +#: ../nova/tests/test_compute.py:154 #, python-format -msgid "Unknown S3 value type %r" +msgid "After terminating instances: %s" msgstr "" -#: nova/objectstore/handler.py:137 -msgid "Authenticated request" +#: ../nova/cloudpipe/pipelib.py:45 +msgid "Template for script to run on cloudpipe instance boot" msgstr "" -#: nova/objectstore/handler.py:182 -msgid "List of buckets requested" +#: ../nova/cloudpipe/pipelib.py:48 +msgid "Network to push into openvpn config" +msgstr "" + +#: ../nova/cloudpipe/pipelib.py:51 +msgid "Netmask to push into openvpn config" msgstr "" -#: nova/objectstore/handler.py:209 +#: ../nova/cloudpipe/pipelib.py:97 #, python-format -msgid "List keys for bucket %s" +msgid "Launching VPN for %s" +msgstr "Executando VPN para %s" + +#: ../nova/db/sqlalchemy/migration.py:35 +msgid "python-migrate is not installed. Exiting." msgstr "" -#: nova/objectstore/handler.py:217 +#: ../nova/image/s3.py:99 #, python-format -msgid "Unauthorized attempt to access bucket %s" +msgid "Image %s could not be found" msgstr "" -#: nova/objectstore/handler.py:235 +#: ../nova/api/ec2/__init__.py:121 +msgid "Too many failed authentications." +msgstr "Muitas falhas de autenticação." + +#: ../nova/api/ec2/__init__.py:131 #, python-format -msgid "Creating bucket %s" +msgid "" +"Access key %(access_key)s has had %(failures)d failed authentications and " +"will be locked out for %(lock_mins)d minutes." 
msgstr "" -#: nova/objectstore/handler.py:245 +#: ../nova/api/ec2/__init__.py:169 ../nova/objectstore/handler.py:140 #, python-format -msgid "Deleting bucket %s" +msgid "Authentication Failure: %s" +msgstr "Falha de Autenticação: %s" + +#: ../nova/api/ec2/__init__.py:182 +#, python-format +msgid "Authenticated Request For %(uname)s:%(pname)s)" msgstr "" -#: nova/objectstore/handler.py:249 +#: ../nova/api/ec2/__init__.py:207 #, python-format -msgid "Unauthorized attempt to delete bucket %s" +msgid "action: %s" +msgstr "ação: %s" + +#: ../nova/api/ec2/__init__.py:209 +#, python-format +msgid "arg: %(key)s\t\tval: %(value)s" msgstr "" -#: nova/objectstore/handler.py:271 +#: ../nova/api/ec2/__init__.py:281 #, python-format -msgid "Getting object: %s / %s" +msgid "" +"Unauthorized request for controller=%(controller)s and action=%(action)s" msgstr "" -#: nova/objectstore/handler.py:274 +#: ../nova/api/ec2/__init__.py:314 #, python-format -msgid "Unauthorized attempt to get object %s from bucket %s" +msgid "InstanceNotFound raised: %s" msgstr "" -#: nova/objectstore/handler.py:292 +#: ../nova/api/ec2/__init__.py:320 #, python-format -msgid "Putting object: %s / %s" +msgid "VolumeNotFound raised: %s" msgstr "" -#: nova/objectstore/handler.py:295 +#: ../nova/api/ec2/__init__.py:326 #, python-format -msgid "Unauthorized attempt to upload object %s to bucket %s" +msgid "NotFound raised: %s" +msgstr "NotFound lançado: %s" + +#: ../nova/api/ec2/__init__.py:329 +#, python-format +msgid "ApiError raised: %s" +msgstr "ApiError lançado: %s" + +#: ../nova/api/ec2/__init__.py:338 +#, python-format +msgid "Unexpected error raised: %s" +msgstr "Erro inexperado lançado: %s" + +#: ../nova/api/ec2/__init__.py:343 +msgid "An unknown error has occurred. Please try your request again." msgstr "" +"Ocorreu um erro desconhecido. Por favor tente sua requisição novamente." 
+ +#: ../nova/auth/dbdriver.py:84 +#, python-format +msgid "User %s already exists" +msgstr "Usuário %s já existe" + +#: ../nova/auth/dbdriver.py:106 ../nova/auth/ldapdriver.py:232 +#, python-format +msgid "Project can't be created because manager %s doesn't exist" +msgstr "Projeto não pode ser criado porque o gerente %s não existe." + +#: ../nova/auth/dbdriver.py:122 ../nova/auth/ldapdriver.py:243 +#, python-format +msgid "Project can't be created because user %s doesn't exist" +msgstr "Projeto não pode ser criado porque o usuário %s não existe" + +#: ../nova/auth/dbdriver.py:135 ../nova/auth/ldapdriver.py:229 +#, python-format +msgid "Project can't be created because project %s already exists" +msgstr "Projeto não pode ser criado porque o projeto %s já existe." -#: nova/objectstore/handler.py:314 +#: ../nova/auth/dbdriver.py:157 ../nova/auth/ldapdriver.py:268 #, python-format -msgid "Deleting object: %s / %s" +msgid "Project can't be modified because manager %s doesn't exist" +msgstr "Projeto não pode ser modificado porque o gerente %s não existe." 
+ +#: ../nova/auth/dbdriver.py:245 +#, python-format +msgid "User \"%s\" not found" +msgstr "Usuário \"%s\" não encontrado" + +#: ../nova/auth/dbdriver.py:248 +#, python-format +msgid "Project \"%s\" not found" +msgstr "Projeto \"%s\" não encontrado" + +#: ../nova/virt/xenapi_conn.py:129 +msgid "" +"Must specify xenapi_connection_url, xenapi_connection_username (optionally), " +"and xenapi_connection_password to use connection_type=xenapi" msgstr "" -#: nova/objectstore/handler.py:393 +#: ../nova/virt/xenapi_conn.py:311 #, python-format -msgid "Not authorized to upload image: invalid directory %s" +msgid "Task [%(name)s] %(task)s status: success %(result)s" msgstr "" -#: nova/objectstore/handler.py:401 +#: ../nova/virt/xenapi_conn.py:317 #, python-format -msgid "Not authorized to upload image: unauthorized bucket %s" +msgid "Task [%(name)s] %(task)s status: %(status)s %(error_info)s" msgstr "" -#: nova/objectstore/handler.py:406 +#: ../nova/virt/xenapi_conn.py:331 ../nova/virt/xenapi_conn.py:344 #, python-format -msgid "Starting image upload: %s" +msgid "Got exception: %s" msgstr "" -#: nova/objectstore/handler.py:420 +#: ../nova/compute/monitor.py:259 #, python-format -msgid "Not authorized to update attributes of image %s" +msgid "updating %s..." 
+msgstr "" + +#: ../nova/compute/monitor.py:289 +msgid "unexpected error during update" msgstr "" -#: nova/objectstore/handler.py:428 +#: ../nova/compute/monitor.py:356 #, python-format -msgid "Toggling publicity flag of image %s %r" +msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\"" msgstr "" -#: nova/objectstore/handler.py:433 +#: ../nova/compute/monitor.py:379 #, python-format -msgid "Updating user fields on image %s" +msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\"" +msgstr "" + +#: ../nova/compute/monitor.py:414 +msgid "unexpected exception getting connection" msgstr "" -#: nova/objectstore/handler.py:447 +#: ../nova/compute/monitor.py:429 #, python-format -msgid "Unauthorized attempt to delete image %s" +msgid "Found instance: %s" msgstr "" -#: nova/objectstore/handler.py:452 +#: ../nova/volume/san.py:67 #, python-format -msgid "Deleted image: %s" +msgid "Could not find iSCSI export for volume %s" msgstr "" -#: nova/scheduler/chance.py:37 nova/scheduler/simple.py:73 -#: nova/scheduler/simple.py:106 nova/scheduler/simple.py:118 -msgid "No hosts found" +#: ../nova/api/ec2/apirequest.py:100 +#, python-format +msgid "" +"Unsupported API request: controller = %(controller)s, action = %(action)s" msgstr "" -#: nova/scheduler/driver.py:66 -msgid "Must implement a fallback schedule" +#: ../nova/api/openstack/__init__.py:55 +#, python-format +msgid "Caught error: %s" +msgstr "Capturado o erro: %s" + +#: ../nova/api/openstack/__init__.py:76 +msgid "Including admin operations in API." +msgstr "Incluindo operações administrativas na API." 
+ +#: ../nova/console/xvp.py:99 +msgid "Rebuilding xvp conf" msgstr "" -#: nova/scheduler/manager.py:69 +#: ../nova/console/xvp.py:116 #, python-format -msgid "Casting to %s %s for %s" +msgid "Re-wrote %s" msgstr "" -#: nova/scheduler/simple.py:63 -msgid "All hosts have too many cores" +#: ../nova/console/xvp.py:121 +msgid "Stopping xvp" msgstr "" -#: nova/scheduler/simple.py:95 -msgid "All hosts have too many gigabytes" +#: ../nova/console/xvp.py:134 +msgid "Starting xvp" msgstr "" -#: nova/scheduler/simple.py:115 -msgid "All hosts have too many networks" +#: ../nova/console/xvp.py:141 +#, python-format +msgid "Error starting xvp: %s" msgstr "" -#: nova/tests/test_cloud.py:198 -msgid "Can't test instances without a real virtual env." +#: ../nova/console/xvp.py:144 +msgid "Restarting xvp" msgstr "" -#: nova/tests/test_cloud.py:210 -#, python-format -msgid "Need to watch instance %s until it's running..." +#: ../nova/console/xvp.py:146 +msgid "xvp not running..." msgstr "" -#: nova/tests/test_compute.py:104 -#, python-format -msgid "Running instances: %s" +#: ../bin/nova-manage.py:272 +msgid "" +"The above error may show that the database has not been created.\n" +"Please create a database using nova-manage sync db before running this " +"command." msgstr "" -#: nova/tests/test_compute.py:110 -#, python-format -msgid "After terminating instances: %s" +#: ../bin/nova-manage.py:426 +msgid "" +"No more networks available. If this is a new installation, you need\n" +"to call something like this:\n" +"\n" +" nova-manage network create 10.0.0.0/8 10 64\n" +"\n" +msgstr "" + +#: ../bin/nova-manage.py:431 +msgid "" +"The above error may show that the certificate db has not been created.\n" +"Please create a database by running a nova-api server on this host." 
+msgstr "" + +#: ../bin/nova-manage.py:447 ../bin/nova-manage.py:536 +msgid "network" +msgstr "" + +#: ../bin/nova-manage.py:448 +msgid "IP address" msgstr "" -#: nova/tests/test_rpc.py:89 +#: ../bin/nova-manage.py:449 +msgid "MAC address" +msgstr "" + +#: ../bin/nova-manage.py:450 +msgid "hostname" +msgstr "" + +#: ../bin/nova-manage.py:451 +msgid "host" +msgstr "" + +#: ../bin/nova-manage.py:537 +msgid "netmask" +msgstr "" + +#: ../bin/nova-manage.py:538 +msgid "start address" +msgstr "" + +#: ../nova/virt/disk.py:69 #, python-format -msgid "Nested received %s, %s" +msgid "Failed to load partition: %s" msgstr "" -#: nova/tests/test_rpc.py:94 +#: ../nova/virt/disk.py:91 #, python-format -msgid "Nested return %s" +msgid "Failed to mount filesystem: %s" msgstr "" -#: nova/tests/test_rpc.py:119 nova/tests/test_rpc.py:125 +#: ../nova/virt/disk.py:124 #, python-format -msgid "Received %s" +msgid "nbd device %s did not show up" msgstr "" -#: nova/tests/test_volume.py:162 +#: ../nova/virt/disk.py:128 #, python-format -msgid "Target %s allocated" +msgid "Could not attach image to loopback: %s" msgstr "" -#: nova/virt/connection.py:73 -msgid "Failed to open connection to the hypervisor" +#: ../nova/virt/disk.py:151 +msgid "No free nbd devices" msgstr "" -#: nova/virt/fake.py:210 +#: ../doc/ext/nova_todo.py:46 #, python-format -msgid "Instance %s Not Found" +msgid "%(filename)s, line %(line_info)d" msgstr "" -#: nova/virt/hyperv.py:118 +#. 
FIXME(chiradeep): implement this +#: ../nova/virt/hyperv.py:118 msgid "In init host" msgstr "" -#: nova/virt/hyperv.py:131 +#: ../nova/virt/hyperv.py:131 #, python-format msgid "Attempt to create duplicate vm %s" msgstr "" -#: nova/virt/hyperv.py:148 +#: ../nova/virt/hyperv.py:148 #, python-format msgid "Starting VM %s " msgstr "" -#: nova/virt/hyperv.py:150 +#: ../nova/virt/hyperv.py:150 #, python-format msgid "Started VM %s " msgstr "" -#: nova/virt/hyperv.py:152 +#: ../nova/virt/hyperv.py:152 #, python-format msgid "spawn vm failed: %s" msgstr "" -#: nova/virt/hyperv.py:169 +#: ../nova/virt/hyperv.py:169 #, python-format msgid "Failed to create VM %s" msgstr "" -#: nova/virt/hyperv.py:171 nova/virt/xenapi/vm_utils.py:125 -#, python-format -msgid "Created VM %s..." -msgstr "" - -#: nova/virt/hyperv.py:188 +#: ../nova/virt/hyperv.py:188 #, python-format msgid "Set memory for vm %s..." msgstr "" -#: nova/virt/hyperv.py:198 +#: ../nova/virt/hyperv.py:198 #, python-format msgid "Set vcpus for vm %s..." 
msgstr "" -#: nova/virt/hyperv.py:202 +#: ../nova/virt/hyperv.py:202 #, python-format -msgid "Creating disk for %s by attaching disk file %s" +msgid "Creating disk for %(vm_name)s by attaching disk file %(vhdfile)s" msgstr "" -#: nova/virt/hyperv.py:227 +#: ../nova/virt/hyperv.py:227 #, python-format msgid "Failed to add diskdrive to VM %s" msgstr "" -#: nova/virt/hyperv.py:230 +#: ../nova/virt/hyperv.py:230 #, python-format msgid "New disk drive path is %s" msgstr "" -#: nova/virt/hyperv.py:247 +#: ../nova/virt/hyperv.py:247 #, python-format msgid "Failed to add vhd file to VM %s" msgstr "" -#: nova/virt/hyperv.py:249 +#: ../nova/virt/hyperv.py:249 #, python-format msgid "Created disk for %s" msgstr "" -#: nova/virt/hyperv.py:253 +#: ../nova/virt/hyperv.py:253 #, python-format msgid "Creating nic for %s " msgstr "" -#: nova/virt/hyperv.py:272 +#: ../nova/virt/hyperv.py:272 msgid "Failed creating a port on the external vswitch" msgstr "" -#: nova/virt/hyperv.py:273 +#: ../nova/virt/hyperv.py:273 #, python-format msgid "Failed creating port for %s" msgstr "" -#: nova/virt/hyperv.py:275 +#: ../nova/virt/hyperv.py:276 #, python-format -msgid "Created switch port %s on switch %s" +msgid "Created switch port %(vm_name)s on switch %(ext_path)s" msgstr "" -#: nova/virt/hyperv.py:285 +#: ../nova/virt/hyperv.py:286 #, python-format msgid "Failed to add nic to VM %s" msgstr "" -#: nova/virt/hyperv.py:287 +#: ../nova/virt/hyperv.py:288 #, python-format msgid "Created nic for %s " msgstr "" -#: nova/virt/hyperv.py:320 +#: ../nova/virt/hyperv.py:321 #, python-format msgid "WMI job failed: %s" msgstr "" -#: nova/virt/hyperv.py:322 +#: ../nova/virt/hyperv.py:325 #, python-format -msgid "WMI job succeeded: %s, Elapsed=%s " +msgid "WMI job succeeded: %(desc)s, Elapsed=%(elap)s " msgstr "" -#: nova/virt/hyperv.py:358 +#: ../nova/virt/hyperv.py:361 #, python-format msgid "Got request to destroy vm %s" msgstr "" -#: nova/virt/hyperv.py:383 +#: ../nova/virt/hyperv.py:386 #, 
python-format msgid "Failed to destroy vm %s" msgstr "" -#: nova/virt/hyperv.py:389 +#: ../nova/virt/hyperv.py:393 #, python-format -msgid "Del: disk %s vm %s" +msgid "Del: disk %(vhdfile)s vm %(instance_name)s" msgstr "" -#: nova/virt/hyperv.py:405 +#: ../nova/virt/hyperv.py:415 #, python-format msgid "" -"Got Info for vm %s: state=%s, mem=%s, num_cpu=%s, " -"cpu_time=%s" +"Got Info for vm %(instance_id)s: state=%(state)s, mem=%(memusage)s, " +"num_cpu=%(numprocs)s, cpu_time=%(uptime)s" msgstr "" -#: nova/virt/hyperv.py:424 nova/virt/xenapi/vm_utils.py:301 +#: ../nova/virt/hyperv.py:451 #, python-format -msgid "duplicate name found: %s" +msgid "Successfully changed vm state of %(vm_name)s to %(req_state)s" msgstr "" -#: nova/virt/hyperv.py:444 +#: ../nova/virt/hyperv.py:454 #, python-format -msgid "Successfully changed vm state of %s to %s" +msgid "Failed to change vm state of %(vm_name)s to %(req_state)s" msgstr "" -#: nova/virt/hyperv.py:447 nova/virt/hyperv.py:449 +#: ../nova/compute/api.py:71 #, python-format -msgid "Failed to change vm state of %s to %s" +msgid "Instance %d was not found in get_network_topic" msgstr "" -#: nova/virt/images.py:70 +#: ../nova/compute/api.py:77 #, python-format -msgid "Finished retreving %s -- placed in %s" +msgid "Instance %d has no host" msgstr "" -#: nova/virt/libvirt_conn.py:144 +#: ../nova/compute/api.py:97 #, python-format -msgid "Connecting to libvirt: %s" +msgid "Quota exceeeded for %(pid)s, tried to run %(min_count)s instances" msgstr "" -#: nova/virt/libvirt_conn.py:157 -msgid "Connection to libvirt broke" +#: ../nova/compute/api.py:99 +#, python-format +msgid "" +"Instance quota exceeded. You can only run %s more instances of this type." 
msgstr "" -#: nova/virt/libvirt_conn.py:229 -#, python-format -msgid "instance %s: deleting instance files %s" +#: ../nova/compute/api.py:112 +msgid "Creating a raw instance" msgstr "" -#: nova/virt/libvirt_conn.py:271 +#: ../nova/compute/api.py:160 #, python-format -msgid "No disk at %s" +msgid "Going to run %s instances..." msgstr "" -#: nova/virt/libvirt_conn.py:278 -msgid "Instance snapshotting is not supported for libvirtat this time" +#: ../nova/compute/api.py:187 +#, python-format +msgid "Casting to scheduler for %(pid)s/%(uid)s's instance %(instance_id)s" msgstr "" -#: nova/virt/libvirt_conn.py:294 +#: ../nova/compute/api.py:292 #, python-format -msgid "instance %s: rebooted" +msgid "Going to try to terminate %s" msgstr "" -#: nova/virt/libvirt_conn.py:297 +#: ../nova/compute/api.py:296 #, python-format -msgid "_wait_for_reboot failed: %s" +msgid "Instance %d was not found during terminate" msgstr "" -#: nova/virt/libvirt_conn.py:340 +#: ../nova/compute/api.py:301 #, python-format -msgid "instance %s: rescued" +msgid "Instance %d is already being terminated" msgstr "" -#: nova/virt/libvirt_conn.py:343 +#: ../nova/compute/api.py:481 #, python-format -msgid "_wait_for_rescue failed: %s" +msgid "Invalid device specified: %s. Example device: /dev/vdb" +msgstr "" + +#: ../nova/compute/api.py:496 +msgid "Volume isn't attached to anything!" msgstr "" -#: nova/virt/libvirt_conn.py:370 +#: ../nova/rpc.py:98 #, python-format -msgid "instance %s: is running" +msgid "" +"AMQP server on %(fl_host)s:%(fl_port)d is unreachable. Trying again in " +"%(fl_intv)d seconds." msgstr "" -#: nova/virt/libvirt_conn.py:381 +#: ../nova/rpc.py:103 #, python-format -msgid "instance %s: booted" +msgid "Unable to connect to AMQP server after %d tries. Shutting down." msgstr "" +"Não foi possível conectar ao servidor AMQP após %d tentativas. Desligando." 
+ +#: ../nova/rpc.py:122 +msgid "Reconnected to queue" +msgstr "Reconectado à fila" -#: nova/virt/libvirt_conn.py:384 nova/virt/xenapi/vmops.py:116 +#: ../nova/rpc.py:129 +msgid "Failed to fetch message from queue" +msgstr "Falha ao obter mensagem da fila" + +#: ../nova/rpc.py:159 #, python-format -msgid "instance %s: failed to boot" +msgid "Initing the Adapter Consumer for %s" +msgstr "Iniciando o Adaptador Consumidor para %s" + +#: ../nova/rpc.py:178 +#, python-format +msgid "received %s" +msgstr "recebido %s" + +#. NOTE(vish): we may not want to ack here, but that means that bad +#. messages stay in the queue indefinitely, so for now +#. we just log the message and send an error string +#. back to the caller +#: ../nova/rpc.py:191 +#, python-format +msgid "no method for message: %s" +msgstr "sem método para mensagem: %s" + +#: ../nova/rpc.py:192 +#, python-format +msgid "No method for message: %s" +msgstr "Sem método para mensagem: %s" + +#: ../nova/rpc.py:253 +#, python-format +msgid "Returning exception %s to caller" +msgstr "Retornando exceção %s ao método de origem" + +#: ../nova/rpc.py:294 +#, python-format +msgid "unpacked context: %s" +msgstr "conteúdo descompactado: %s" + +#: ../nova/rpc.py:313 +msgid "Making asynchronous call..." +msgstr "Fazendo chamada assíncrona..." + +#: ../nova/rpc.py:316 +#, python-format +msgid "MSG_ID is %s" +msgstr "MSG_ID é %s" + +#: ../nova/rpc.py:354 +msgid "Making asynchronous cast..." msgstr "" -#: nova/virt/libvirt_conn.py:395 +#: ../nova/rpc.py:364 #, python-format -msgid "virsh said: %r" +msgid "response %s" +msgstr "resposta %s" + +#: ../nova/rpc.py:373 +#, python-format +msgid "topic is %s" +msgstr "topico é %s" + +#: ../nova/rpc.py:374 +#, python-format +msgid "message %s" +msgstr "mensagem %s" + +#: ../nova/volume/driver.py:78 +#, python-format +msgid "Recovering from a failed execute. 
Try number %s" msgstr "" -#: nova/virt/libvirt_conn.py:399 -msgid "cool, it's a device" +#: ../nova/volume/driver.py:87 +#, python-format +msgid "volume group %s doesn't exist" msgstr "" -#: nova/virt/libvirt_conn.py:407 +#: ../nova/volume/driver.py:220 #, python-format -msgid "data: %r, fpath: %r" +msgid "FAKE AOE: %s" msgstr "" -#: nova/virt/libvirt_conn.py:415 +#: ../nova/volume/driver.py:233 +msgid "Skipping ensure_export. No iscsi_target " +msgstr "" + +#: ../nova/volume/driver.py:279 ../nova/volume/driver.py:288 +msgid "Skipping remove_export. No iscsi_target " +msgstr "" + +#: ../nova/volume/driver.py:347 #, python-format -msgid "Contents of file %s: %r" +msgid "FAKE ISCSI: %s" msgstr "" -#: nova/virt/libvirt_conn.py:449 +#: ../nova/volume/driver.py:359 #, python-format -msgid "instance %s: Creating image" +msgid "rbd has no pool %s" msgstr "" -#: nova/virt/libvirt_conn.py:505 +#: ../nova/volume/driver.py:414 #, python-format -msgid "instance %s: injecting key into image %s" +msgid "Sheepdog is not working: %s" msgstr "" -#: nova/virt/libvirt_conn.py:508 +#: ../nova/volume/driver.py:416 +msgid "Sheepdog is not working" +msgstr "" + +#: ../nova/wsgi.py:68 #, python-format -msgid "instance %s: injecting net into image %s" +msgid "Starting %(arg0)s on %(host)s:%(port)s" +msgstr "" + +#: ../nova/wsgi.py:147 +msgid "You must implement __call__" msgstr "" -#: nova/virt/libvirt_conn.py:516 +#: ../bin/nova-instancemonitor.py:55 +msgid "Starting instance monitor" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:58 +msgid "leasing ip" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:73 +msgid "Adopted old lease or got a change of mac/hostname" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:80 +msgid "releasing ip" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:123 #, python-format -msgid "instance %s: ignoring error injecting data into image %s (%s)" +msgid "" +"Called %(action)s for mac %(mac)s with ip %(ip)s and hostname %(hostname)s " +"on interface %(interface)s" msgstr "" -#: 
nova/virt/libvirt_conn.py:544 nova/virt/libvirt_conn.py:547 +#: ../nova/virt/fake.py:239 #, python-format -msgid "instance %s: starting toXML method" +msgid "Instance %s Not Found" msgstr "" -#: nova/virt/libvirt_conn.py:589 +#: ../nova/network/manager.py:153 #, python-format -msgid "instance %s: finished toXML method" +msgid "Dissassociated %s stale fixed ip(s)" msgstr "" -#: nova/virt/xenapi_conn.py:113 -msgid "" -"Must specify xenapi_connection_url, xenapi_connection_username (optionally), " -"and xenapi_connection_password to use connection_type=xenapi" +#: ../nova/network/manager.py:157 +msgid "setting network host" msgstr "" -#: nova/virt/xenapi_conn.py:263 +#: ../nova/network/manager.py:212 #, python-format -msgid "Task [%s] %s status: success %s" +msgid "Leasing IP %s" msgstr "" -#: nova/virt/xenapi_conn.py:271 +#: ../nova/network/manager.py:216 #, python-format -msgid "Task [%s] %s status: %s %s" +msgid "IP %s leased that isn't associated" msgstr "" -#: nova/virt/xenapi_conn.py:287 nova/virt/xenapi_conn.py:300 +#: ../nova/network/manager.py:220 #, python-format -msgid "Got exception: %s" +msgid "IP %(address)s leased to bad mac %(inst_addr)s vs %(mac)s" msgstr "" -#: nova/virt/xenapi/fake.py:72 +#: ../nova/network/manager.py:228 #, python-format -msgid "%s: _db_content => %s" +msgid "IP %s leased that was already deallocated" msgstr "" -#: nova/virt/xenapi/fake.py:247 nova/virt/xenapi/fake.py:338 -#: nova/virt/xenapi/fake.py:356 nova/virt/xenapi/fake.py:404 -msgid "Raising NotImplemented" +#: ../nova/network/manager.py:233 +#, python-format +msgid "Releasing IP %s" msgstr "" -#: nova/virt/xenapi/fake.py:249 +#: ../nova/network/manager.py:237 #, python-format -msgid "xenapi.fake does not have an implementation for %s" +msgid "IP %s released that isn't associated" msgstr "" -#: nova/virt/xenapi/fake.py:283 +#: ../nova/network/manager.py:241 #, python-format -msgid "Calling %s %s" +msgid "IP %(address)s released from bad mac %(inst_addr)s vs %(mac)s" msgstr 
"" -#: nova/virt/xenapi/fake.py:288 +#: ../nova/network/manager.py:244 #, python-format -msgid "Calling getter %s" +msgid "IP %s released that was not leased" msgstr "" -#: nova/virt/xenapi/fake.py:340 -#, python-format +#: ../nova/network/manager.py:519 msgid "" -"xenapi.fake does not have an implementation for %s or it has been called " -"with the wrong number of arguments" +"The sum between the number of networks and the vlan start cannot be greater " +"than 4094" msgstr "" -#: nova/virt/xenapi/network_utils.py:40 +#: ../nova/virt/xenapi/volume_utils.py:57 #, python-format -msgid "Found non-unique network for bridge %s" +msgid "Introducing %s..." msgstr "" -#: nova/virt/xenapi/network_utils.py:43 +#: ../nova/virt/xenapi/volume_utils.py:74 #, python-format -msgid "Found no network for bridge %s" +msgid "Introduced %(label)s as %(sr_ref)s." msgstr "" -#: nova/virt/xenapi/vm_utils.py:127 -#, python-format -msgid "Created VM %s as %s." +#: ../nova/virt/xenapi/volume_utils.py:78 +msgid "Unable to create Storage Repository" msgstr "" -#: nova/virt/xenapi/vm_utils.py:147 +#: ../nova/virt/xenapi/volume_utils.py:90 #, python-format -msgid "Creating VBD for VM %s, VDI %s ... " +msgid "Unable to find SR from VBD %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:149 +#: ../nova/virt/xenapi/volume_utils.py:96 #, python-format -msgid "Created VBD %s for VM %s, VDI %s." +msgid "Forgetting SR %s ... 
" msgstr "" -#: nova/virt/xenapi/vm_utils.py:165 +#: ../nova/virt/xenapi/volume_utils.py:101 #, python-format -msgid "VBD not found in instance %s" +msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:175 +#: ../nova/virt/xenapi/volume_utils.py:107 #, python-format -msgid "Unable to unplug VBD %s" +msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:187 +#: ../nova/virt/xenapi/volume_utils.py:111 #, python-format -msgid "Unable to destroy VBD %s" +msgid "Forgetting SR %s done." msgstr "" -#: nova/virt/xenapi/vm_utils.py:202 +#: ../nova/virt/xenapi/volume_utils.py:113 #, python-format -msgid "Creating VIF for VM %s, network %s." +msgid "Ignoring exception %(exc)s when forgetting SR %(sr_ref)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:205 +#: ../nova/virt/xenapi/volume_utils.py:123 #, python-format -msgid "Created VIF %s for VM %s, network %s." +msgid "Unable to introduce VDI on SR %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:216 +#: ../nova/virt/xenapi/volume_utils.py:128 #, python-format -msgid "Snapshotting VM %s with label '%s'..." +msgid "Unable to get record of VDI %s on" msgstr "" -#: nova/virt/xenapi/vm_utils.py:229 +#: ../nova/virt/xenapi/volume_utils.py:146 #, python-format -msgid "Created snapshot %s from VM %s." 
+msgid "Unable to introduce VDI for SR %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:243 +#: ../nova/virt/xenapi/volume_utils.py:175 #, python-format -msgid "Asking xapi to upload %s as '%s'" +msgid "Unable to obtain target information %(device_path)s, %(mountpoint)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:261 +#: ../nova/virt/xenapi/volume_utils.py:197 #, python-format -msgid "Asking xapi to fetch %s as %s" +msgid "Mountpoint cannot be translated: %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:279 +#: ../nova/objectstore/image.py:262 #, python-format -msgid "Looking up vdi %s for PV kernel" +msgid "Failed to decrypt private key: %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:290 +#: ../nova/objectstore/image.py:269 #, python-format -msgid "PV Kernel in VDI:%d" +msgid "Failed to decrypt initialization vector: %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:318 +#: ../nova/objectstore/image.py:277 #, python-format -msgid "VDI %s is still available" +msgid "Failed to decrypt image file %(image_file)s: %(err)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:331 +#: ../nova/objectstore/handler.py:106 #, python-format -msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgid "Unknown S3 value type %r" msgstr "" -#: nova/virt/xenapi/vm_utils.py:333 -#, python-format -msgid "(VM_UTILS) xenapi power_state -> |%s|" +#: ../nova/objectstore/handler.py:137 +msgid "Authenticated request" +msgstr "" + +#: ../nova/objectstore/handler.py:182 +msgid "List of buckets requested" msgstr "" -#: nova/virt/xenapi/vm_utils.py:390 +#: ../nova/objectstore/handler.py:209 #, python-format -msgid "VHD %s has parent %s" +msgid "List keys for bucket %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:407 +#: ../nova/objectstore/handler.py:217 #, python-format -msgid "Re-scanning SR %s" +msgid "Unauthorized attempt to access bucket %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:431 +#: ../nova/objectstore/handler.py:235 #, python-format -msgid "Parent %s doesn't match original parent %s, waiting for 
coalesce..." +msgid "Creating bucket %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:448 +#: ../nova/objectstore/handler.py:245 #, python-format -msgid "No VDIs found for VM %s" +msgid "Deleting bucket %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:452 +#: ../nova/objectstore/handler.py:249 #, python-format -msgid "Unexpected number of VDIs (%s) found for VM %s" +msgid "Unauthorized attempt to delete bucket %s" msgstr "" -#: nova/virt/xenapi/vmops.py:62 +#: ../nova/objectstore/handler.py:273 #, python-format -msgid "Attempted to create non-unique name %s" +msgid "Getting object: %(bname)s / %(nm)s" msgstr "" -#: nova/virt/xenapi/vmops.py:99 +#: ../nova/objectstore/handler.py:276 #, python-format -msgid "Starting VM %s..." +msgid "Unauthorized attempt to get object %(nm)s from bucket %(bname)s" msgstr "" -#: nova/virt/xenapi/vmops.py:101 +#: ../nova/objectstore/handler.py:296 #, python-format -msgid "Spawning VM %s created %s." +msgid "Putting object: %(bname)s / %(nm)s" msgstr "" -#: nova/virt/xenapi/vmops.py:112 +#: ../nova/objectstore/handler.py:299 #, python-format -msgid "Instance %s: booted" +msgid "Unauthorized attempt to upload object %(nm)s to bucket %(bname)s" msgstr "" -#: nova/virt/xenapi/vmops.py:137 +#: ../nova/objectstore/handler.py:318 #, python-format -msgid "Instance not present %s" +msgid "Deleting object: %(bname)s / %(nm)s" msgstr "" -#: nova/virt/xenapi/vmops.py:166 +#: ../nova/objectstore/handler.py:322 #, python-format -msgid "Starting snapshot for VM %s" +msgid "Unauthorized attempt to delete object %(nm)s from bucket %(bname)s" msgstr "" -#: nova/virt/xenapi/vmops.py:174 +#: ../nova/objectstore/handler.py:396 #, python-format -msgid "Unable to Snapshot %s: %s" +msgid "Not authorized to upload image: invalid directory %s" msgstr "" -#: nova/virt/xenapi/vmops.py:184 +#: ../nova/objectstore/handler.py:404 #, python-format -msgid "Finished snapshot and upload for VM %s" +msgid "Not authorized to upload image: unauthorized bucket %s" msgstr "" 
-#: nova/virt/xenapi/vmops.py:252 +#: ../nova/objectstore/handler.py:409 #, python-format -msgid "suspend: instance not present %s" +msgid "Starting image upload: %s" msgstr "" -#: nova/virt/xenapi/vmops.py:262 +#: ../nova/objectstore/handler.py:423 #, python-format -msgid "resume: instance not present %s" +msgid "Not authorized to update attributes of image %s" msgstr "" -#: nova/virt/xenapi/vmops.py:271 +#: ../nova/objectstore/handler.py:431 #, python-format -msgid "Instance not found %s" +msgid "Toggling publicity flag of image %(image_id)s %(newstatus)r" msgstr "" -#: nova/virt/xenapi/volume_utils.py:57 +#. other attributes imply update +#: ../nova/objectstore/handler.py:436 #, python-format -msgid "Introducing %s..." +msgid "Updating user fields on image %s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:74 +#: ../nova/objectstore/handler.py:450 #, python-format -msgid "Introduced %s as %s." +msgid "Unauthorized attempt to delete image %s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:78 -msgid "Unable to create Storage Repository" +#: ../nova/objectstore/handler.py:455 +#, python-format +msgid "Deleted image: %s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:90 +#: ../nova/auth/manager.py:259 #, python-format -msgid "Unable to find SR from VBD %s" +msgid "Looking up user: %r" +msgstr "Procurando usuário: %r" + +#: ../nova/auth/manager.py:263 +#, python-format +msgid "Failed authorization for access key %s" +msgstr "Falha de autorização para chave de acesso %s" + +#: ../nova/auth/manager.py:264 +#, python-format +msgid "No user found for access key %s" +msgstr "Nenhum usuário encontrado para chave de acesso %s" + +#: ../nova/auth/manager.py:270 +#, python-format +msgid "Using project name = user name (%s)" +msgstr "Usando nome do projeto = nome do usuário (%s)" + +#: ../nova/auth/manager.py:277 +#, python-format +msgid "failed authorization: no project named %(pjid)s (user=%(uname)s)" msgstr "" -#: nova/virt/xenapi/volume_utils.py:96 +#: 
../nova/auth/manager.py:279 #, python-format -msgid "Forgetting SR %s ... " +msgid "No project called %s could be found" +msgstr "Nenhum projeto chamado %s pode ser encontrado." + +#: ../nova/auth/manager.py:287 +#, python-format +msgid "" +"Failed authorization: user %(uname)s not admin and not member of project " +"%(pjname)s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:101 +#: ../nova/auth/manager.py:289 #, python-format -msgid "Ignoring exception %s when getting PBDs for %s" +msgid "User %(uid)s is not a member of project %(pjid)s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:107 +#: ../nova/auth/manager.py:298 ../nova/auth/manager.py:309 #, python-format -msgid "Ignoring exception %s when unplugging PBD %s" +msgid "Invalid signature for user %s" +msgstr "Assinatura inválida para usuário %s" + +#: ../nova/auth/manager.py:299 ../nova/auth/manager.py:310 +msgid "Signature does not match" +msgstr "Assinatura não confere" + +#: ../nova/auth/manager.py:380 +msgid "Must specify project" +msgstr "Deve especificar projeto" + +#: ../nova/auth/manager.py:414 +#, python-format +msgid "The %s role can not be found" +msgstr "O papel %s não foi encontrado" + +#: ../nova/auth/manager.py:416 +#, python-format +msgid "The %s role is global only" +msgstr "O papel %s é apenas global" + +#: ../nova/auth/manager.py:420 +#, python-format +msgid "Adding role %(role)s to user %(uid)s in project %(pid)s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:111 +#: ../nova/auth/manager.py:423 #, python-format -msgid "Forgetting SR %s done." 
+msgid "Adding sitewide role %(role)s to user %(uid)s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:113 +#: ../nova/auth/manager.py:448 #, python-format -msgid "Ignoring exception %s when forgetting SR %s" +msgid "Removing role %(role)s from user %(uid)s on project %(pid)s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:123 +#: ../nova/auth/manager.py:451 #, python-format -msgid "Unable to introduce VDI on SR %s" +msgid "Removing sitewide role %(role)s from user %(uid)s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:128 +#: ../nova/auth/manager.py:515 #, python-format -msgid "Unable to get record of VDI %s on" +msgid "Created project %(name)s with manager %(manager_user)s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:146 +#: ../nova/auth/manager.py:533 #, python-format -msgid "Unable to introduce VDI for SR %s" +msgid "modifying project %s" +msgstr "modificando projeto %s" + +#: ../nova/auth/manager.py:545 +#, python-format +msgid "Adding user %(uid)s to project %(pid)s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:175 +#: ../nova/auth/manager.py:566 #, python-format -msgid "Unable to obtain target information %s, %s" +msgid "Remove user %(uid)s from project %(pid)s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:197 +#: ../nova/auth/manager.py:592 #, python-format -msgid "Mountpoint cannot be translated: %s" +msgid "Deleting project %s" +msgstr "Excluindo projeto %s" + +#: ../nova/auth/manager.py:650 +#, python-format +msgid "Created user %(rvname)s (admin: %(rvadmin)r)" msgstr "" -#: nova/virt/xenapi/volumeops.py:51 +#: ../nova/auth/manager.py:659 #, python-format -msgid "Attach_volume: %s, %s, %s" +msgid "Deleting user %s" +msgstr "Apagando usuário %s" + +#: ../nova/auth/manager.py:669 +#, python-format +msgid "Access Key change for user %s" msgstr "" -#: nova/virt/xenapi/volumeops.py:69 +#: ../nova/auth/manager.py:671 #, python-format -msgid "Unable to create VDI on SR %s for instance %s" +msgid "Secret Key change for user %s" msgstr "" -#: 
nova/virt/xenapi/volumeops.py:81 +#: ../nova/auth/manager.py:673 #, python-format -msgid "Unable to use SR %s for instance %s" +msgid "Admin status set to %(admin)r for user %(uid)s" msgstr "" -#: nova/virt/xenapi/volumeops.py:93 +#: ../nova/auth/manager.py:722 #, python-format -msgid "Unable to attach volume to instance %s" +msgid "No vpn data for project %s" msgstr "" -#: nova/virt/xenapi/volumeops.py:95 +#: ../nova/service.py:161 #, python-format -msgid "Mountpoint %s attached to instance %s" +msgid "Starting %(topic)s node (version %(vcs_string)s)" msgstr "" -#: nova/virt/xenapi/volumeops.py:106 +#: ../nova/service.py:174 +msgid "Service killed that has no database entry" +msgstr "Encerrado serviço que não tem entrada na base de dados" + +#: ../nova/service.py:195 +msgid "The service database object disappeared, Recreating it." +msgstr "O objeto da base de dados do serviço desapareceu, Recriando." + +#: ../nova/service.py:207 +msgid "Recovered model server connection!" +msgstr "Recuperada conexão servidor de modelo." 
+ +#: ../nova/service.py:213 +msgid "model server went away" +msgstr "servidor de modelo perdido" + +#: ../nova/auth/ldapdriver.py:174 #, python-format -msgid "Detach_volume: %s, %s" +msgid "LDAP user %s already exists" msgstr "" -#: nova/virt/xenapi/volumeops.py:113 +#: ../nova/auth/ldapdriver.py:205 #, python-format -msgid "Unable to locate volume %s" +msgid "LDAP object for %s doesn't exist" +msgstr "Objeto LDAP para %s não existe" + +#: ../nova/auth/ldapdriver.py:348 +#, python-format +msgid "User %s doesn't exist" msgstr "" -#: nova/virt/xenapi/volumeops.py:121 +#: ../nova/auth/ldapdriver.py:472 #, python-format -msgid "Unable to detach volume %s" +msgid "Group can't be created because group %s already exists" msgstr "" -#: nova/virt/xenapi/volumeops.py:128 +#: ../nova/auth/ldapdriver.py:478 #, python-format -msgid "Mountpoint %s detached from instance %s" +msgid "Group can't be created because user %s doesn't exist" msgstr "" -#: nova/volume/api.py:44 +#: ../nova/auth/ldapdriver.py:495 #, python-format -msgid "Quota exceeeded for %s, tried to create %sG volume" +msgid "User %s can't be searched in group because the user doesn't exist" msgstr "" -#: nova/volume/api.py:46 +#: ../nova/auth/ldapdriver.py:507 #, python-format -msgid "Volume quota exceeded. 
You cannot create a volume of size %s" +msgid "User %s can't be added to the group because the user doesn't exist" msgstr "" -#: nova/volume/api.py:70 nova/volume/api.py:95 -msgid "Volume status must be available" +#: ../nova/auth/ldapdriver.py:510 ../nova/auth/ldapdriver.py:521 +#, python-format +msgid "The group at dn %s doesn't exist" msgstr "" -#: nova/volume/api.py:97 -msgid "Volume is already attached" +#: ../nova/auth/ldapdriver.py:513 +#, python-format +msgid "User %(uid)s is already a member of the group %(group_dn)s" msgstr "" -#: nova/volume/api.py:103 -msgid "Volume is already detached" +#: ../nova/auth/ldapdriver.py:524 +#, python-format +msgid "" +"User %s can't be removed from the group because the user doesn't exist" msgstr "" -#: nova/volume/driver.py:76 +#: ../nova/auth/ldapdriver.py:528 #, python-format -msgid "Recovering from a failed execute. Try number %s" +msgid "User %s is not a member of the group" msgstr "" -#: nova/volume/driver.py:85 +#: ../nova/auth/ldapdriver.py:542 #, python-format -msgid "volume group %s doesn't exist" +msgid "" +"Attempted to remove the last member of a group. Deleting the group at %s " +"instead." msgstr "" +"Tentatica de remover o último membto de um grupo. Ao invés disso excluindo o " +"grupo %s." 
-#: nova/volume/driver.py:210 +#: ../nova/auth/ldapdriver.py:549 #, python-format -msgid "FAKE AOE: %s" +msgid "User %s can't be removed from all because the user doesn't exist" msgstr "" -#: nova/volume/driver.py:315 +#: ../nova/auth/ldapdriver.py:564 #, python-format -msgid "FAKE ISCSI: %s" +msgid "Group at dn %s doesn't exist" +msgstr "Grupo no dn %s não existe" + +#: ../nova/virt/xenapi/network_utils.py:40 +#, python-format +msgid "Found non-unique network for bridge %s" msgstr "" -#: nova/volume/manager.py:85 +#: ../nova/virt/xenapi/network_utils.py:43 #, python-format -msgid "Re-exporting %s volumes" +msgid "Found no network for bridge %s" msgstr "" -#: nova/volume/manager.py:93 +#: ../nova/api/ec2/admin.py:97 #, python-format -msgid "volume %s: creating" +msgid "Creating new user: %s" +msgstr "Criando novo usuário: %s" + +#: ../nova/api/ec2/admin.py:105 +#, python-format +msgid "Deleting user: %s" +msgstr "Excluindo usuário: %s" + +#: ../nova/api/ec2/admin.py:127 +#, python-format +msgid "Adding role %(role)s to user %(user)s for project %(project)s" msgstr "" -#: nova/volume/manager.py:102 +#: ../nova/api/ec2/admin.py:131 #, python-format -msgid "volume %s: creating lv of size %sG" +msgid "Adding sitewide role %(role)s to user %(user)s" msgstr "" -#: nova/volume/manager.py:106 +#: ../nova/api/ec2/admin.py:137 #, python-format -msgid "volume %s: creating export" +msgid "Removing role %(role)s from user %(user)s for project %(project)s" msgstr "" -#: nova/volume/manager.py:113 +#: ../nova/api/ec2/admin.py:141 #, python-format -msgid "volume %s: created successfully" +msgid "Removing sitewide role %(role)s from user %(user)s" msgstr "" -#: nova/volume/manager.py:121 -msgid "Volume is still attached" +#: ../nova/api/ec2/admin.py:146 ../nova/api/ec2/admin.py:223 +msgid "operation must be add or remove" +msgstr "operações devem ser adicionar e excluir" + +#: ../nova/api/ec2/admin.py:159 +#, python-format +msgid "Getting x509 for user: %(name)s on project: 
%(project)s" msgstr "" -#: nova/volume/manager.py:123 -msgid "Volume is not local to this node" +#: ../nova/api/ec2/admin.py:177 +#, python-format +msgid "Create project %(name)s managed by %(manager_user)s" msgstr "" -#: nova/volume/manager.py:124 +#: ../nova/api/ec2/admin.py:190 #, python-format -msgid "volume %s: removing export" +msgid "Modify project: %(name)s managed by %(manager_user)s" msgstr "" -#: nova/volume/manager.py:126 +#: ../nova/api/ec2/admin.py:200 #, python-format -msgid "volume %s: deleting" +msgid "Delete project: %s" +msgstr "Excluir projeto: %s" + +#: ../nova/api/ec2/admin.py:214 +#, python-format +msgid "Adding user %(user)s to project %(project)s" msgstr "" -#: nova/volume/manager.py:129 +#: ../nova/api/ec2/admin.py:218 #, python-format -msgid "volume %s: deleted successfully" +msgid "Removing user %(user)s from project %(project)s" msgstr "" + +#, python-format +#~ msgid "" +#~ "%s\n" +#~ "Command: %s\n" +#~ "Exit code: %s\n" +#~ "Stdout: %r\n" +#~ "Stderr: %r" +#~ msgstr "" +#~ "%s\n" +#~ "Comando: %s\n" +#~ "Código de retorno: %s\n" +#~ "Stdout: %r\n" +#~ "Stderr: %r" + +#, python-format +#~ msgid "(%s) publish (key: %s) %s" +#~ msgstr "(%s) publicar (key: %s) %s" + +#, python-format +#~ msgid "AMQP server on %s:%d is unreachable. Trying again in %d seconds." +#~ msgstr "" +#~ "Servidor AMQP em %s:%d inatingível. Tentando novamente em %d segundos." + +#, python-format +#~ msgid "Binding %s to %s with key %s" +#~ msgstr "Atribuindo %s para %s com chave %s" + +#, python-format +#~ msgid "Getting from %s: %s" +#~ msgstr "Obtendo de %s: %s" + +#, python-format +#~ msgid "Starting %s node" +#~ msgstr "Iniciando nó %s" + +#, python-format +#~ msgid "Data store %s is unreachable. Trying again in %d seconds." +#~ msgstr "" +#~ "Repositório de dados %s não pode ser atingido. Tentando novamente em %d " +#~ "segundos." 
+ +#, python-format +#~ msgid "Couldn't get IP, using 127.0.0.1 %s" +#~ msgstr "Não foi possível obter IP, usando 127.0.0.1 %s" + +#, python-format +#~ msgid "" +#~ "Access key %s has had %d failed authentications and will be locked out for " +#~ "%d minutes." +#~ msgstr "" +#~ "Chave de acesso %s tem %d falhas de autenticação e vai ser bloqueada por %d " +#~ "minutos." + +#, python-format +#~ msgid "arg: %s\t\tval: %s" +#~ msgstr "argumento: %s\t\tvalor: %s" + +#, python-format +#~ msgid "Authenticated Request For %s:%s)" +#~ msgstr "Pedido de Autenticação Para: %s:%s" + +#, python-format +#~ msgid "Adding sitewide role %s to user %s" +#~ msgstr "Adicionando papel em todo site %s ao usuário %s" + +#, python-format +#~ msgid "Adding role %s to user %s for project %s" +#~ msgstr "Adicionando papel %s ao usuário %s para o projeto %s" + +#, python-format +#~ msgid "Unauthorized request for controller=%s and action=%s" +#~ msgstr "Requisição não autorizada para controlador=%s e ação=%s" + +#, python-format +#~ msgid "Removing role %s from user %s for project %s" +#~ msgstr "Removendo papel %s do usuário %s para o projeto %s" + +#, python-format +#~ msgid "Getting x509 for user: %s on project: %s" +#~ msgstr "Obtendo x509 para usuário: %s do projeto: %s" + +#, python-format +#~ msgid "Create project %s managed by %s" +#~ msgstr "Criar projeto %s gerenciado por %s" + +#, python-format +#~ msgid "Removing user %s from project %s" +#~ msgstr "Excluindo usuário %s do projeto %s" + +#, python-format +#~ msgid "Adding user %s to project %s" +#~ msgstr "Adicionando usuário %s ao projeto %s" + +#, python-format +#~ msgid "Unsupported API request: controller = %s,action = %s" +#~ msgstr "Requisição de API não suportada: controlador = %s,ação = %s" + +#, python-format +#~ msgid "Removing sitewide role %s from user %s" +#~ msgstr "Removendo papel %s em todo site do usuário %s" + +#, python-format +#~ msgid "Associate address %s to instance %s" +#~ msgstr "Atribuir endereço %s à 
instância %s" + +#, python-format +#~ msgid "Attach volume %s to instacne %s at %s" +#~ msgstr "Anexar volume %s para instância %s em %s" + +#, python-format +#~ msgid "Registered image %s with id %s" +#~ msgstr "Registrada imagem %s com id %s" + +#, python-format +#~ msgid "User %s is already a member of the group %s" +#~ msgstr "Usuário %s já pertence ao grupo %s" + +#, python-format +#~ msgid "User %s is not a member of project %s" +#~ msgstr "Usuário %s não é membro do projeto %s" + +#, python-format +#~ msgid "failed authorization: no project named %s (user=%s)" +#~ msgstr "falha de autorização: nenhum projeto de nome %s (usuário=%s)" + +#, python-format +#~ msgid "Failed authorization: user %s not admin and not member of project %s" +#~ msgstr "" +#~ "Falha de autorização: usuário %s não é administrador nem membro do projeto %s" + +#, python-format +#~ msgid "Created project %s with manager %s" +#~ msgstr "Criado projeto %s com gerente %s" + +#, python-format +#~ msgid "Removing role %s from user %s on project %s" +#~ msgstr "Removendo papel %s do usuário %s no projeto %s" + +#, python-format +#~ msgid "Adding role %s to user %s in project %s" +#~ msgstr "Adicionando papel %s ao usuário %s no projeto %s" + +#, python-format +#~ msgid "Remove user %s from project %s" +#~ msgstr "Remover usuário %s do projeto %s" + +#, python-format +#~ msgid "Created user %s (admin: %r)" +#~ msgstr "Criado usuário %s (administrador: %r)" @@ -7,2132 +7,2961 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n" -"POT-Creation-Date: 2011-01-10 11:25-0800\n" -"PO-Revision-Date: 2011-01-31 06:53+0000\n" +"POT-Creation-Date: 2011-02-21 10:03-0500\n" +"PO-Revision-Date: 2011-03-30 07:06+0000\n" "Last-Translator: Andrey Olykainen <Unknown>\n" "Language-Team: Russian <ru@li.org>\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Launchpad-Export-Date: 2011-02-05 05:36+0000\n" 
-"X-Generator: Launchpad (build 12177)\n" +"X-Launchpad-Export-Date: 2011-03-31 05:58+0000\n" +"X-Generator: Launchpad (build 12559)\n" -#: nova/crypto.py:46 -msgid "Filename of root CA" +#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55 +#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110 +#: ../nova/scheduler/simple.py:122 +msgid "No hosts found" msgstr "" -#: nova/crypto.py:49 -msgid "Filename of private key" -msgstr "Имя файла секретного ключа" +#: ../nova/exception.py:33 +msgid "Unexpected error while running command." +msgstr "Неожиданная ошибка при выполнении команды." -#: nova/crypto.py:51 -msgid "Filename of root Certificate Revokation List" +#: ../nova/exception.py:36 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" msgstr "" -#: nova/crypto.py:53 -msgid "Where we keep our keys" -msgstr "Путь к ключам" +#: ../nova/exception.py:107 +msgid "DB exception wrapped" +msgstr "" -#: nova/crypto.py:55 -msgid "Where we keep our root CA" +#. exc_type, exc_value, exc_traceback = sys.exc_info() +#: ../nova/exception.py:120 +msgid "Uncaught exception" +msgstr "Необработанное исключение" + +#: ../nova/volume/api.py:45 +#, python-format +msgid "Quota exceeeded for %(pid)s, tried to create %(size)sG volume" msgstr "" -#: nova/crypto.py:57 -msgid "Should we use a CA for each project?" +#: ../nova/volume/api.py:47 +#, python-format +msgid "Volume quota exceeded. 
You cannot create a volume of size %sG" msgstr "" -#: nova/crypto.py:61 +#: ../nova/volume/api.py:71 ../nova/volume/api.py:96 +msgid "Volume status must be available" +msgstr "" + +#: ../nova/volume/api.py:98 +msgid "Volume is already attached" +msgstr "" + +#: ../nova/volume/api.py:104 +msgid "Volume is already detached" +msgstr "" + +#: ../nova/api/openstack/servers.py:72 +msgid "Failed to read private ip" +msgstr "" + +#: ../nova/api/openstack/servers.py:79 +msgid "Failed to read public ip(s)" +msgstr "" + +#: ../nova/api/openstack/servers.py:152 #, python-format -msgid "Subject for certificate for users, %s for project, user, timestamp" +msgid "%(param)s property not found for image %(_image_id)s" msgstr "" -#: nova/crypto.py:66 +#: ../nova/api/openstack/servers.py:168 +msgid "No keypairs defined" +msgstr "" + +#: ../nova/api/openstack/servers.py:238 #, python-format -msgid "Subject for certificate for projects, %s for project, timestamp" +msgid "Compute.api::lock %s" msgstr "" -#: nova/crypto.py:71 +#: ../nova/api/openstack/servers.py:253 #, python-format -msgid "Subject for certificate for vpns, %s for project, timestamp" +msgid "Compute.api::unlock %s" msgstr "" -#: nova/crypto.py:258 +#: ../nova/api/openstack/servers.py:267 #, python-format -msgid "Flags path: %s" +msgid "Compute.api::get_lock %s" msgstr "" -#: nova/exception.py:33 -msgid "Unexpected error while running command." -msgstr "Неожиданная ошибка при выполнении команды." 
+#: ../nova/api/openstack/servers.py:281 +#, python-format +msgid "Compute.api::reset_network %s" +msgstr "" -#: nova/exception.py:36 +#: ../nova/api/openstack/servers.py:292 #, python-format -msgid "" -"%s\n" -"Command: %s\n" -"Exit code: %s\n" -"Stdout: %r\n" -"Stderr: %r" -msgstr "" -"%s\n" -"Команда: %s\n" -"Код завершения: %s\n" -"Stdout: %r\n" -"Stderr: %r" - -#: nova/exception.py:86 -msgid "Uncaught exception" -msgstr "Необработанное исключение" +msgid "Compute.api::pause %s" +msgstr "" -#: nova/fakerabbit.py:48 +#: ../nova/api/openstack/servers.py:303 #, python-format -msgid "(%s) publish (key: %s) %s" +msgid "Compute.api::unpause %s" msgstr "" -#: nova/fakerabbit.py:53 +#: ../nova/api/openstack/servers.py:314 #, python-format -msgid "Publishing to route %s" +msgid "compute.api::suspend %s" msgstr "" -#: nova/fakerabbit.py:83 +#: ../nova/api/openstack/servers.py:325 #, python-format -msgid "Declaring queue %s" -msgstr "Объявление очереди %s" +msgid "compute.api::resume %s" +msgstr "" -#: nova/fakerabbit.py:89 +#: ../nova/twistd.py:157 +msgid "Wrong number of arguments." +msgstr "Неверное число аргументов." + +#: ../nova/twistd.py:209 #, python-format -msgid "Declaring exchange %s" -msgstr "Объявление точки обмена %s" +msgid "pidfile %s does not exist. Daemon not running?\n" +msgstr "pidfile %s не обнаружен. 
Демон не запущен?\n" -#: nova/fakerabbit.py:95 +#: ../nova/twistd.py:221 +msgid "No such process" +msgstr "" + +#: ../nova/twistd.py:230 ../nova/service.py:224 #, python-format -msgid "Binding %s to %s with key %s" +msgid "Serving %s" msgstr "" -#: nova/fakerabbit.py:120 +#: ../nova/twistd.py:262 ../nova/service.py:225 +msgid "Full set of FLAGS:" +msgstr "" + +#: ../nova/twistd.py:266 #, python-format -msgid "Getting from %s: %s" -msgstr "Получение из %s: %s" +msgid "Starting %s" +msgstr "Запускается %s" -#: nova/rpc.py:92 +#: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101 +#: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741 +#: ../nova/api/ec2/__init__.py:317 #, python-format -msgid "AMQP server on %s:%d is unreachable. Trying again in %d seconds." -msgstr "AMQP сервер %s:%d недоступен. Повторная попытка через %d секунд." +msgid "Instance %s not found" +msgstr "" -#: nova/rpc.py:99 +#. NOTE: No Resource Pool concept so far +#: ../nova/virt/xenapi/volumeops.py:51 #, python-format -msgid "Unable to connect to AMQP server after %d tries. Shutting down." -msgstr "Не удалось подключиться к серверу AMQP после %d попыток. Выключение." 
+msgid "Attach_volume: %(instance_name)s, %(device_path)s, %(mountpoint)s" +msgstr "" -#: nova/rpc.py:118 -msgid "Reconnected to queue" -msgstr "Переподлючено к очереди" +#: ../nova/virt/xenapi/volumeops.py:69 +#, python-format +msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" +msgstr "" -#: nova/rpc.py:125 -msgid "Failed to fetch message from queue" -msgstr "Не удалось получить сообщение из очереди" +#: ../nova/virt/xenapi/volumeops.py:80 +#, python-format +msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s" +msgstr "" -#: nova/rpc.py:155 +#: ../nova/virt/xenapi/volumeops.py:91 #, python-format -msgid "Initing the Adapter Consumer for %s" +msgid "Unable to attach volume to instance %s" msgstr "" -#: nova/rpc.py:170 +#: ../nova/virt/xenapi/volumeops.py:93 #, python-format -msgid "received %s" -msgstr "получено %s" +msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" +msgstr "" -#: nova/rpc.py:183 +#. Detach VBD from VM +#: ../nova/virt/xenapi/volumeops.py:104 #, python-format -msgid "no method for message: %s" -msgstr "не определен метод для сообщения: %s" +msgid "Detach_volume: %(instance_name)s, %(mountpoint)s" +msgstr "" -#: nova/rpc.py:184 +#: ../nova/virt/xenapi/volumeops.py:112 #, python-format -msgid "No method for message: %s" -msgstr "Не определен метод для сообщения: %s" +msgid "Unable to locate volume %s" +msgstr "" -#: nova/rpc.py:245 +#: ../nova/virt/xenapi/volumeops.py:120 #, python-format -msgid "Returning exception %s to caller" +msgid "Unable to detach volume %s" msgstr "" -#: nova/rpc.py:286 +#: ../nova/virt/xenapi/volumeops.py:127 #, python-format -msgid "unpacked context: %s" +msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" msgstr "" -#: nova/rpc.py:305 -msgid "Making asynchronous call..." -msgstr "Выполняется асинхронный вызов..." 
+#: ../nova/compute/instance_types.py:41 +#, python-format +msgid "Unknown instance type: %s" +msgstr "" -#: nova/rpc.py:308 +#: ../nova/crypto.py:46 +msgid "Filename of root CA" +msgstr "" + +#: ../nova/crypto.py:49 +msgid "Filename of private key" +msgstr "Имя файла секретного ключа" + +#: ../nova/crypto.py:51 +msgid "Filename of root Certificate Revokation List" +msgstr "" + +#: ../nova/crypto.py:53 +msgid "Where we keep our keys" +msgstr "Путь к ключам" + +#: ../nova/crypto.py:55 +msgid "Where we keep our root CA" +msgstr "" + +#: ../nova/crypto.py:57 +msgid "Should we use a CA for each project?" +msgstr "" + +#: ../nova/crypto.py:61 #, python-format -msgid "MSG_ID is %s" -msgstr "MSG_ID is %s" +msgid "Subject for certificate for users, %s for project, user, timestamp" +msgstr "" -#: nova/rpc.py:356 +#: ../nova/crypto.py:66 #, python-format -msgid "response %s" -msgstr "ответ %s" +msgid "Subject for certificate for projects, %s for project, timestamp" +msgstr "" -#: nova/rpc.py:365 +#: ../nova/crypto.py:71 #, python-format -msgid "topic is %s" -msgstr "тема %s" +msgid "Subject for certificate for vpns, %s for project, timestamp" +msgstr "" -#: nova/rpc.py:366 +#: ../nova/crypto.py:258 #, python-format -msgid "message %s" -msgstr "сообщение %s" +msgid "Flags path: %s" +msgstr "" -#: nova/service.py:157 +#: ../nova/scheduler/manager.py:69 #, python-format -msgid "Starting %s node" -msgstr "Запускается нода %s" +msgid "Casting to %(topic)s %(host)s for %(method)s" +msgstr "" -#: nova/service.py:169 -msgid "Service killed that has no database entry" +#: ../nova/compute/manager.py:78 +#, python-format +msgid "check_instance_lock: decorating: |%s|" msgstr "" -#: nova/service.py:190 -msgid "The service database object disappeared, Recreating it." -msgstr "Объект сервиса в базе данных отсутствует, Повторное создание." 
+#: ../nova/compute/manager.py:80 +#, python-format +msgid "" +"check_instance_lock: arguments: |%(self)s| |%(context)s| |%(instance_id)s|" +msgstr "" -#: nova/service.py:202 -msgid "Recovered model server connection!" +#: ../nova/compute/manager.py:84 +#, python-format +msgid "check_instance_lock: locked: |%s|" msgstr "" -#: nova/service.py:208 -msgid "model server went away" +#: ../nova/compute/manager.py:86 +#, python-format +msgid "check_instance_lock: admin: |%s|" msgstr "" -#: nova/service.py:217 nova/db/sqlalchemy/__init__.py:43 +#: ../nova/compute/manager.py:91 #, python-format -msgid "Data store %s is unreachable. Trying again in %d seconds." -msgstr "Хранилище данных %s недоступно. Повторная попытка через %d секунд." +msgid "check_instance_lock: executing: |%s|" +msgstr "" -#: nova/service.py:232 nova/twistd.py:232 +#: ../nova/compute/manager.py:95 #, python-format -msgid "Serving %s" +msgid "check_instance_lock: not executing |%s|" msgstr "" -#: nova/service.py:234 nova/twistd.py:264 -msgid "Full set of FLAGS:" +#: ../nova/compute/manager.py:179 +msgid "Instance has already been created" msgstr "" -#: nova/twistd.py:211 +#: ../nova/compute/manager.py:180 #, python-format -msgid "pidfile %s does not exist. Daemon not running?\n" -msgstr "pidfile %s не обнаружен. Демон не запущен?\n" +msgid "instance %s: starting..." +msgstr "" -#: nova/twistd.py:268 +#. 
pylint: disable=W0702 +#: ../nova/compute/manager.py:219 #, python-format -msgid "Starting %s" -msgstr "Запускается %s" +msgid "instance %s: Failed to spawn" +msgstr "" -#: nova/utils.py:53 +#: ../nova/compute/manager.py:233 ../nova/tests/test_cloud.py:286 #, python-format -msgid "Inner Exception: %s" -msgstr "Вложенное исключение: %s" +msgid "Terminating instance %s" +msgstr "" -#: nova/utils.py:54 +#: ../nova/compute/manager.py:255 #, python-format -msgid "Class %s cannot be found" -msgstr "Класс %s не найден" +msgid "Deallocating address %s" +msgstr "" -#: nova/utils.py:113 +#: ../nova/compute/manager.py:268 #, python-format -msgid "Fetching %s" +msgid "trying to destroy already destroyed instance: %s" msgstr "" -#: nova/utils.py:125 +#: ../nova/compute/manager.py:282 #, python-format -msgid "Running cmd (subprocess): %s" +msgid "Rebooting instance %s" msgstr "" -#: nova/utils.py:138 +#: ../nova/compute/manager.py:287 #, python-format -msgid "Result was %s" -msgstr "Результат %s" +msgid "" +"trying to reboot a non-running instance: %(instance_id)s (state: %(state)s " +"expected: %(running)s)" +msgstr "" -#: nova/utils.py:171 +#: ../nova/compute/manager.py:311 #, python-format -msgid "debug in callback: %s" +msgid "instance %s: snapshotting" msgstr "" -#: nova/utils.py:176 +#: ../nova/compute/manager.py:316 #, python-format -msgid "Running %s" -msgstr "Выполняется %s" +msgid "" +"trying to snapshot a non-running instance: %(instance_id)s (state: %(state)s " +"expected: %(running)s)" +msgstr "" -#: nova/utils.py:207 +#: ../nova/compute/manager.py:332 #, python-format -msgid "Couldn't get IP, using 127.0.0.1 %s" -msgstr "Не удалось получить IP, используем 127.0.0.1 %s" +msgid "" +"trying to reset the password on a non-running instance: %(instance_id)s " +"(state: %(instance_state)s expected: %(expected_state)s)" +msgstr "" -#: nova/utils.py:289 +#: ../nova/compute/manager.py:335 #, python-format -msgid "Invalid backend: %s" +msgid "instance %s: setting admin 
password" msgstr "" -#: nova/utils.py:300 +#: ../nova/compute/manager.py:353 #, python-format -msgid "backend %s" +msgid "" +"trying to inject a file into a non-running instance: %(instance_id)s (state: " +"%(instance_state)s expected: %(expected_state)s)" msgstr "" -#: nova/api/ec2/__init__.py:133 -msgid "Too many failed authentications." -msgstr "Слишком много неудачных попыток аутентификации." +#: ../nova/compute/manager.py:362 +#, python-format +msgid "instance %(nm)s: injecting file to %(plain_path)s" +msgstr "" -#: nova/api/ec2/__init__.py:142 +#: ../nova/compute/manager.py:372 #, python-format -msgid "" -"Access key %s has had %d failed authentications and will be locked out for " -"%d minutes." +msgid "instance %s: rescuing" msgstr "" -"Ключ доступа %s имеет %d неудачных попыток аутентификации и будет " -"заблокирован на %d минут." -#: nova/api/ec2/__init__.py:179 nova/objectstore/handler.py:140 +#: ../nova/compute/manager.py:387 #, python-format -msgid "Authentication Failure: %s" -msgstr "Ошибка аутентификации: %s" +msgid "instance %s: unrescuing" +msgstr "" -#: nova/api/ec2/__init__.py:190 +#: ../nova/compute/manager.py:406 #, python-format -msgid "Authenticated Request For %s:%s)" -msgstr "Запрос аутентификации для %s:%s)" +msgid "instance %s: pausing" +msgstr "" -#: nova/api/ec2/__init__.py:227 +#: ../nova/compute/manager.py:423 #, python-format -msgid "action: %s" -msgstr "действие: %s" +msgid "instance %s: unpausing" +msgstr "" -#: nova/api/ec2/__init__.py:229 +#: ../nova/compute/manager.py:440 #, python-format -msgid "arg: %s\t\tval: %s" -msgstr "arg: %s\t\tval: %s" +msgid "instance %s: retrieving diagnostics" +msgstr "" -#: nova/api/ec2/__init__.py:301 +#: ../nova/compute/manager.py:453 #, python-format -msgid "Unauthorized request for controller=%s and action=%s" +msgid "instance %s: suspending" msgstr "" -#: nova/api/ec2/__init__.py:339 +#: ../nova/compute/manager.py:472 #, python-format -msgid "NotFound raised: %s" +msgid "instance %s: resuming" 
msgstr "" -#: nova/api/ec2/__init__.py:342 +#: ../nova/compute/manager.py:491 #, python-format -msgid "ApiError raised: %s" +msgid "instance %s: locking" msgstr "" -#: nova/api/ec2/__init__.py:349 +#: ../nova/compute/manager.py:503 #, python-format -msgid "Unexpected error raised: %s" +msgid "instance %s: unlocking" msgstr "" -#: nova/api/ec2/__init__.py:354 -msgid "An unknown error has occurred. Please try your request again." +#: ../nova/compute/manager.py:513 +#, python-format +msgid "instance %s: getting locked state" msgstr "" -"Произошла неизвестная ошибка. Пожалуйста, попытайтесь повторить ваш запрос." -#: nova/api/ec2/admin.py:84 +#: ../nova/compute/manager.py:526 #, python-format -msgid "Creating new user: %s" -msgstr "Создание нового пользователя: %s" +msgid "instance %s: reset network" +msgstr "" -#: nova/api/ec2/admin.py:92 +#: ../nova/compute/manager.py:535 ../nova/api/ec2/cloud.py:515 #, python-format -msgid "Deleting user: %s" -msgstr "Удаление пользователя: %s" +msgid "Get console output for instance %s" +msgstr "" -#: nova/api/ec2/admin.py:114 +#: ../nova/compute/manager.py:543 #, python-format -msgid "Adding role %s to user %s for project %s" -msgstr "Добавление роли %s для пользователя %s для проекта %s" +msgid "instance %s: getting ajax console" +msgstr "" -#: nova/api/ec2/admin.py:117 nova/auth/manager.py:415 +#: ../nova/compute/manager.py:553 #, python-format -msgid "Adding sitewide role %s to user %s" +msgid "" +"instance %(instance_id)s: attaching volume %(volume_id)s to %(mountpoint)s" msgstr "" -#: nova/api/ec2/admin.py:122 +#. pylint: disable=W0702 +#. NOTE(vish): The inline callback eats the exception info so we +#. log the traceback here and reraise the same +#. ecxception below. 
+#: ../nova/compute/manager.py:569 #, python-format -msgid "Removing role %s from user %s for project %s" -msgstr "Удаление роли %s пользователя %s для проекта %s" +msgid "instance %(instance_id)s: attach failed %(mountpoint)s, removing" +msgstr "" -#: nova/api/ec2/admin.py:125 nova/auth/manager.py:441 +#: ../nova/compute/manager.py:585 #, python-format -msgid "Removing sitewide role %s from user %s" +msgid "" +"Detach volume %(volume_id)s from mountpoint %(mp)s on instance " +"%(instance_id)s" msgstr "" -#: nova/api/ec2/admin.py:129 nova/api/ec2/admin.py:192 -msgid "operation must be add or remove" +#: ../nova/compute/manager.py:588 +#, python-format +msgid "Detaching volume from unknown instance %s" msgstr "" -#: nova/api/ec2/admin.py:142 +#: ../nova/scheduler/simple.py:53 #, python-format -msgid "Getting x509 for user: %s on project: %s" +msgid "Host %s is not alive" msgstr "" -#: nova/api/ec2/admin.py:159 +#: ../nova/scheduler/simple.py:65 +msgid "All hosts have too many cores" +msgstr "" + +#: ../nova/scheduler/simple.py:87 #, python-format -msgid "Create project %s managed by %s" -msgstr "Создать проект %s под управлением %s" +msgid "Host %s not available" +msgstr "" + +#: ../nova/scheduler/simple.py:99 +msgid "All hosts have too many gigabytes" +msgstr "" + +#: ../nova/scheduler/simple.py:119 +msgid "All hosts have too many networks" +msgstr "" -#: nova/api/ec2/admin.py:170 +#: ../nova/volume/manager.py:85 #, python-format -msgid "Delete project: %s" -msgstr "Удалить проект: %s" +msgid "Re-exporting %s volumes" +msgstr "" -#: nova/api/ec2/admin.py:184 nova/auth/manager.py:533 +#: ../nova/volume/manager.py:90 #, python-format -msgid "Adding user %s to project %s" -msgstr "Добавление пользователя %s к проекту %s" +msgid "volume %s: skipping export" +msgstr "" -#: nova/api/ec2/admin.py:188 +#: ../nova/volume/manager.py:96 #, python-format -msgid "Removing user %s from project %s" -msgstr "Удаление пользователя %s с проекта %s" +msgid "volume %s: creating" 
+msgstr "" -#: nova/api/ec2/apirequest.py:95 +#: ../nova/volume/manager.py:108 #, python-format -msgid "Unsupported API request: controller = %s,action = %s" +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" msgstr "" -#: nova/api/ec2/cloud.py:117 +#: ../nova/volume/manager.py:112 #, python-format -msgid "Generating root CA: %s" +msgid "volume %s: creating export" msgstr "" -#: nova/api/ec2/cloud.py:277 +#: ../nova/volume/manager.py:123 #, python-format -msgid "Create key pair %s" -msgstr "Создание пары ключей %s" +msgid "volume %s: created successfully" +msgstr "" + +#: ../nova/volume/manager.py:131 +msgid "Volume is still attached" +msgstr "" + +#: ../nova/volume/manager.py:133 +msgid "Volume is not local to this node" +msgstr "" -#: nova/api/ec2/cloud.py:285 +#: ../nova/volume/manager.py:136 #, python-format -msgid "Delete key pair %s" -msgstr "Удаление пары ключей %s" +msgid "volume %s: removing export" +msgstr "" -#: nova/api/ec2/cloud.py:357 +#: ../nova/volume/manager.py:138 #, python-format -msgid "%s is not a valid ipProtocol" +msgid "volume %s: deleting" msgstr "" -#: nova/api/ec2/cloud.py:361 -msgid "Invalid port range" -msgstr "Неверный диапазон портов" +#: ../nova/volume/manager.py:147 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" -#: nova/api/ec2/cloud.py:392 +#: ../nova/virt/xenapi/fake.py:74 #, python-format -msgid "Revoke security group ingress %s" +msgid "%(text)s: _db_content => %(content)s" msgstr "" -#: nova/api/ec2/cloud.py:401 nova/api/ec2/cloud.py:414 -msgid "No rule for the specified parameters." 
+#: ../nova/virt/xenapi/fake.py:304 ../nova/virt/xenapi/fake.py:404 +#: ../nova/virt/xenapi/fake.py:422 ../nova/virt/xenapi/fake.py:478 +msgid "Raising NotImplemented" msgstr "" -#: nova/api/ec2/cloud.py:421 +#: ../nova/virt/xenapi/fake.py:306 #, python-format -msgid "Authorize security group ingress %s" +msgid "xenapi.fake does not have an implementation for %s" msgstr "" -#: nova/api/ec2/cloud.py:432 +#: ../nova/virt/xenapi/fake.py:341 #, python-format -msgid "This rule already exists in group %s" -msgstr "Это правило уже существует в группе %s" +msgid "Calling %(localname)s %(impl)s" +msgstr "" -#: nova/api/ec2/cloud.py:460 +#: ../nova/virt/xenapi/fake.py:346 #, python-format -msgid "Create Security Group %s" +msgid "Calling getter %s" msgstr "" -#: nova/api/ec2/cloud.py:463 +#: ../nova/virt/xenapi/fake.py:406 #, python-format -msgid "group %s already exists" -msgstr "группа %s уже существует" +msgid "" +"xenapi.fake does not have an implementation for %s or it has been called " +"with the wrong number of arguments" +msgstr "" + +#: ../nova/tests/test_cloud.py:256 +msgid "Can't test instances without a real virtual env." +msgstr "" -#: nova/api/ec2/cloud.py:475 +#: ../nova/tests/test_cloud.py:268 #, python-format -msgid "Delete security group %s" +msgid "Need to watch instance %s until it's running..." msgstr "" -#: nova/api/ec2/cloud.py:483 nova/compute/manager.py:452 +#: ../nova/virt/connection.py:73 +msgid "Failed to open connection to the hypervisor" +msgstr "" + +#: ../nova/network/linux_net.py:187 #, python-format -msgid "Get console output for instance %s" +msgid "Starting VLAN inteface %s" msgstr "" -#: nova/api/ec2/cloud.py:543 +#: ../nova/network/linux_net.py:208 #, python-format -msgid "Create volume of %s GB" -msgstr "Создание раздела %s ГБ" +msgid "Starting Bridge interface for %s" +msgstr "" -#: nova/api/ec2/cloud.py:567 +#. 
pylint: disable=W0703 +#: ../nova/network/linux_net.py:314 #, python-format -msgid "Attach volume %s to instacne %s at %s" +msgid "Hupping dnsmasq threw %s" msgstr "" -#: nova/api/ec2/cloud.py:579 +#: ../nova/network/linux_net.py:316 #, python-format -msgid "Detach volume %s" +msgid "Pid %d is stale, relaunching dnsmasq" msgstr "" -#: nova/api/ec2/cloud.py:686 -msgid "Allocate address" +#. pylint: disable=W0703 +#: ../nova/network/linux_net.py:358 +#, python-format +msgid "killing radvd threw %s" msgstr "" -#: nova/api/ec2/cloud.py:691 +#: ../nova/network/linux_net.py:360 #, python-format -msgid "Release address %s" +msgid "Pid %d is stale, relaunching radvd" msgstr "" -#: nova/api/ec2/cloud.py:696 +#. pylint: disable=W0703 +#: ../nova/network/linux_net.py:449 #, python-format -msgid "Associate address %s to instance %s" +msgid "Killing dnsmasq threw %s" msgstr "" -#: nova/api/ec2/cloud.py:703 +#: ../nova/utils.py:58 #, python-format -msgid "Disassociate address %s" +msgid "Inner Exception: %s" +msgstr "Вложенное исключение: %s" + +#: ../nova/utils.py:59 +#, python-format +msgid "Class %s cannot be found" +msgstr "Класс %s не найден" + +#: ../nova/utils.py:118 +#, python-format +msgid "Fetching %s" msgstr "" -#: nova/api/ec2/cloud.py:730 -msgid "Going to start terminating instances" +#: ../nova/utils.py:130 +#, python-format +msgid "Running cmd (subprocess): %s" msgstr "" -#: nova/api/ec2/cloud.py:738 +#: ../nova/utils.py:143 ../nova/utils.py:183 #, python-format -msgid "Reboot instance %r" +msgid "Result was %s" +msgstr "Результат %s" + +#: ../nova/utils.py:159 +#, python-format +msgid "Running cmd (SSH): %s" msgstr "" -#: nova/api/ec2/cloud.py:775 +#: ../nova/utils.py:217 #, python-format -msgid "De-registering image %s" +msgid "debug in callback: %s" msgstr "" -#: nova/api/ec2/cloud.py:783 +#: ../nova/utils.py:222 #, python-format -msgid "Registered image %s with id %s" +msgid "Running %s" +msgstr "Выполняется %s" + +#: ../nova/utils.py:262 +#, python-format 
+msgid "Link Local address is not found.:%s" msgstr "" -#: nova/api/ec2/cloud.py:789 nova/api/ec2/cloud.py:804 +#: ../nova/utils.py:265 #, python-format -msgid "attribute not supported: %s" -msgstr "аттрибут не поддерживается: %s" +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "" -#: nova/api/ec2/cloud.py:794 +#: ../nova/utils.py:363 #, python-format -msgid "invalid id: %s" +msgid "Invalid backend: %s" msgstr "" -#: nova/api/ec2/cloud.py:807 -msgid "user or group not specified" -msgstr "не указан пользователь или группа" +#: ../nova/utils.py:374 +#, python-format +msgid "backend %s" +msgstr "" -#: nova/api/ec2/cloud.py:809 -msgid "only group \"all\" is supported" +#: ../nova/fakerabbit.py:49 +#, python-format +msgid "(%(nm)s) publish (key: %(routing_key)s) %(message)s" msgstr "" -#: nova/api/ec2/cloud.py:811 -msgid "operation_type must be add or remove" +#: ../nova/fakerabbit.py:54 +#, python-format +msgid "Publishing to route %s" msgstr "" -#: nova/api/ec2/cloud.py:812 +#: ../nova/fakerabbit.py:84 #, python-format -msgid "Updating image %s publicity" +msgid "Declaring queue %s" +msgstr "Объявление очереди %s" + +#: ../nova/fakerabbit.py:90 +#, python-format +msgid "Declaring exchange %s" +msgstr "Объявление точки обмена %s" + +#: ../nova/fakerabbit.py:96 +#, python-format +msgid "Binding %(queue)s to %(exchange)s with key %(routing_key)s" msgstr "" -#: nova/api/ec2/metadatarequesthandler.py:75 +#: ../nova/fakerabbit.py:121 #, python-format -msgid "Failed to get metadata for ip: %s" -msgstr "Ошибка получения метаданных для ip: %s" +msgid "Getting from %(queue)s: %(message)s" +msgstr "" -#: nova/api/openstack/__init__.py:70 +#: ../nova/virt/xenapi/vm_utils.py:135 ../nova/virt/hyperv.py:171 #, python-format -msgid "Caught error: %s" +msgid "Created VM %s..." msgstr "" -#: nova/api/openstack/__init__.py:86 -msgid "Including admin operations in API." 
+#: ../nova/virt/xenapi/vm_utils.py:138 +#, python-format +msgid "Created VM %(instance_name)s as %(vm_ref)s." msgstr "" -#: nova/api/openstack/servers.py:184 +#: ../nova/virt/xenapi/vm_utils.py:168 #, python-format -msgid "Compute.api::lock %s" +msgid "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " msgstr "" -#: nova/api/openstack/servers.py:199 +#: ../nova/virt/xenapi/vm_utils.py:171 #, python-format -msgid "Compute.api::unlock %s" +msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." msgstr "" -#: nova/api/openstack/servers.py:213 +#: ../nova/virt/xenapi/vm_utils.py:187 #, python-format -msgid "Compute.api::get_lock %s" +msgid "VBD not found in instance %s" msgstr "" -#: nova/api/openstack/servers.py:224 +#: ../nova/virt/xenapi/vm_utils.py:197 #, python-format -msgid "Compute.api::pause %s" +msgid "Unable to unplug VBD %s" msgstr "" -#: nova/api/openstack/servers.py:235 +#: ../nova/virt/xenapi/vm_utils.py:209 #, python-format -msgid "Compute.api::unpause %s" +msgid "Unable to destroy VBD %s" msgstr "" -#: nova/api/openstack/servers.py:246 +#: ../nova/virt/xenapi/vm_utils.py:224 #, python-format -msgid "compute.api::suspend %s" +msgid "Creating VIF for VM %(vm_ref)s, network %(network_ref)s." msgstr "" -#: nova/api/openstack/servers.py:257 +#: ../nova/virt/xenapi/vm_utils.py:227 #, python-format -msgid "compute.api::resume %s" +msgid "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s." msgstr "" -#: nova/auth/dbdriver.py:84 +#: ../nova/virt/xenapi/vm_utils.py:246 #, python-format -msgid "User %s already exists" -msgstr "Пользователь %s уже существует" +msgid "" +"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s) on " +"%(sr_ref)s." +msgstr "" -#: nova/auth/dbdriver.py:106 nova/auth/ldapdriver.py:207 +#. TODO(sirp): Add quiesce and VSS locking support when Windows support +#. 
is added +#: ../nova/virt/xenapi/vm_utils.py:258 #, python-format -msgid "Project can't be created because manager %s doesn't exist" -msgstr "Проект не может быть создан поскольку менеджер %s не существует" +msgid "Snapshotting VM %(vm_ref)s with label '%(label)s'..." +msgstr "" -#: nova/auth/dbdriver.py:135 nova/auth/ldapdriver.py:204 +#: ../nova/virt/xenapi/vm_utils.py:272 #, python-format -msgid "Project can't be created because project %s already exists" -msgstr "Проект не может быть созан поскольку проект %s уже существует" +msgid "Created snapshot %(template_vm_ref)s from VM %(vm_ref)s." +msgstr "" -#: nova/auth/dbdriver.py:157 nova/auth/ldapdriver.py:241 +#: ../nova/virt/xenapi/vm_utils.py:286 #, python-format -msgid "Project can't be modified because manager %s doesn't exist" +msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s" msgstr "" -#: nova/auth/dbdriver.py:245 +#: ../nova/virt/xenapi/vm_utils.py:327 #, python-format -msgid "User \"%s\" not found" -msgstr "Пользователь \"%s\" не существует" +msgid "Size for image %(image)s:%(virtual_size)d" +msgstr "" -#: nova/auth/dbdriver.py:248 +#: ../nova/virt/xenapi/vm_utils.py:332 #, python-format -msgid "Project \"%s\" not found" -msgstr "Проект \"%s\" не найден" +msgid "Glance image %s" +msgstr "" -#: nova/auth/fakeldap.py:33 -msgid "Attempted to instantiate singleton" +#. we need to invoke a plugin for copying VDI's +#. 
content into proper path +#: ../nova/virt/xenapi/vm_utils.py:342 +#, python-format +msgid "Copying VDI %s to /boot/guest on dom0" msgstr "" -#: nova/auth/ldapdriver.py:181 +#: ../nova/virt/xenapi/vm_utils.py:352 #, python-format -msgid "LDAP object for %s doesn't exist" -msgstr "Объект LDAP %s не существует" +msgid "Kernel/Ramdisk VDI %s destroyed" +msgstr "" -#: nova/auth/ldapdriver.py:218 +#: ../nova/virt/xenapi/vm_utils.py:361 #, python-format -msgid "Project can't be created because user %s doesn't exist" -msgstr "Проект не может быть создан поскольку пользователь %s не существует" +msgid "Asking xapi to fetch %(url)s as %(access)s" +msgstr "" -#: nova/auth/ldapdriver.py:478 +#: ../nova/virt/xenapi/vm_utils.py:386 ../nova/virt/xenapi/vm_utils.py:402 #, python-format -msgid "User %s is already a member of the group %s" -msgstr "Пользователь %s уже член группы %s" +msgid "Looking up vdi %s for PV kernel" +msgstr "" -#: nova/auth/ldapdriver.py:507 +#: ../nova/virt/xenapi/vm_utils.py:397 #, python-format -msgid "" -"Attempted to remove the last member of a group. Deleting the group at %s " -"instead." +msgid "PV Kernel in VDI:%s" msgstr "" -#: nova/auth/ldapdriver.py:528 +#: ../nova/virt/xenapi/vm_utils.py:405 #, python-format -msgid "Group at dn %s doesn't exist" +msgid "Running pygrub against %s" msgstr "" -#: nova/auth/manager.py:259 +#: ../nova/virt/xenapi/vm_utils.py:411 #, python-format -msgid "Looking up user: %r" +msgid "Found Xen kernel %s" +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:413 +msgid "No Xen kernel found. Booting HVM." 
msgstr "" -#: nova/auth/manager.py:263 +#: ../nova/virt/xenapi/vm_utils.py:425 ../nova/virt/hyperv.py:431 #, python-format -msgid "Failed authorization for access key %s" +msgid "duplicate name found: %s" msgstr "" -#: nova/auth/manager.py:264 +#: ../nova/virt/xenapi/vm_utils.py:442 #, python-format -msgid "No user found for access key %s" +msgid "VDI %s is still available" msgstr "" -#: nova/auth/manager.py:270 +#: ../nova/virt/xenapi/vm_utils.py:463 #, python-format -msgid "Using project name = user name (%s)" +msgid "(VM_UTILS) xenserver vm state -> |%s|" msgstr "" -#: nova/auth/manager.py:275 +#: ../nova/virt/xenapi/vm_utils.py:465 #, python-format -msgid "failed authorization: no project named %s (user=%s)" +msgid "(VM_UTILS) xenapi power_state -> |%s|" msgstr "" -#: nova/auth/manager.py:277 +#: ../nova/virt/xenapi/vm_utils.py:525 #, python-format -msgid "No project called %s could be found" +msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s" msgstr "" -#: nova/auth/manager.py:281 +#: ../nova/virt/xenapi/vm_utils.py:542 #, python-format -msgid "Failed authorization: user %s not admin and not member of project %s" +msgid "Re-scanning SR %s" msgstr "" -#: nova/auth/manager.py:283 +#: ../nova/virt/xenapi/vm_utils.py:567 #, python-format -msgid "User %s is not a member of project %s" -msgstr "Пользователь %s не является членом группы %s" +msgid "" +"VHD coalesce attempts exceeded (%(counter)d > %(max_attempts)d), giving up..." +msgstr "" -#: nova/auth/manager.py:292 nova/auth/manager.py:303 +#: ../nova/virt/xenapi/vm_utils.py:574 #, python-format -msgid "Invalid signature for user %s" -msgstr "Не допустимая подпись для пользователя %s" +msgid "" +"Parent %(parent_uuid)s doesn't match original parent " +"%(original_parent_uuid)s, waiting for coalesce..." 
+msgstr "" -#: nova/auth/manager.py:293 nova/auth/manager.py:304 -msgid "Signature does not match" -msgstr "Подпись не совпадает" +#: ../nova/virt/xenapi/vm_utils.py:590 +#, python-format +msgid "No VDIs found for VM %s" +msgstr "" -#: nova/auth/manager.py:374 -msgid "Must specify project" -msgstr "Необходимо указать проект" +#: ../nova/virt/xenapi/vm_utils.py:594 +#, python-format +msgid "Unexpected number of VDIs (%(num_vdis)s) found for VM %(vm_ref)s" +msgstr "" -#: nova/auth/manager.py:408 +#: ../nova/virt/xenapi/vm_utils.py:653 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:188 #, python-format -msgid "The %s role can not be found" -msgstr "Роль %s не может быть найдена" +msgid "Creating VBD for VDI %s ... " +msgstr "" -#: nova/auth/manager.py:410 +#: ../nova/virt/xenapi/vm_utils.py:655 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:190 #, python-format -msgid "The %s role is global only" +msgid "Creating VBD for VDI %s done." msgstr "" -#: nova/auth/manager.py:412 +#: ../nova/virt/xenapi/vm_utils.py:657 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:192 #, python-format -msgid "Adding role %s to user %s in project %s" -msgstr "Добавление роли %s для пользователя %s в проект %s" +msgid "Plugging VBD %s ... " +msgstr "" -#: nova/auth/manager.py:438 +#: ../nova/virt/xenapi/vm_utils.py:659 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:194 #, python-format -msgid "Removing role %s from user %s on project %s" -msgstr "Удаление роли %s пользователя %s в проекте %s" +msgid "Plugging VBD %s done." 
+msgstr "" -#: nova/auth/manager.py:505 +#: ../nova/virt/xenapi/vm_utils.py:661 #, python-format -msgid "Created project %s with manager %s" -msgstr "Создан проект %s под управлением %s" +msgid "VBD %(vbd)s plugged as %(orig_dev)s" +msgstr "" -#: nova/auth/manager.py:523 +#: ../nova/virt/xenapi/vm_utils.py:664 #, python-format -msgid "modifying project %s" -msgstr "изменение проекта %s" +msgid "VBD %(vbd)s plugged into wrong dev, remapping to %(dev)s" +msgstr "" -#: nova/auth/manager.py:553 +#: ../nova/virt/xenapi/vm_utils.py:668 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:197 #, python-format -msgid "Remove user %s from project %s" -msgstr "Удалить пользователя %s из проекта %s" +msgid "Destroying VBD for VDI %s ... " +msgstr "" -#: nova/auth/manager.py:581 +#: ../nova/virt/xenapi/vm_utils.py:671 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:200 #, python-format -msgid "Deleting project %s" -msgstr "Удаление проекта %s" +msgid "Destroying VBD for VDI %s done." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:683 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:211 +msgid "VBD.unplug successful first time." +msgstr "" -#: nova/auth/manager.py:637 +#: ../nova/virt/xenapi/vm_utils.py:688 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:216 +msgid "VBD.unplug rejected: retrying..." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:692 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:220 +msgid "VBD.unplug successful eventually." 
+msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:695 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:223 #, python-format -msgid "Created user %s (admin: %r)" -msgstr "Создан пользователь %s (администратор: %r)" +msgid "Ignoring XenAPI.Failure in VBD.unplug: %s" +msgstr "" -#: nova/auth/manager.py:645 +#: ../nova/virt/xenapi/vm_utils.py:704 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:66 #, python-format -msgid "Deleting user %s" -msgstr "Удаление пользователя %s" +msgid "Ignoring XenAPI.Failure %s" +msgstr "" -#: nova/auth/manager.py:655 +#: ../nova/virt/xenapi/vm_utils.py:735 #, python-format -msgid "Access Key change for user %s" +msgid "" +"Writing partition table %(primary_first)d %(primary_last)d to %(dest)s..." msgstr "" -#: nova/auth/manager.py:657 +#: ../nova/virt/xenapi/vm_utils.py:747 #, python-format -msgid "Secret Key change for user %s" +msgid "Writing partition table %s done." msgstr "" -#: nova/auth/manager.py:659 +#: ../nova/tests/test_rpc.py:89 #, python-format -msgid "Admin status set to %r for user %s" +msgid "Nested received %(queue)s, %(value)s" msgstr "" -#: nova/auth/manager.py:708 +#: ../nova/tests/test_rpc.py:95 #, python-format -msgid "No vpn data for project %s" -msgstr "Нет vpn данных для проекта %s" +msgid "Nested return %s" +msgstr "" -#: nova/cloudpipe/pipelib.py:45 -msgid "Template for script to run on cloudpipe instance boot" +#: ../nova/tests/test_rpc.py:120 ../nova/tests/test_rpc.py:126 +#, python-format +msgid "Received %s" +msgstr "Получено %s" + +#: ../nova/db/sqlalchemy/api.py:44 +msgid "Use of empty request context is deprecated" msgstr "" -#: nova/cloudpipe/pipelib.py:48 -msgid "Network to push into openvpn config" +#: ../nova/db/sqlalchemy/api.py:133 +#, python-format +msgid "No service for id %s" msgstr "" -#: nova/cloudpipe/pipelib.py:51 -msgid "Netmask to push into openvpn config" +#: ../nova/db/sqlalchemy/api.py:251 +#, python-format +msgid "No service for %(host)s, 
%(binary)s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:592 +msgid "No fixed ips defined" msgstr "" -#: nova/cloudpipe/pipelib.py:97 +#: ../nova/db/sqlalchemy/api.py:608 #, python-format -msgid "Launching VPN for %s" -msgstr "Запуск VPN для %s" +msgid "No floating ip for address %s" +msgstr "" -#: nova/compute/api.py:67 +#: ../nova/db/sqlalchemy/api.py:629 #, python-format -msgid "Instance %d was not found in get_network_topic" +msgid "No address for instance %s" msgstr "" -#: nova/compute/api.py:73 +#: ../nova/db/sqlalchemy/api.py:961 #, python-format -msgid "Instance %d has no host" +msgid "no keypair for user %(user_id)s, name %(name)s" msgstr "" -#: nova/compute/api.py:92 +#: ../nova/db/sqlalchemy/api.py:1076 ../nova/db/sqlalchemy/api.py:1156 #, python-format -msgid "Quota exceeeded for %s, tried to run %s instances" +msgid "No network for id %s" msgstr "" -#: nova/compute/api.py:94 +#: ../nova/db/sqlalchemy/api.py:1086 +msgid "No networks defined" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1115 #, python-format -msgid "" -"Instance quota exceeded. You can only run %s more instances of this type." +msgid "No network for bridge %s" msgstr "" -#: nova/compute/api.py:109 -msgid "Creating a raw instance" +#: ../nova/db/sqlalchemy/api.py:1129 ../nova/db/sqlalchemy/api.py:1142 +#, python-format +msgid "No network for instance %s" msgstr "" -#: nova/compute/api.py:156 +#: ../nova/db/sqlalchemy/api.py:1277 #, python-format -msgid "Going to run %s instances..." 
+msgid "Token %s does not exist" msgstr "" -#: nova/compute/api.py:180 +#: ../nova/db/sqlalchemy/api.py:1302 #, python-format -msgid "Casting to scheduler for %s/%s's instance %s" +msgid "No quota for project_id %s" msgstr "" -#: nova/compute/api.py:279 +#: ../nova/db/sqlalchemy/api.py:1455 ../nova/db/sqlalchemy/api.py:1501 +#: ../nova/api/ec2/__init__.py:323 #, python-format -msgid "Going to try and terminate %s" +msgid "Volume %s not found" msgstr "" -#: nova/compute/api.py:283 +#: ../nova/db/sqlalchemy/api.py:1514 #, python-format -msgid "Instance %d was not found during terminate" +msgid "No export device found for volume %s" msgstr "" -#: nova/compute/api.py:288 +#: ../nova/db/sqlalchemy/api.py:1527 #, python-format -msgid "Instance %d is already being terminated" +msgid "No target id found for volume %s" msgstr "" -#: nova/compute/api.py:450 +#: ../nova/db/sqlalchemy/api.py:1572 #, python-format -msgid "Invalid device specified: %s. Example device: /dev/vdb" +msgid "No security group with id %s" msgstr "" -#: nova/compute/api.py:465 -msgid "Volume isn't attached to anything!" 
+#: ../nova/db/sqlalchemy/api.py:1589 +#, python-format +msgid "No security group named %(group_name)s for project: %(project_id)s" msgstr "" -#: nova/compute/disk.py:71 +#: ../nova/db/sqlalchemy/api.py:1682 #, python-format -msgid "Input partition size not evenly divisible by sector size: %d / %d" +msgid "No secuity group rule with id %s" msgstr "" -#: nova/compute/disk.py:75 +#: ../nova/db/sqlalchemy/api.py:1756 #, python-format -msgid "Bytes for local storage not evenly divisible by sector size: %d / %d" +msgid "No user for id %s" msgstr "" -#: nova/compute/disk.py:128 +#: ../nova/db/sqlalchemy/api.py:1772 #, python-format -msgid "Could not attach image to loopback: %s" +msgid "No user for access key %s" msgstr "" -#: nova/compute/disk.py:136 +#: ../nova/db/sqlalchemy/api.py:1834 #, python-format -msgid "Failed to load partition: %s" +msgid "No project with id %s" msgstr "" -#: nova/compute/disk.py:158 +#: ../nova/db/sqlalchemy/api.py:1979 #, python-format -msgid "Failed to mount filesystem: %s" -msgstr "Ошибка монтирования файловой системы: %s" +msgid "No console pool with id %(pool_id)s" +msgstr "" -#: nova/compute/instance_types.py:41 +#: ../nova/db/sqlalchemy/api.py:1996 #, python-format -msgid "Unknown instance type: %s" +msgid "" +"No console pool of type %(console_type)s for compute host %(compute_host)s " +"on proxy host %(host)s" msgstr "" -#: nova/compute/manager.py:69 +#: ../nova/db/sqlalchemy/api.py:2035 #, python-format -msgid "check_instance_lock: decorating: |%s|" +msgid "No console for instance %(instance_id)s in pool %(pool_id)s" msgstr "" -#: nova/compute/manager.py:71 +#: ../nova/db/sqlalchemy/api.py:2057 #, python-format -msgid "check_instance_lock: arguments: |%s| |%s| |%s|" +msgid "on instance %s" msgstr "" -#: nova/compute/manager.py:75 +#: ../nova/db/sqlalchemy/api.py:2058 #, python-format -msgid "check_instance_lock: locked: |%s|" +msgid "No console with id %(console_id)s %(idesc)s" msgstr "" -#: nova/compute/manager.py:77 +#: 
../nova/db/sqlalchemy/api.py:2078 ../nova/db/sqlalchemy/api.py:2097 #, python-format -msgid "check_instance_lock: admin: |%s|" +msgid "No zone with id %(zone_id)s" msgstr "" -#: nova/compute/manager.py:82 +#: ../nova/virt/libvirt_conn.py:160 #, python-format -msgid "check_instance_lock: executing: |%s|" +msgid "Checking state of %s" msgstr "" -#: nova/compute/manager.py:86 +#: ../nova/virt/libvirt_conn.py:165 #, python-format -msgid "check_instance_lock: not executing |%s|" +msgid "Current state of %(name)s was %(state)s." msgstr "" -#: nova/compute/manager.py:157 -msgid "Instance has already been created" +#: ../nova/virt/libvirt_conn.py:183 +#, python-format +msgid "Connecting to libvirt: %s" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:196 +msgid "Connection to libvirt broke" msgstr "" -#: nova/compute/manager.py:158 +#: ../nova/virt/libvirt_conn.py:258 #, python-format -msgid "instance %s: starting..." +msgid "instance %(instance_name)s: deleting instance files %(target)s" msgstr "" -#: nova/compute/manager.py:197 +#: ../nova/virt/libvirt_conn.py:283 #, python-format -msgid "instance %s: Failed to spawn" +msgid "Invalid device path %s" msgstr "" -#: nova/compute/manager.py:211 nova/tests/test_cloud.py:228 +#: ../nova/virt/libvirt_conn.py:313 #, python-format -msgid "Terminating instance %s" +msgid "No disk at %s" +msgstr "Нет диска в %s" + +#: ../nova/virt/libvirt_conn.py:320 +msgid "Instance snapshotting is not supported for libvirtat this time" msgstr "" -#: nova/compute/manager.py:217 +#: ../nova/virt/libvirt_conn.py:336 #, python-format -msgid "Disassociating address %s" +msgid "instance %s: rebooted" msgstr "" -#: nova/compute/manager.py:230 +#: ../nova/virt/libvirt_conn.py:339 #, python-format -msgid "Deallocating address %s" +msgid "_wait_for_reboot failed: %s" msgstr "" -#: nova/compute/manager.py:243 +#: ../nova/virt/libvirt_conn.py:382 #, python-format -msgid "trying to destroy already destroyed instance: %s" +msgid "instance %s: rescued" msgstr "" 
-#: nova/compute/manager.py:257 +#: ../nova/virt/libvirt_conn.py:385 #, python-format -msgid "Rebooting instance %s" +msgid "_wait_for_rescue failed: %s" msgstr "" -#: nova/compute/manager.py:260 +#: ../nova/virt/libvirt_conn.py:411 #, python-format -msgid "trying to reboot a non-running instance: %s (state: %s excepted: %s)" +msgid "instance %s: is running" msgstr "" -#: nova/compute/manager.py:286 +#: ../nova/virt/libvirt_conn.py:422 #, python-format -msgid "instance %s: snapshotting" +msgid "instance %s: booted" msgstr "" -#: nova/compute/manager.py:289 +#: ../nova/virt/libvirt_conn.py:425 ../nova/virt/xenapi/vmops.py:186 #, python-format -msgid "" -"trying to snapshot a non-running instance: %s (state: %s excepted: %s)" +msgid "instance %s: failed to boot" msgstr "" -#: nova/compute/manager.py:301 +#: ../nova/virt/libvirt_conn.py:436 #, python-format -msgid "instance %s: rescuing" +msgid "virsh said: %r" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:440 +msgid "cool, it's a device" msgstr "" -#: nova/compute/manager.py:316 +#: ../nova/virt/libvirt_conn.py:448 #, python-format -msgid "instance %s: unrescuing" +msgid "data: %(data)r, fpath: %(fpath)r" msgstr "" -#: nova/compute/manager.py:335 +#: ../nova/virt/libvirt_conn.py:456 #, python-format -msgid "instance %s: pausing" +msgid "Contents of file %(fpath)s: %(contents)r" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:489 +msgid "Unable to find an open port" msgstr "" -#: nova/compute/manager.py:352 +#: ../nova/virt/libvirt_conn.py:563 #, python-format -msgid "instance %s: unpausing" +msgid "instance %s: Creating image" msgstr "" -#: nova/compute/manager.py:369 +#: ../nova/virt/libvirt_conn.py:646 #, python-format -msgid "instance %s: retrieving diagnostics" +msgid "instance %(inst_name)s: injecting key into image %(img_id)s" msgstr "" -#: nova/compute/manager.py:382 +#: ../nova/virt/libvirt_conn.py:649 #, python-format -msgid "instance %s: suspending" +msgid "instance %(inst_name)s: injecting net into image 
%(img_id)s" msgstr "" -#: nova/compute/manager.py:401 +#. This could be a windows image, or a vmdk format disk +#: ../nova/virt/libvirt_conn.py:657 #, python-format -msgid "instance %s: resuming" +msgid "" +"instance %(inst_name)s: ignoring error injecting data into image %(img_id)s " +"(%(e)s)" msgstr "" -#: nova/compute/manager.py:420 +#. TODO(termie): cache? +#: ../nova/virt/libvirt_conn.py:665 #, python-format -msgid "instance %s: locking" +msgid "instance %s: starting toXML method" msgstr "" -#: nova/compute/manager.py:432 +#: ../nova/virt/libvirt_conn.py:732 #, python-format -msgid "instance %s: unlocking" +msgid "instance %s: finished toXML method" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:751 +msgid "diagnostics are not supported for libvirt" msgstr "" -#: nova/compute/manager.py:442 +#: ../nova/virt/libvirt_conn.py:1225 #, python-format -msgid "instance %s: getting locked state" +msgid "Attempted to unfilter instance %s which is not filtered" msgstr "" -#: nova/compute/manager.py:462 +#: ../nova/api/ec2/metadatarequesthandler.py:76 #, python-format -msgid "instance %s: attaching volume %s to %s" +msgid "Failed to get metadata for ip: %s" +msgstr "Ошибка получения метаданных для ip: %s" + +#: ../nova/auth/fakeldap.py:33 +msgid "Attempted to instantiate singleton" msgstr "" -#: nova/compute/manager.py:478 +#: ../nova/network/api.py:39 #, python-format -msgid "instance %s: attach failed %s, removing" +msgid "Quota exceeeded for %s, tried to allocate address" msgstr "" -#: nova/compute/manager.py:493 +#: ../nova/network/api.py:42 +msgid "Address quota exceeded. 
You cannot allocate any more addresses" +msgstr "" + +#: ../nova/tests/test_volume.py:162 #, python-format -msgid "Detach volume %s from mountpoint %s on instance %s" +msgid "Target %s allocated" msgstr "" -#: nova/compute/manager.py:497 +#: ../nova/virt/images.py:70 #, python-format -msgid "Detaching volume from unknown instance %s" +msgid "Finished retreving %(url)s -- placed in %(path)s" +msgstr "" + +#: ../nova/scheduler/driver.py:66 +msgid "Must implement a fallback schedule" +msgstr "" + +#: ../nova/console/manager.py:70 +msgid "Adding console" msgstr "" -#: nova/compute/monitor.py:259 +#: ../nova/console/manager.py:90 #, python-format -msgid "updating %s..." -msgstr "обновление %s..." +msgid "Tried to remove non-existant console %(console_id)s." +msgstr "" -#: nova/compute/monitor.py:289 -msgid "unexpected error during update" -msgstr "неожиданная ошибка во время обновления" +#: ../nova/api/direct.py:149 +msgid "not available" +msgstr "" -#: nova/compute/monitor.py:355 +#: ../nova/api/ec2/cloud.py:62 #, python-format -msgid "Cannot get blockstats for \"%s\" on \"%s\"" +msgid "The key_pair %s already exists" msgstr "" -#: nova/compute/monitor.py:377 +#. 
TODO(vish): Do this with M2Crypto instead +#: ../nova/api/ec2/cloud.py:118 #, python-format -msgid "Cannot get ifstats for \"%s\" on \"%s\"" +msgid "Generating root CA: %s" msgstr "" -#: nova/compute/monitor.py:412 -msgid "unexpected exception getting connection" +#: ../nova/api/ec2/cloud.py:303 +#, python-format +msgid "Create key pair %s" +msgstr "Создание пары ключей %s" + +#: ../nova/api/ec2/cloud.py:311 +#, python-format +msgid "Delete key pair %s" +msgstr "Удаление пары ключей %s" + +#: ../nova/api/ec2/cloud.py:386 +#, python-format +msgid "%s is not a valid ipProtocol" msgstr "" -#: nova/compute/monitor.py:427 +#: ../nova/api/ec2/cloud.py:390 +msgid "Invalid port range" +msgstr "Неверный диапазон портов" + +#: ../nova/api/ec2/cloud.py:421 #, python-format -msgid "Found instance: %s" +msgid "Revoke security group ingress %s" msgstr "" -#: nova/db/sqlalchemy/api.py:43 -msgid "Use of empty request context is deprecated" +#: ../nova/api/ec2/cloud.py:430 ../nova/api/ec2/cloud.py:459 +msgid "Not enough parameters to build a valid rule." msgstr "" -#: nova/db/sqlalchemy/api.py:132 -#, python-format -msgid "No service for id %s" +#: ../nova/api/ec2/cloud.py:443 +msgid "No rule for the specified parameters." 
msgstr "" -#: nova/db/sqlalchemy/api.py:229 +#: ../nova/api/ec2/cloud.py:450 #, python-format -msgid "No service for %s, %s" +msgid "Authorize security group ingress %s" msgstr "" -#: nova/db/sqlalchemy/api.py:574 +#: ../nova/api/ec2/cloud.py:464 #, python-format -msgid "No floating ip for address %s" +msgid "This rule already exists in group %s" +msgstr "Это правило уже существует в группе %s" + +#: ../nova/api/ec2/cloud.py:492 +#, python-format +msgid "Create Security Group %s" msgstr "" -#: nova/db/sqlalchemy/api.py:668 +#: ../nova/api/ec2/cloud.py:495 +#, python-format +msgid "group %s already exists" +msgstr "группа %s уже существует" + +#: ../nova/api/ec2/cloud.py:507 #, python-format -msgid "No instance for id %s" +msgid "Delete security group %s" msgstr "" -#: nova/db/sqlalchemy/api.py:758 nova/virt/libvirt_conn.py:598 -#: nova/virt/xenapi/volumeops.py:48 nova/virt/xenapi/volumeops.py:103 +#: ../nova/api/ec2/cloud.py:584 #, python-format -msgid "Instance %s not found" +msgid "Create volume of %s GB" +msgstr "Создание раздела %s ГБ" + +#: ../nova/api/ec2/cloud.py:612 +#, python-format +msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" msgstr "" -#: nova/db/sqlalchemy/api.py:891 +#: ../nova/api/ec2/cloud.py:629 #, python-format -msgid "no keypair for user %s, name %s" +msgid "Detach volume %s" msgstr "" -#: nova/db/sqlalchemy/api.py:1006 nova/db/sqlalchemy/api.py:1064 +#: ../nova/api/ec2/cloud.py:761 +msgid "Allocate address" +msgstr "" + +#: ../nova/api/ec2/cloud.py:766 #, python-format -msgid "No network for id %s" +msgid "Release address %s" msgstr "" -#: nova/db/sqlalchemy/api.py:1036 +#: ../nova/api/ec2/cloud.py:771 #, python-format -msgid "No network for bridge %s" +msgid "Associate address %(public_ip)s to instance %(instance_id)s" msgstr "" -#: nova/db/sqlalchemy/api.py:1050 +#: ../nova/api/ec2/cloud.py:780 #, python-format -msgid "No network for instance %s" +msgid "Disassociate address %s" +msgstr "" + +#: 
../nova/api/ec2/cloud.py:807 +msgid "Going to start terminating instances" msgstr "" -#: nova/db/sqlalchemy/api.py:1180 +#: ../nova/api/ec2/cloud.py:815 #, python-format -msgid "Token %s does not exist" +msgid "Reboot instance %r" msgstr "" -#: nova/db/sqlalchemy/api.py:1205 +#: ../nova/api/ec2/cloud.py:867 #, python-format -msgid "No quota for project_id %s" +msgid "De-registering image %s" msgstr "" -#: nova/db/sqlalchemy/api.py:1356 +#: ../nova/api/ec2/cloud.py:875 #, python-format -msgid "No volume for id %s" +msgid "Registered image %(image_location)s with id %(image_id)s" msgstr "" -#: nova/db/sqlalchemy/api.py:1401 +#: ../nova/api/ec2/cloud.py:882 ../nova/api/ec2/cloud.py:900 #, python-format -msgid "Volume %s not found" +msgid "attribute not supported: %s" +msgstr "аттрибут не поддерживается: %s" + +#: ../nova/api/ec2/cloud.py:890 +#, python-format +msgid "invalid id: %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:903 +msgid "user or group not specified" +msgstr "не указан пользователь или группа" + +#: ../nova/api/ec2/cloud.py:905 +msgid "only group \"all\" is supported" +msgstr "" + +#: ../nova/api/ec2/cloud.py:907 +msgid "operation_type must be add or remove" msgstr "" -#: nova/db/sqlalchemy/api.py:1413 +#: ../nova/api/ec2/cloud.py:908 #, python-format -msgid "No export device found for volume %s" +msgid "Updating image %s publicity" msgstr "" -#: nova/db/sqlalchemy/api.py:1426 +#: ../bin/nova-api.py:52 #, python-format -msgid "No target id found for volume %s" +msgid "Using paste.deploy config at: %s" msgstr "" -#: nova/db/sqlalchemy/api.py:1471 +#: ../bin/nova-api.py:57 #, python-format -msgid "No security group with id %s" +msgid "No paste configuration for app: %s" msgstr "" -#: nova/db/sqlalchemy/api.py:1488 +#: ../bin/nova-api.py:59 #, python-format -msgid "No security group named %s for project: %s" +msgid "" +"App Config: %(api)s\n" +"%(config)r" msgstr "" -#: nova/db/sqlalchemy/api.py:1576 +#: ../bin/nova-api.py:64 #, python-format -msgid "No 
secuity group rule with id %s" +msgid "Running %s API" msgstr "" -#: nova/db/sqlalchemy/api.py:1650 +#: ../bin/nova-api.py:69 #, python-format -msgid "No user for id %s" +msgid "No known API applications configured in %s." msgstr "" -#: nova/db/sqlalchemy/api.py:1666 +#: ../bin/nova-api.py:83 #, python-format -msgid "No user for access key %s" +msgid "Starting nova-api node (version %s)" msgstr "" -#: nova/db/sqlalchemy/api.py:1728 +#: ../bin/nova-api.py:89 #, python-format -msgid "No project with id %s" +msgid "No paste configuration found for: %s" msgstr "" -#: nova/image/glance.py:78 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:84 #, python-format -msgid "Parallax returned HTTP error %d from request for /images" +msgid "Argument %(key)s value %(value)s is too short." msgstr "" -#: nova/image/glance.py:97 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:89 #, python-format -msgid "Parallax returned HTTP error %d from request for /images/detail" +msgid "Argument %(key)s value %(value)s contains invalid characters." msgstr "" -#: nova/image/s3.py:82 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:94 #, python-format -msgid "Image %s could not be found" +msgid "Argument %(key)s value %(value)s starts with a hyphen." msgstr "" -#: nova/network/api.py:39 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:102 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:130 #, python-format -msgid "Quota exceeeded for %s, tried to allocate address" +msgid "Argument %s is required." msgstr "" -#: nova/network/api.py:42 -msgid "Address quota exceeded. You cannot allocate any more addresses" +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:117 +#, python-format +msgid "" +"Argument %(key)s may not take value %(value)s. Valid values are ['true', " +"'false']." 
msgstr "" -#: nova/network/linux_net.py:176 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:163 #, python-format -msgid "Starting VLAN inteface %s" +msgid "" +"Created VDI %(vdi_ref)s (%(label)s, %(size)s, %(read_only)s) on %(sr_ref)s." msgstr "" -#: nova/network/linux_net.py:186 +#: ../nova/virt/xenapi/vmops.py:67 #, python-format -msgid "Starting Bridge interface for %s" +msgid "Attempted to create non-unique name %s" msgstr "" -#: nova/network/linux_net.py:254 +#: ../nova/virt/xenapi/vmops.py:73 #, python-format -msgid "Hupping dnsmasq threw %s" +msgid "instance %(name)s: not enough free memory" msgstr "" -#: nova/network/linux_net.py:256 +#: ../nova/virt/xenapi/vmops.py:148 #, python-format -msgid "Pid %d is stale, relaunching dnsmasq" +msgid "Starting VM %s..." msgstr "" -#: nova/network/linux_net.py:334 +#: ../nova/virt/xenapi/vmops.py:151 #, python-format -msgid "Killing dnsmasq threw %s" +msgid "Spawning VM %(instance_name)s created %(vm_ref)s." msgstr "" -#: nova/network/manager.py:135 -msgid "setting network host" +#: ../nova/virt/xenapi/vmops.py:162 +#, python-format +msgid "Invalid value for onset_files: '%s'" msgstr "" -#: nova/network/manager.py:190 +#: ../nova/virt/xenapi/vmops.py:167 #, python-format -msgid "Leasing IP %s" +msgid "Injecting file path: '%s'" msgstr "" -#: nova/network/manager.py:194 +#: ../nova/virt/xenapi/vmops.py:180 #, python-format -msgid "IP %s leased that isn't associated" +msgid "Instance %s: booted" msgstr "" -#: nova/network/manager.py:197 +#: ../nova/virt/xenapi/vmops.py:232 #, python-format -msgid "IP %s leased to bad mac %s vs %s" +msgid "Instance not present %s" msgstr "" -#: nova/network/manager.py:205 +#. TODO(sirp): Add quiesce and VSS locking support when Windows support +#. 
is added +#: ../nova/virt/xenapi/vmops.py:261 #, python-format -msgid "IP %s leased that was already deallocated" +msgid "Starting snapshot for VM %s" msgstr "" -#: nova/network/manager.py:214 +#: ../nova/virt/xenapi/vmops.py:269 #, python-format -msgid "IP %s released that isn't associated" +msgid "Unable to Snapshot %(vm_ref)s: %(exc)s" msgstr "" -#: nova/network/manager.py:217 +#: ../nova/virt/xenapi/vmops.py:280 #, python-format -msgid "IP %s released from bad mac %s vs %s" +msgid "Finished snapshot and upload for VM %s" msgstr "" -#: nova/network/manager.py:220 +#: ../nova/virt/xenapi/vmops.py:356 #, python-format -msgid "IP %s released that was not leased" +msgid "VM %(vm)s already halted, skipping shutdown..." msgstr "" -#: nova/network/manager.py:442 +#: ../nova/virt/xenapi/vmops.py:389 +msgid "Removing kernel/ramdisk files" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:399 +msgid "kernel/ramdisk files removed" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:561 #, python-format -msgid "Dissassociated %s stale fixed ip(s)" +msgid "" +"TIMEOUT: The call to %(method)s timed out. VM id=%(instance_id)s; " +"args=%(strargs)s" msgstr "" -#: nova/objectstore/handler.py:106 +#: ../nova/virt/xenapi/vmops.py:564 #, python-format -msgid "Unknown S3 value type %r" +msgid "" +"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. VM " +"id=%(instance_id)s; args=%(strargs)s" msgstr "" -#: nova/objectstore/handler.py:137 -msgid "Authenticated request" +#: ../nova/virt/xenapi/vmops.py:569 +#, python-format +msgid "" +"The call to %(method)s returned an error: %(e)s. 
VM id=%(instance_id)s; " +"args=%(strargs)s" msgstr "" -#: nova/objectstore/handler.py:182 -msgid "List of buckets requested" +#: ../nova/virt/xenapi/vmops.py:760 +#, python-format +msgid "OpenSSL error: %s" msgstr "" -#: nova/objectstore/handler.py:209 +#: ../nova/tests/test_compute.py:148 #, python-format -msgid "List keys for bucket %s" +msgid "Running instances: %s" msgstr "" -#: nova/objectstore/handler.py:217 +#: ../nova/tests/test_compute.py:154 #, python-format -msgid "Unauthorized attempt to access bucket %s" +msgid "After terminating instances: %s" +msgstr "" + +#: ../nova/cloudpipe/pipelib.py:45 +msgid "Template for script to run on cloudpipe instance boot" msgstr "" -#: nova/objectstore/handler.py:235 +#: ../nova/cloudpipe/pipelib.py:48 +msgid "Network to push into openvpn config" +msgstr "" + +#: ../nova/cloudpipe/pipelib.py:51 +msgid "Netmask to push into openvpn config" +msgstr "" + +#: ../nova/cloudpipe/pipelib.py:97 #, python-format -msgid "Creating bucket %s" +msgid "Launching VPN for %s" +msgstr "Запуск VPN для %s" + +#: ../nova/db/sqlalchemy/migration.py:35 +msgid "python-migrate is not installed. Exiting." msgstr "" -#: nova/objectstore/handler.py:245 +#: ../nova/image/s3.py:99 #, python-format -msgid "Deleting bucket %s" +msgid "Image %s could not be found" msgstr "" -#: nova/objectstore/handler.py:249 +#: ../nova/api/ec2/__init__.py:121 +msgid "Too many failed authentications." +msgstr "Слишком много неудачных попыток аутентификации." + +#: ../nova/api/ec2/__init__.py:131 #, python-format -msgid "Unauthorized attempt to delete bucket %s" +msgid "" +"Access key %(access_key)s has had %(failures)d failed authentications and " +"will be locked out for %(lock_mins)d minutes." 
msgstr "" -#: nova/objectstore/handler.py:271 +#: ../nova/api/ec2/__init__.py:169 ../nova/objectstore/handler.py:140 #, python-format -msgid "Getting object: %s / %s" -msgstr "Получение объекта: %s / %s" +msgid "Authentication Failure: %s" +msgstr "Ошибка аутентификации: %s" -#: nova/objectstore/handler.py:274 +#: ../nova/api/ec2/__init__.py:182 #, python-format -msgid "Unauthorized attempt to get object %s from bucket %s" +msgid "Authenticated Request For %(uname)s:%(pname)s)" msgstr "" -#: nova/objectstore/handler.py:292 +#: ../nova/api/ec2/__init__.py:207 #, python-format -msgid "Putting object: %s / %s" -msgstr "Вставка объекта: %s / %s" +msgid "action: %s" +msgstr "действие: %s" -#: nova/objectstore/handler.py:295 +#: ../nova/api/ec2/__init__.py:209 #, python-format -msgid "Unauthorized attempt to upload object %s to bucket %s" +msgid "arg: %(key)s\t\tval: %(value)s" msgstr "" -#: nova/objectstore/handler.py:314 +#: ../nova/api/ec2/__init__.py:281 #, python-format -msgid "Deleting object: %s / %s" -msgstr "Удаление объекта: %s / %s" +msgid "" +"Unauthorized request for controller=%(controller)s and action=%(action)s" +msgstr "" -#: nova/objectstore/handler.py:393 +#: ../nova/api/ec2/__init__.py:314 #, python-format -msgid "Not authorized to upload image: invalid directory %s" +msgid "InstanceNotFound raised: %s" msgstr "" -#: nova/objectstore/handler.py:401 +#: ../nova/api/ec2/__init__.py:320 #, python-format -msgid "Not authorized to upload image: unauthorized bucket %s" +msgid "VolumeNotFound raised: %s" msgstr "" -#: nova/objectstore/handler.py:406 +#: ../nova/api/ec2/__init__.py:326 #, python-format -msgid "Starting image upload: %s" +msgid "NotFound raised: %s" msgstr "" -#: nova/objectstore/handler.py:420 +#: ../nova/api/ec2/__init__.py:329 #, python-format -msgid "Not authorized to update attributes of image %s" +msgid "ApiError raised: %s" msgstr "" -#: nova/objectstore/handler.py:428 +#: ../nova/api/ec2/__init__.py:338 #, python-format -msgid 
"Toggling publicity flag of image %s %r" +msgid "Unexpected error raised: %s" +msgstr "" + +#: ../nova/api/ec2/__init__.py:343 +msgid "An unknown error has occurred. Please try your request again." msgstr "" +"Произошла неизвестная ошибка. Пожалуйста, попытайтесь повторить ваш запрос." -#: nova/objectstore/handler.py:433 +#: ../nova/auth/dbdriver.py:84 #, python-format -msgid "Updating user fields on image %s" +msgid "User %s already exists" +msgstr "Пользователь %s уже существует" + +#: ../nova/auth/dbdriver.py:106 ../nova/auth/ldapdriver.py:232 +#, python-format +msgid "Project can't be created because manager %s doesn't exist" +msgstr "Проект не может быть создан поскольку менеджер %s не существует" + +#: ../nova/auth/dbdriver.py:122 ../nova/auth/ldapdriver.py:243 +#, python-format +msgid "Project can't be created because user %s doesn't exist" +msgstr "Проект не может быть создан поскольку пользователь %s не существует" + +#: ../nova/auth/dbdriver.py:135 ../nova/auth/ldapdriver.py:229 +#, python-format +msgid "Project can't be created because project %s already exists" +msgstr "Проект не может быть созан поскольку проект %s уже существует" + +#: ../nova/auth/dbdriver.py:157 ../nova/auth/ldapdriver.py:268 +#, python-format +msgid "Project can't be modified because manager %s doesn't exist" msgstr "" -#: nova/objectstore/handler.py:447 +#: ../nova/auth/dbdriver.py:245 #, python-format -msgid "Unauthorized attempt to delete image %s" +msgid "User \"%s\" not found" +msgstr "Пользователь \"%s\" не существует" + +#: ../nova/auth/dbdriver.py:248 +#, python-format +msgid "Project \"%s\" not found" +msgstr "Проект \"%s\" не найден" + +#: ../nova/virt/xenapi_conn.py:129 +msgid "" +"Must specify xenapi_connection_url, xenapi_connection_username (optionally), " +"and xenapi_connection_password to use connection_type=xenapi" msgstr "" -#: nova/objectstore/handler.py:452 +#: ../nova/virt/xenapi_conn.py:311 #, python-format -msgid "Deleted image: %s" -msgstr "Удаленное 
изображение: %s" +msgid "Task [%(name)s] %(task)s status: success %(result)s" +msgstr "" -#: nova/scheduler/chance.py:37 nova/scheduler/simple.py:73 -#: nova/scheduler/simple.py:106 nova/scheduler/simple.py:118 -msgid "No hosts found" +#: ../nova/virt/xenapi_conn.py:317 +#, python-format +msgid "Task [%(name)s] %(task)s status: %(status)s %(error_info)s" msgstr "" -#: nova/scheduler/driver.py:66 -msgid "Must implement a fallback schedule" +#: ../nova/virt/xenapi_conn.py:331 ../nova/virt/xenapi_conn.py:344 +#, python-format +msgid "Got exception: %s" msgstr "" -#: nova/scheduler/manager.py:69 +#: ../nova/compute/monitor.py:259 #, python-format -msgid "Casting to %s %s for %s" +msgid "updating %s..." +msgstr "обновление %s..." + +#: ../nova/compute/monitor.py:289 +msgid "unexpected error during update" +msgstr "неожиданная ошибка во время обновления" + +#: ../nova/compute/monitor.py:356 +#, python-format +msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\"" msgstr "" -#: nova/scheduler/simple.py:63 -msgid "All hosts have too many cores" +#: ../nova/compute/monitor.py:379 +#, python-format +msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\"" msgstr "" -#: nova/scheduler/simple.py:95 -msgid "All hosts have too many gigabytes" +#: ../nova/compute/monitor.py:414 +msgid "unexpected exception getting connection" msgstr "" -#: nova/scheduler/simple.py:115 -msgid "All hosts have too many networks" +#: ../nova/compute/monitor.py:429 +#, python-format +msgid "Found instance: %s" msgstr "" -#: nova/tests/test_cloud.py:198 -msgid "Can't test instances without a real virtual env." +#: ../nova/volume/san.py:67 +#, python-format +msgid "Could not find iSCSI export for volume %s" msgstr "" -#: nova/tests/test_cloud.py:210 +#: ../nova/api/ec2/apirequest.py:100 #, python-format -msgid "Need to watch instance %s until it's running..." 
+msgid "" +"Unsupported API request: controller = %(controller)s, action = %(action)s" msgstr "" -#: nova/tests/test_compute.py:104 +#: ../nova/api/openstack/__init__.py:55 #, python-format -msgid "Running instances: %s" +msgid "Caught error: %s" +msgstr "" + +#: ../nova/api/openstack/__init__.py:76 +msgid "Including admin operations in API." msgstr "" -#: nova/tests/test_compute.py:110 +#: ../nova/console/xvp.py:99 +msgid "Rebuilding xvp conf" +msgstr "" + +#: ../nova/console/xvp.py:116 #, python-format -msgid "After terminating instances: %s" +msgid "Re-wrote %s" msgstr "" -#: nova/tests/test_rpc.py:89 +#: ../nova/console/xvp.py:121 +msgid "Stopping xvp" +msgstr "" + +#: ../nova/console/xvp.py:134 +msgid "Starting xvp" +msgstr "" + +#: ../nova/console/xvp.py:141 #, python-format -msgid "Nested received %s, %s" +msgid "Error starting xvp: %s" +msgstr "" + +#: ../nova/console/xvp.py:144 +msgid "Restarting xvp" +msgstr "" + +#: ../nova/console/xvp.py:146 +msgid "xvp not running..." +msgstr "" + +#: ../bin/nova-manage.py:272 +msgid "" +"The above error may show that the database has not been created.\n" +"Please create a database using nova-manage sync db before running this " +"command." +msgstr "" + +#: ../bin/nova-manage.py:426 +msgid "" +"No more networks available. If this is a new installation, you need\n" +"to call something like this:\n" +"\n" +" nova-manage network create 10.0.0.0/8 10 64\n" +"\n" +msgstr "" + +#: ../bin/nova-manage.py:431 +msgid "" +"The above error may show that the certificate db has not been created.\n" +"Please create a database by running a nova-api server on this host." 
+msgstr "" + +#: ../bin/nova-manage.py:447 ../bin/nova-manage.py:536 +msgid "network" msgstr "" -#: nova/tests/test_rpc.py:94 +#: ../bin/nova-manage.py:448 +msgid "IP address" +msgstr "" + +#: ../bin/nova-manage.py:449 +msgid "MAC address" +msgstr "" + +#: ../bin/nova-manage.py:450 +msgid "hostname" +msgstr "" + +#: ../bin/nova-manage.py:451 +msgid "host" +msgstr "" + +#: ../bin/nova-manage.py:537 +msgid "netmask" +msgstr "" + +#: ../bin/nova-manage.py:538 +msgid "start address" +msgstr "" + +#: ../nova/virt/disk.py:69 #, python-format -msgid "Nested return %s" +msgid "Failed to load partition: %s" msgstr "" -#: nova/tests/test_rpc.py:119 nova/tests/test_rpc.py:125 +#: ../nova/virt/disk.py:91 #, python-format -msgid "Received %s" -msgstr "Получено %s" +msgid "Failed to mount filesystem: %s" +msgstr "Ошибка монтирования файловой системы: %s" -#: nova/tests/test_volume.py:162 +#: ../nova/virt/disk.py:124 #, python-format -msgid "Target %s allocated" +msgid "nbd device %s did not show up" msgstr "" -#: nova/virt/connection.py:73 -msgid "Failed to open connection to the hypervisor" +#: ../nova/virt/disk.py:128 +#, python-format +msgid "Could not attach image to loopback: %s" +msgstr "" + +#: ../nova/virt/disk.py:151 +msgid "No free nbd devices" msgstr "" -#: nova/virt/fake.py:210 +#: ../doc/ext/nova_todo.py:46 #, python-format -msgid "Instance %s Not Found" +msgid "%(filename)s, line %(line_info)d" msgstr "" -#: nova/virt/hyperv.py:118 +#. 
FIXME(chiradeep): implement this +#: ../nova/virt/hyperv.py:118 msgid "In init host" msgstr "" -#: nova/virt/hyperv.py:131 +#: ../nova/virt/hyperv.py:131 #, python-format msgid "Attempt to create duplicate vm %s" msgstr "" -#: nova/virt/hyperv.py:148 +#: ../nova/virt/hyperv.py:148 #, python-format msgid "Starting VM %s " msgstr "Запускается VM %s " -#: nova/virt/hyperv.py:150 +#: ../nova/virt/hyperv.py:150 #, python-format msgid "Started VM %s " msgstr "Запущен VM %s " -#: nova/virt/hyperv.py:152 +#: ../nova/virt/hyperv.py:152 #, python-format msgid "spawn vm failed: %s" msgstr "" -#: nova/virt/hyperv.py:169 +#: ../nova/virt/hyperv.py:169 #, python-format msgid "Failed to create VM %s" msgstr "" -#: nova/virt/hyperv.py:171 nova/virt/xenapi/vm_utils.py:125 -#, python-format -msgid "Created VM %s..." -msgstr "" - -#: nova/virt/hyperv.py:188 +#: ../nova/virt/hyperv.py:188 #, python-format msgid "Set memory for vm %s..." msgstr "" -#: nova/virt/hyperv.py:198 +#: ../nova/virt/hyperv.py:198 #, python-format msgid "Set vcpus for vm %s..." 
msgstr "" -#: nova/virt/hyperv.py:202 +#: ../nova/virt/hyperv.py:202 #, python-format -msgid "Creating disk for %s by attaching disk file %s" +msgid "Creating disk for %(vm_name)s by attaching disk file %(vhdfile)s" msgstr "" -#: nova/virt/hyperv.py:227 +#: ../nova/virt/hyperv.py:227 #, python-format msgid "Failed to add diskdrive to VM %s" msgstr "" -#: nova/virt/hyperv.py:230 +#: ../nova/virt/hyperv.py:230 #, python-format msgid "New disk drive path is %s" msgstr "" -#: nova/virt/hyperv.py:247 +#: ../nova/virt/hyperv.py:247 #, python-format msgid "Failed to add vhd file to VM %s" msgstr "" -#: nova/virt/hyperv.py:249 +#: ../nova/virt/hyperv.py:249 #, python-format msgid "Created disk for %s" msgstr "Создан диск для %s" -#: nova/virt/hyperv.py:253 +#: ../nova/virt/hyperv.py:253 #, python-format msgid "Creating nic for %s " msgstr "" -#: nova/virt/hyperv.py:272 +#: ../nova/virt/hyperv.py:272 msgid "Failed creating a port on the external vswitch" msgstr "" -#: nova/virt/hyperv.py:273 +#: ../nova/virt/hyperv.py:273 #, python-format msgid "Failed creating port for %s" msgstr "" -#: nova/virt/hyperv.py:275 +#: ../nova/virt/hyperv.py:276 #, python-format -msgid "Created switch port %s on switch %s" +msgid "Created switch port %(vm_name)s on switch %(ext_path)s" msgstr "" -#: nova/virt/hyperv.py:285 +#: ../nova/virt/hyperv.py:286 #, python-format msgid "Failed to add nic to VM %s" msgstr "" -#: nova/virt/hyperv.py:287 +#: ../nova/virt/hyperv.py:288 #, python-format msgid "Created nic for %s " msgstr "" -#: nova/virt/hyperv.py:320 +#: ../nova/virt/hyperv.py:321 #, python-format msgid "WMI job failed: %s" msgstr "" -#: nova/virt/hyperv.py:322 +#: ../nova/virt/hyperv.py:325 #, python-format -msgid "WMI job succeeded: %s, Elapsed=%s " +msgid "WMI job succeeded: %(desc)s, Elapsed=%(elap)s " msgstr "" -#: nova/virt/hyperv.py:358 +#: ../nova/virt/hyperv.py:361 #, python-format msgid "Got request to destroy vm %s" msgstr "" -#: nova/virt/hyperv.py:383 +#: 
../nova/virt/hyperv.py:386 #, python-format msgid "Failed to destroy vm %s" msgstr "" -#: nova/virt/hyperv.py:389 +#: ../nova/virt/hyperv.py:393 #, python-format -msgid "Del: disk %s vm %s" +msgid "Del: disk %(vhdfile)s vm %(instance_name)s" msgstr "" -#: nova/virt/hyperv.py:405 +#: ../nova/virt/hyperv.py:415 #, python-format msgid "" -"Got Info for vm %s: state=%s, mem=%s, num_cpu=%s, " -"cpu_time=%s" +"Got Info for vm %(instance_id)s: state=%(state)s, mem=%(memusage)s, " +"num_cpu=%(numprocs)s, cpu_time=%(uptime)s" msgstr "" -#: nova/virt/hyperv.py:424 nova/virt/xenapi/vm_utils.py:301 +#: ../nova/virt/hyperv.py:451 #, python-format -msgid "duplicate name found: %s" +msgid "Successfully changed vm state of %(vm_name)s to %(req_state)s" msgstr "" -#: nova/virt/hyperv.py:444 +#: ../nova/virt/hyperv.py:454 #, python-format -msgid "Successfully changed vm state of %s to %s" +msgid "Failed to change vm state of %(vm_name)s to %(req_state)s" msgstr "" -#: nova/virt/hyperv.py:447 nova/virt/hyperv.py:449 +#: ../nova/compute/api.py:71 #, python-format -msgid "Failed to change vm state of %s to %s" +msgid "Instance %d was not found in get_network_topic" msgstr "" -#: nova/virt/images.py:70 +#: ../nova/compute/api.py:77 #, python-format -msgid "Finished retreving %s -- placed in %s" +msgid "Instance %d has no host" msgstr "" -#: nova/virt/libvirt_conn.py:144 +#: ../nova/compute/api.py:97 #, python-format -msgid "Connecting to libvirt: %s" +msgid "Quota exceeeded for %(pid)s, tried to run %(min_count)s instances" msgstr "" -#: nova/virt/libvirt_conn.py:157 -msgid "Connection to libvirt broke" +#: ../nova/compute/api.py:99 +#, python-format +msgid "" +"Instance quota exceeded. You can only run %s more instances of this type." 
msgstr "" -#: nova/virt/libvirt_conn.py:229 -#, python-format -msgid "instance %s: deleting instance files %s" +#: ../nova/compute/api.py:112 +msgid "Creating a raw instance" msgstr "" -#: nova/virt/libvirt_conn.py:271 +#: ../nova/compute/api.py:160 #, python-format -msgid "No disk at %s" -msgstr "Нет диска в %s" - -#: nova/virt/libvirt_conn.py:278 -msgid "Instance snapshotting is not supported for libvirtat this time" +msgid "Going to run %s instances..." msgstr "" -#: nova/virt/libvirt_conn.py:294 +#: ../nova/compute/api.py:187 #, python-format -msgid "instance %s: rebooted" +msgid "Casting to scheduler for %(pid)s/%(uid)s's instance %(instance_id)s" msgstr "" -#: nova/virt/libvirt_conn.py:297 +#: ../nova/compute/api.py:292 #, python-format -msgid "_wait_for_reboot failed: %s" +msgid "Going to try to terminate %s" msgstr "" -#: nova/virt/libvirt_conn.py:340 +#: ../nova/compute/api.py:296 #, python-format -msgid "instance %s: rescued" +msgid "Instance %d was not found during terminate" msgstr "" -#: nova/virt/libvirt_conn.py:343 +#: ../nova/compute/api.py:301 #, python-format -msgid "_wait_for_rescue failed: %s" +msgid "Instance %d is already being terminated" msgstr "" -#: nova/virt/libvirt_conn.py:370 +#: ../nova/compute/api.py:481 #, python-format -msgid "instance %s: is running" +msgid "Invalid device specified: %s. Example device: /dev/vdb" msgstr "" -#: nova/virt/libvirt_conn.py:381 +#: ../nova/compute/api.py:496 +msgid "Volume isn't attached to anything!" +msgstr "" + +#: ../nova/rpc.py:98 #, python-format -msgid "instance %s: booted" +msgid "" +"AMQP server on %(fl_host)s:%(fl_port)d is unreachable. Trying again in " +"%(fl_intv)d seconds." msgstr "" -#: nova/virt/libvirt_conn.py:384 nova/virt/xenapi/vmops.py:116 +#: ../nova/rpc.py:103 #, python-format -msgid "instance %s: failed to boot" +msgid "Unable to connect to AMQP server after %d tries. Shutting down." +msgstr "Не удалось подключиться к серверу AMQP после %d попыток. Выключение." 
+ +#: ../nova/rpc.py:122 +msgid "Reconnected to queue" +msgstr "Переподлючено к очереди" + +#: ../nova/rpc.py:129 +msgid "Failed to fetch message from queue" +msgstr "Не удалось получить сообщение из очереди" + +#: ../nova/rpc.py:159 +#, python-format +msgid "Initing the Adapter Consumer for %s" msgstr "" -#: nova/virt/libvirt_conn.py:395 +#: ../nova/rpc.py:178 #, python-format -msgid "virsh said: %r" +msgid "received %s" +msgstr "получено %s" + +#. NOTE(vish): we may not want to ack here, but that means that bad +#. messages stay in the queue indefinitely, so for now +#. we just log the message and send an error string +#. back to the caller +#: ../nova/rpc.py:191 +#, python-format +msgid "no method for message: %s" +msgstr "не определен метод для сообщения: %s" + +#: ../nova/rpc.py:192 +#, python-format +msgid "No method for message: %s" +msgstr "Не определен метод для сообщения: %s" + +#: ../nova/rpc.py:253 +#, python-format +msgid "Returning exception %s to caller" msgstr "" -#: nova/virt/libvirt_conn.py:399 -msgid "cool, it's a device" +#: ../nova/rpc.py:294 +#, python-format +msgid "unpacked context: %s" msgstr "" -#: nova/virt/libvirt_conn.py:407 +#: ../nova/rpc.py:313 +msgid "Making asynchronous call..." +msgstr "Выполняется асинхронный вызов..." + +#: ../nova/rpc.py:316 #, python-format -msgid "data: %r, fpath: %r" +msgid "MSG_ID is %s" +msgstr "MSG_ID is %s" + +#: ../nova/rpc.py:354 +msgid "Making asynchronous cast..." msgstr "" -#: nova/virt/libvirt_conn.py:415 +#: ../nova/rpc.py:364 +#, python-format +msgid "response %s" +msgstr "ответ %s" + +#: ../nova/rpc.py:373 +#, python-format +msgid "topic is %s" +msgstr "тема %s" + +#: ../nova/rpc.py:374 +#, python-format +msgid "message %s" +msgstr "сообщение %s" + +#: ../nova/volume/driver.py:78 #, python-format -msgid "Contents of file %s: %r" +msgid "Recovering from a failed execute. 
Try number %s" msgstr "" -#: nova/virt/libvirt_conn.py:449 +#: ../nova/volume/driver.py:87 #, python-format -msgid "instance %s: Creating image" +msgid "volume group %s doesn't exist" msgstr "" -#: nova/virt/libvirt_conn.py:505 +#: ../nova/volume/driver.py:220 #, python-format -msgid "instance %s: injecting key into image %s" +msgid "FAKE AOE: %s" msgstr "" -#: nova/virt/libvirt_conn.py:508 +#: ../nova/volume/driver.py:233 +msgid "Skipping ensure_export. No iscsi_target " +msgstr "" + +#: ../nova/volume/driver.py:279 ../nova/volume/driver.py:288 +msgid "Skipping remove_export. No iscsi_target " +msgstr "" + +#: ../nova/volume/driver.py:347 #, python-format -msgid "instance %s: injecting net into image %s" +msgid "FAKE ISCSI: %s" msgstr "" -#: nova/virt/libvirt_conn.py:516 +#: ../nova/volume/driver.py:359 #, python-format -msgid "instance %s: ignoring error injecting data into image %s (%s)" +msgid "rbd has no pool %s" msgstr "" -#: nova/virt/libvirt_conn.py:544 nova/virt/libvirt_conn.py:547 +#: ../nova/volume/driver.py:414 #, python-format -msgid "instance %s: starting toXML method" +msgid "Sheepdog is not working: %s" +msgstr "" + +#: ../nova/volume/driver.py:416 +msgid "Sheepdog is not working" msgstr "" -#: nova/virt/libvirt_conn.py:589 +#: ../nova/wsgi.py:68 #, python-format -msgid "instance %s: finished toXML method" +msgid "Starting %(arg0)s on %(host)s:%(port)s" +msgstr "" + +#: ../nova/wsgi.py:147 +msgid "You must implement __call__" +msgstr "" + +#: ../bin/nova-instancemonitor.py:55 +msgid "Starting instance monitor" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:58 +msgid "leasing ip" msgstr "" -#: nova/virt/xenapi_conn.py:113 +#: ../bin/nova-dhcpbridge.py:73 +msgid "Adopted old lease or got a change of mac/hostname" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:80 +msgid "releasing ip" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:123 +#, python-format msgid "" -"Must specify xenapi_connection_url, xenapi_connection_username (optionally), " -"and 
xenapi_connection_password to use connection_type=xenapi" +"Called %(action)s for mac %(mac)s with ip %(ip)s and hostname %(hostname)s " +"on interface %(interface)s" +msgstr "" + +#: ../nova/virt/fake.py:239 +#, python-format +msgid "Instance %s Not Found" msgstr "" -#: nova/virt/xenapi_conn.py:263 +#: ../nova/network/manager.py:153 #, python-format -msgid "Task [%s] %s status: success %s" +msgid "Dissassociated %s stale fixed ip(s)" +msgstr "" + +#: ../nova/network/manager.py:157 +msgid "setting network host" msgstr "" -#: nova/virt/xenapi_conn.py:271 +#: ../nova/network/manager.py:212 #, python-format -msgid "Task [%s] %s status: %s %s" +msgid "Leasing IP %s" msgstr "" -#: nova/virt/xenapi_conn.py:287 nova/virt/xenapi_conn.py:300 +#: ../nova/network/manager.py:216 #, python-format -msgid "Got exception: %s" +msgid "IP %s leased that isn't associated" msgstr "" -#: nova/virt/xenapi/fake.py:72 +#: ../nova/network/manager.py:220 #, python-format -msgid "%s: _db_content => %s" -msgstr "%s: _db_content => %s" +msgid "IP %(address)s leased to bad mac %(inst_addr)s vs %(mac)s" +msgstr "" -#: nova/virt/xenapi/fake.py:247 nova/virt/xenapi/fake.py:338 -#: nova/virt/xenapi/fake.py:356 nova/virt/xenapi/fake.py:404 -msgid "Raising NotImplemented" +#: ../nova/network/manager.py:228 +#, python-format +msgid "IP %s leased that was already deallocated" msgstr "" -#: nova/virt/xenapi/fake.py:249 +#: ../nova/network/manager.py:233 #, python-format -msgid "xenapi.fake does not have an implementation for %s" +msgid "Releasing IP %s" msgstr "" -#: nova/virt/xenapi/fake.py:283 +#: ../nova/network/manager.py:237 #, python-format -msgid "Calling %s %s" -msgstr "Звонок %s %s" +msgid "IP %s released that isn't associated" +msgstr "" -#: nova/virt/xenapi/fake.py:288 +#: ../nova/network/manager.py:241 #, python-format -msgid "Calling getter %s" +msgid "IP %(address)s released from bad mac %(inst_addr)s vs %(mac)s" msgstr "" -#: nova/virt/xenapi/fake.py:340 +#: ../nova/network/manager.py:244 
#, python-format +msgid "IP %s released that was not leased" +msgstr "" + +#: ../nova/network/manager.py:519 msgid "" -"xenapi.fake does not have an implementation for %s or it has been called " -"with the wrong number of arguments" +"The sum between the number of networks and the vlan start cannot be greater " +"than 4094" msgstr "" -#: nova/virt/xenapi/network_utils.py:40 +#: ../nova/virt/xenapi/volume_utils.py:57 #, python-format -msgid "Found non-unique network for bridge %s" +msgid "Introducing %s..." msgstr "" -#: nova/virt/xenapi/network_utils.py:43 +#: ../nova/virt/xenapi/volume_utils.py:74 #, python-format -msgid "Found no network for bridge %s" +msgid "Introduced %(label)s as %(sr_ref)s." msgstr "" -#: nova/virt/xenapi/vm_utils.py:127 +#: ../nova/virt/xenapi/volume_utils.py:78 +msgid "Unable to create Storage Repository" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:90 #, python-format -msgid "Created VM %s as %s." +msgid "Unable to find SR from VBD %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:147 +#: ../nova/virt/xenapi/volume_utils.py:96 #, python-format -msgid "Creating VBD for VM %s, VDI %s ... " +msgid "Forgetting SR %s ... " msgstr "" -#: nova/virt/xenapi/vm_utils.py:149 +#: ../nova/virt/xenapi/volume_utils.py:101 #, python-format -msgid "Created VBD %s for VM %s, VDI %s." +msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:165 +#: ../nova/virt/xenapi/volume_utils.py:107 #, python-format -msgid "VBD not found in instance %s" +msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:175 +#: ../nova/virt/xenapi/volume_utils.py:111 #, python-format -msgid "Unable to unplug VBD %s" +msgid "Forgetting SR %s done." 
msgstr "" -#: nova/virt/xenapi/vm_utils.py:187 +#: ../nova/virt/xenapi/volume_utils.py:113 #, python-format -msgid "Unable to destroy VBD %s" +msgid "Ignoring exception %(exc)s when forgetting SR %(sr_ref)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:202 +#: ../nova/virt/xenapi/volume_utils.py:123 #, python-format -msgid "Creating VIF for VM %s, network %s." +msgid "Unable to introduce VDI on SR %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:205 +#: ../nova/virt/xenapi/volume_utils.py:128 #, python-format -msgid "Created VIF %s for VM %s, network %s." +msgid "Unable to get record of VDI %s on" msgstr "" -#: nova/virt/xenapi/vm_utils.py:216 +#: ../nova/virt/xenapi/volume_utils.py:146 #, python-format -msgid "Snapshotting VM %s with label '%s'..." +msgid "Unable to introduce VDI for SR %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:229 +#: ../nova/virt/xenapi/volume_utils.py:175 #, python-format -msgid "Created snapshot %s from VM %s." +msgid "Unable to obtain target information %(device_path)s, %(mountpoint)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:243 +#: ../nova/virt/xenapi/volume_utils.py:197 #, python-format -msgid "Asking xapi to upload %s as '%s'" +msgid "Mountpoint cannot be translated: %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:261 +#: ../nova/objectstore/image.py:262 #, python-format -msgid "Asking xapi to fetch %s as %s" +msgid "Failed to decrypt private key: %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:279 +#: ../nova/objectstore/image.py:269 #, python-format -msgid "Looking up vdi %s for PV kernel" +msgid "Failed to decrypt initialization vector: %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:290 +#: ../nova/objectstore/image.py:277 #, python-format -msgid "PV Kernel in VDI:%d" +msgid "Failed to decrypt image file %(image_file)s: %(err)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:318 +#: ../nova/objectstore/handler.py:106 #, python-format -msgid "VDI %s is still available" +msgid "Unknown S3 value type %r" +msgstr "" + +#: 
../nova/objectstore/handler.py:137 +msgid "Authenticated request" msgstr "" -#: nova/virt/xenapi/vm_utils.py:331 +#: ../nova/objectstore/handler.py:182 +msgid "List of buckets requested" +msgstr "" + +#: ../nova/objectstore/handler.py:209 #, python-format -msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgid "List keys for bucket %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:333 +#: ../nova/objectstore/handler.py:217 #, python-format -msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgid "Unauthorized attempt to access bucket %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:390 +#: ../nova/objectstore/handler.py:235 #, python-format -msgid "VHD %s has parent %s" +msgid "Creating bucket %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:407 +#: ../nova/objectstore/handler.py:245 #, python-format -msgid "Re-scanning SR %s" +msgid "Deleting bucket %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:431 +#: ../nova/objectstore/handler.py:249 #, python-format -msgid "Parent %s doesn't match original parent %s, waiting for coalesce..." +msgid "Unauthorized attempt to delete bucket %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:448 +#: ../nova/objectstore/handler.py:273 #, python-format -msgid "No VDIs found for VM %s" +msgid "Getting object: %(bname)s / %(nm)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:452 +#: ../nova/objectstore/handler.py:276 #, python-format -msgid "Unexpected number of VDIs (%s) found for VM %s" +msgid "Unauthorized attempt to get object %(nm)s from bucket %(bname)s" msgstr "" -#: nova/virt/xenapi/vmops.py:62 +#: ../nova/objectstore/handler.py:296 #, python-format -msgid "Attempted to create non-unique name %s" +msgid "Putting object: %(bname)s / %(nm)s" msgstr "" -#: nova/virt/xenapi/vmops.py:99 +#: ../nova/objectstore/handler.py:299 #, python-format -msgid "Starting VM %s..." 
+msgid "Unauthorized attempt to upload object %(nm)s to bucket %(bname)s" msgstr "" -#: nova/virt/xenapi/vmops.py:101 +#: ../nova/objectstore/handler.py:318 #, python-format -msgid "Spawning VM %s created %s." +msgid "Deleting object: %(bname)s / %(nm)s" msgstr "" -#: nova/virt/xenapi/vmops.py:112 +#: ../nova/objectstore/handler.py:322 #, python-format -msgid "Instance %s: booted" +msgid "Unauthorized attempt to delete object %(nm)s from bucket %(bname)s" msgstr "" -#: nova/virt/xenapi/vmops.py:137 +#: ../nova/objectstore/handler.py:396 #, python-format -msgid "Instance not present %s" +msgid "Not authorized to upload image: invalid directory %s" msgstr "" -#: nova/virt/xenapi/vmops.py:166 +#: ../nova/objectstore/handler.py:404 #, python-format -msgid "Starting snapshot for VM %s" +msgid "Not authorized to upload image: unauthorized bucket %s" msgstr "" -#: nova/virt/xenapi/vmops.py:174 +#: ../nova/objectstore/handler.py:409 #, python-format -msgid "Unable to Snapshot %s: %s" +msgid "Starting image upload: %s" msgstr "" -#: nova/virt/xenapi/vmops.py:184 +#: ../nova/objectstore/handler.py:423 #, python-format -msgid "Finished snapshot and upload for VM %s" +msgid "Not authorized to update attributes of image %s" msgstr "" -#: nova/virt/xenapi/vmops.py:252 +#: ../nova/objectstore/handler.py:431 #, python-format -msgid "suspend: instance not present %s" +msgid "Toggling publicity flag of image %(image_id)s %(newstatus)r" msgstr "" -#: nova/virt/xenapi/vmops.py:262 +#. other attributes imply update +#: ../nova/objectstore/handler.py:436 #, python-format -msgid "resume: instance not present %s" +msgid "Updating user fields on image %s" msgstr "" -#: nova/virt/xenapi/vmops.py:271 +#: ../nova/objectstore/handler.py:450 #, python-format -msgid "Instance not found %s" +msgid "Unauthorized attempt to delete image %s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:57 +#: ../nova/objectstore/handler.py:455 #, python-format -msgid "Introducing %s..." 
+msgid "Deleted image: %s" +msgstr "Удаленное изображение: %s" + +#: ../nova/auth/manager.py:259 +#, python-format +msgid "Looking up user: %r" msgstr "" -#: nova/virt/xenapi/volume_utils.py:74 +#: ../nova/auth/manager.py:263 #, python-format -msgid "Introduced %s as %s." +msgid "Failed authorization for access key %s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:78 -msgid "Unable to create Storage Repository" +#: ../nova/auth/manager.py:264 +#, python-format +msgid "No user found for access key %s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:90 +#: ../nova/auth/manager.py:270 #, python-format -msgid "Unable to find SR from VBD %s" +msgid "Using project name = user name (%s)" msgstr "" -#: nova/virt/xenapi/volume_utils.py:96 +#: ../nova/auth/manager.py:277 #, python-format -msgid "Forgetting SR %s ... " +msgid "failed authorization: no project named %(pjid)s (user=%(uname)s)" msgstr "" -#: nova/virt/xenapi/volume_utils.py:101 +#: ../nova/auth/manager.py:279 #, python-format -msgid "Ignoring exception %s when getting PBDs for %s" +msgid "No project called %s could be found" msgstr "" -#: nova/virt/xenapi/volume_utils.py:107 +#: ../nova/auth/manager.py:287 #, python-format -msgid "Ignoring exception %s when unplugging PBD %s" +msgid "" +"Failed authorization: user %(uname)s not admin and not member of project " +"%(pjname)s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:111 +#: ../nova/auth/manager.py:289 #, python-format -msgid "Forgetting SR %s done." 
+msgid "User %(uid)s is not a member of project %(pjid)s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:113 +#: ../nova/auth/manager.py:298 ../nova/auth/manager.py:309 +#, python-format +msgid "Invalid signature for user %s" +msgstr "Не допустимая подпись для пользователя %s" + +#: ../nova/auth/manager.py:299 ../nova/auth/manager.py:310 +msgid "Signature does not match" +msgstr "Подпись не совпадает" + +#: ../nova/auth/manager.py:380 +msgid "Must specify project" +msgstr "Необходимо указать проект" + +#: ../nova/auth/manager.py:414 #, python-format -msgid "Ignoring exception %s when forgetting SR %s" +msgid "The %s role can not be found" +msgstr "Роль %s не может быть найдена" + +#: ../nova/auth/manager.py:416 +#, python-format +msgid "The %s role is global only" msgstr "" -#: nova/virt/xenapi/volume_utils.py:123 +#: ../nova/auth/manager.py:420 #, python-format -msgid "Unable to introduce VDI on SR %s" +msgid "Adding role %(role)s to user %(uid)s in project %(pid)s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:128 +#: ../nova/auth/manager.py:423 #, python-format -msgid "Unable to get record of VDI %s on" +msgid "Adding sitewide role %(role)s to user %(uid)s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:146 +#: ../nova/auth/manager.py:448 #, python-format -msgid "Unable to introduce VDI for SR %s" +msgid "Removing role %(role)s from user %(uid)s on project %(pid)s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:175 +#: ../nova/auth/manager.py:451 #, python-format -msgid "Unable to obtain target information %s, %s" +msgid "Removing sitewide role %(role)s from user %(uid)s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:197 +#: ../nova/auth/manager.py:515 #, python-format -msgid "Mountpoint cannot be translated: %s" +msgid "Created project %(name)s with manager %(manager_user)s" msgstr "" -#: nova/virt/xenapi/volumeops.py:51 +#: ../nova/auth/manager.py:533 #, python-format -msgid "Attach_volume: %s, %s, %s" +msgid "modifying project %s" +msgstr "изменение проекта 
%s" + +#: ../nova/auth/manager.py:545 +#, python-format +msgid "Adding user %(uid)s to project %(pid)s" msgstr "" -#: nova/virt/xenapi/volumeops.py:69 +#: ../nova/auth/manager.py:566 #, python-format -msgid "Unable to create VDI on SR %s for instance %s" +msgid "Remove user %(uid)s from project %(pid)s" msgstr "" -#: nova/virt/xenapi/volumeops.py:81 +#: ../nova/auth/manager.py:592 #, python-format -msgid "Unable to use SR %s for instance %s" +msgid "Deleting project %s" +msgstr "Удаление проекта %s" + +#: ../nova/auth/manager.py:650 +#, python-format +msgid "Created user %(rvname)s (admin: %(rvadmin)r)" msgstr "" -#: nova/virt/xenapi/volumeops.py:93 +#: ../nova/auth/manager.py:659 #, python-format -msgid "Unable to attach volume to instance %s" +msgid "Deleting user %s" +msgstr "Удаление пользователя %s" + +#: ../nova/auth/manager.py:669 +#, python-format +msgid "Access Key change for user %s" msgstr "" -#: nova/virt/xenapi/volumeops.py:95 +#: ../nova/auth/manager.py:671 #, python-format -msgid "Mountpoint %s attached to instance %s" +msgid "Secret Key change for user %s" msgstr "" -#: nova/virt/xenapi/volumeops.py:106 +#: ../nova/auth/manager.py:673 #, python-format -msgid "Detach_volume: %s, %s" +msgid "Admin status set to %(admin)r for user %(uid)s" msgstr "" -#: nova/virt/xenapi/volumeops.py:113 +#: ../nova/auth/manager.py:722 #, python-format -msgid "Unable to locate volume %s" +msgid "No vpn data for project %s" +msgstr "Нет vpn данных для проекта %s" + +#: ../nova/service.py:161 +#, python-format +msgid "Starting %(topic)s node (version %(vcs_string)s)" +msgstr "" + +#: ../nova/service.py:174 +msgid "Service killed that has no database entry" +msgstr "" + +#: ../nova/service.py:195 +msgid "The service database object disappeared, Recreating it." +msgstr "Объект сервиса в базе данных отсутствует, Повторное создание." + +#: ../nova/service.py:207 +msgid "Recovered model server connection!" 
msgstr "" -#: nova/virt/xenapi/volumeops.py:121 +#: ../nova/service.py:213 +msgid "model server went away" +msgstr "" + +#: ../nova/auth/ldapdriver.py:174 #, python-format -msgid "Unable to detach volume %s" +msgid "LDAP user %s already exists" msgstr "" -#: nova/virt/xenapi/volumeops.py:128 +#: ../nova/auth/ldapdriver.py:205 +#, python-format +msgid "LDAP object for %s doesn't exist" +msgstr "Объект LDAP %s не существует" + +#: ../nova/auth/ldapdriver.py:348 #, python-format -msgid "Mountpoint %s detached from instance %s" +msgid "User %s doesn't exist" msgstr "" -#: nova/volume/api.py:44 +#: ../nova/auth/ldapdriver.py:472 #, python-format -msgid "Quota exceeeded for %s, tried to create %sG volume" +msgid "Group can't be created because group %s already exists" msgstr "" -#: nova/volume/api.py:46 +#: ../nova/auth/ldapdriver.py:478 #, python-format -msgid "Volume quota exceeded. You cannot create a volume of size %s" +msgid "Group can't be created because user %s doesn't exist" msgstr "" -#: nova/volume/api.py:70 nova/volume/api.py:95 -msgid "Volume status must be available" +#: ../nova/auth/ldapdriver.py:495 +#, python-format +msgid "User %s can't be searched in group because the user doesn't exist" msgstr "" -#: nova/volume/api.py:97 -msgid "Volume is already attached" +#: ../nova/auth/ldapdriver.py:507 +#, python-format +msgid "User %s can't be added to the group because the user doesn't exist" msgstr "" -#: nova/volume/api.py:103 -msgid "Volume is already detached" +#: ../nova/auth/ldapdriver.py:510 ../nova/auth/ldapdriver.py:521 +#, python-format +msgid "The group at dn %s doesn't exist" msgstr "" -#: nova/volume/driver.py:76 +#: ../nova/auth/ldapdriver.py:513 #, python-format -msgid "Recovering from a failed execute. 
Try number %s" +msgid "User %(uid)s is already a member of the group %(group_dn)s" msgstr "" -#: nova/volume/driver.py:85 +#: ../nova/auth/ldapdriver.py:524 #, python-format -msgid "volume group %s doesn't exist" +msgid "" +"User %s can't be removed from the group because the user doesn't exist" msgstr "" -#: nova/volume/driver.py:210 +#: ../nova/auth/ldapdriver.py:528 #, python-format -msgid "FAKE AOE: %s" +msgid "User %s is not a member of the group" msgstr "" -#: nova/volume/driver.py:315 +#: ../nova/auth/ldapdriver.py:542 #, python-format -msgid "FAKE ISCSI: %s" +msgid "" +"Attempted to remove the last member of a group. Deleting the group at %s " +"instead." msgstr "" -#: nova/volume/manager.py:85 +#: ../nova/auth/ldapdriver.py:549 #, python-format -msgid "Re-exporting %s volumes" +msgid "User %s can't be removed from all because the user doesn't exist" msgstr "" -#: nova/volume/manager.py:93 +#: ../nova/auth/ldapdriver.py:564 #, python-format -msgid "volume %s: creating" +msgid "Group at dn %s doesn't exist" msgstr "" -#: nova/volume/manager.py:102 +#: ../nova/virt/xenapi/network_utils.py:40 #, python-format -msgid "volume %s: creating lv of size %sG" +msgid "Found non-unique network for bridge %s" msgstr "" -#: nova/volume/manager.py:106 +#: ../nova/virt/xenapi/network_utils.py:43 #, python-format -msgid "volume %s: creating export" +msgid "Found no network for bridge %s" msgstr "" -#: nova/volume/manager.py:113 +#: ../nova/api/ec2/admin.py:97 #, python-format -msgid "volume %s: created successfully" +msgid "Creating new user: %s" +msgstr "Создание нового пользователя: %s" + +#: ../nova/api/ec2/admin.py:105 +#, python-format +msgid "Deleting user: %s" +msgstr "Удаление пользователя: %s" + +#: ../nova/api/ec2/admin.py:127 +#, python-format +msgid "Adding role %(role)s to user %(user)s for project %(project)s" msgstr "" -#: nova/volume/manager.py:121 -msgid "Volume is still attached" +#: ../nova/api/ec2/admin.py:131 +#, python-format +msgid "Adding sitewide 
role %(role)s to user %(user)s" msgstr "" -#: nova/volume/manager.py:123 -msgid "Volume is not local to this node" +#: ../nova/api/ec2/admin.py:137 +#, python-format +msgid "Removing role %(role)s from user %(user)s for project %(project)s" msgstr "" -#: nova/volume/manager.py:124 +#: ../nova/api/ec2/admin.py:141 #, python-format -msgid "volume %s: removing export" +msgid "Removing sitewide role %(role)s from user %(user)s" msgstr "" -#: nova/volume/manager.py:126 +#: ../nova/api/ec2/admin.py:146 ../nova/api/ec2/admin.py:223 +msgid "operation must be add or remove" +msgstr "" + +#: ../nova/api/ec2/admin.py:159 #, python-format -msgid "volume %s: deleting" +msgid "Getting x509 for user: %(name)s on project: %(project)s" msgstr "" -#: nova/volume/manager.py:129 +#: ../nova/api/ec2/admin.py:177 #, python-format -msgid "volume %s: deleted successfully" +msgid "Create project %(name)s managed by %(manager_user)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:190 +#, python-format +msgid "Modify project: %(name)s managed by %(manager_user)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:200 +#, python-format +msgid "Delete project: %s" +msgstr "Удалить проект: %s" + +#: ../nova/api/ec2/admin.py:214 +#, python-format +msgid "Adding user %(user)s to project %(project)s" msgstr "" + +#: ../nova/api/ec2/admin.py:218 +#, python-format +msgid "Removing user %(user)s from project %(project)s" +msgstr "" + +#, python-format +#~ msgid "arg: %s\t\tval: %s" +#~ msgstr "arg: %s\t\tval: %s" + +#, python-format +#~ msgid "Adding role %s to user %s for project %s" +#~ msgstr "Добавление роли %s для пользователя %s для проекта %s" + +#, python-format +#~ msgid "Removing role %s from user %s for project %s" +#~ msgstr "Удаление роли %s пользователя %s для проекта %s" + +#, python-format +#~ msgid "Create project %s managed by %s" +#~ msgstr "Создать проект %s под управлением %s" + +#, python-format +#~ msgid "Removing user %s from project %s" +#~ msgstr "Удаление пользователя %s с проекта %s" + 
+#, python-format +#~ msgid "Adding user %s to project %s" +#~ msgstr "Добавление пользователя %s к проекту %s" + +#, python-format +#~ msgid "User %s is already a member of the group %s" +#~ msgstr "Пользователь %s уже член группы %s" + +#, python-format +#~ msgid "User %s is not a member of project %s" +#~ msgstr "Пользователь %s не является членом группы %s" + +#, python-format +#~ msgid "Created project %s with manager %s" +#~ msgstr "Создан проект %s под управлением %s" + +#, python-format +#~ msgid "Removing role %s from user %s on project %s" +#~ msgstr "Удаление роли %s пользователя %s в проекте %s" + +#, python-format +#~ msgid "Remove user %s from project %s" +#~ msgstr "Удалить пользователя %s из проекта %s" + +#, python-format +#~ msgid "Created user %s (admin: %r)" +#~ msgstr "Создан пользователь %s (администратор: %r)" + +#, python-format +#~ msgid "Adding role %s to user %s in project %s" +#~ msgstr "Добавление роли %s для пользователя %s в проект %s" + +#, python-format +#~ msgid "Getting object: %s / %s" +#~ msgstr "Получение объекта: %s / %s" + +#, python-format +#~ msgid "Deleting object: %s / %s" +#~ msgstr "Удаление объекта: %s / %s" + +#, python-format +#~ msgid "%s: _db_content => %s" +#~ msgstr "%s: _db_content => %s" + +#, python-format +#~ msgid "Calling %s %s" +#~ msgstr "Звонок %s %s" + +#, python-format +#~ msgid "" +#~ "%s\n" +#~ "Command: %s\n" +#~ "Exit code: %s\n" +#~ "Stdout: %r\n" +#~ "Stderr: %r" +#~ msgstr "" +#~ "%s\n" +#~ "Команда: %s\n" +#~ "Код завершения: %s\n" +#~ "Stdout: %r\n" +#~ "Stderr: %r" + +#, python-format +#~ msgid "AMQP server on %s:%d is unreachable. Trying again in %d seconds." +#~ msgstr "AMQP сервер %s:%d недоступен. Повторная попытка через %d секунд." + +#, python-format +#~ msgid "Putting object: %s / %s" +#~ msgstr "Вставка объекта: %s / %s" + +#, python-format +#~ msgid "Starting %s node" +#~ msgstr "Запускается нода %s" + +#, python-format +#~ msgid "Data store %s is unreachable. 
Trying again in %d seconds." +#~ msgstr "Хранилище данных %s недоступно. Повторная попытка через %d секунд." + +#, python-format +#~ msgid "Couldn't get IP, using 127.0.0.1 %s" +#~ msgstr "Не удалось получить IP, используем 127.0.0.1 %s" + +#, python-format +#~ msgid "Getting from %s: %s" +#~ msgstr "Получение из %s: %s" + +#, python-format +#~ msgid "" +#~ "Access key %s has had %d failed authentications and will be locked out for " +#~ "%d minutes." +#~ msgstr "" +#~ "Ключ доступа %s имеет %d неудачных попыток аутентификации и будет " +#~ "заблокирован на %d минут." + +#, python-format +#~ msgid "Authenticated Request For %s:%s)" +#~ msgstr "Запрос аутентификации для %s:%s)" @@ -7,2129 +7,2876 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n" -"POT-Creation-Date: 2011-01-10 11:25-0800\n" +"POT-Creation-Date: 2011-02-21 10:03-0500\n" "PO-Revision-Date: 2011-02-03 22:02+0000\n" "Last-Translator: Wladimir Rossinski <Unknown>\n" "Language-Team: Ukrainian <uk@li.org>\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Launchpad-Export-Date: 2011-02-05 05:36+0000\n" -"X-Generator: Launchpad (build 12177)\n" +"X-Launchpad-Export-Date: 2011-03-19 06:19+0000\n" +"X-Generator: Launchpad (build 12559)\n" -#: nova/crypto.py:46 +#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55 +#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110 +#: ../nova/scheduler/simple.py:122 +msgid "No hosts found" +msgstr "" + +#: ../nova/exception.py:33 +msgid "Unexpected error while running command." +msgstr "Неочікувана помилка при виконанні команди." + +#: ../nova/exception.py:36 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" + +#: ../nova/exception.py:107 +msgid "DB exception wrapped" +msgstr "" + +#. 
exc_type, exc_value, exc_traceback = sys.exc_info() +#: ../nova/exception.py:120 +msgid "Uncaught exception" +msgstr "Необроблене виключення" + +#: ../nova/volume/api.py:45 +#, python-format +msgid "Quota exceeeded for %(pid)s, tried to create %(size)sG volume" +msgstr "" + +#: ../nova/volume/api.py:47 +#, python-format +msgid "Volume quota exceeded. You cannot create a volume of size %sG" +msgstr "" + +#: ../nova/volume/api.py:71 ../nova/volume/api.py:96 +msgid "Volume status must be available" +msgstr "" + +#: ../nova/volume/api.py:98 +msgid "Volume is already attached" +msgstr "" + +#: ../nova/volume/api.py:104 +msgid "Volume is already detached" +msgstr "" + +#: ../nova/api/openstack/servers.py:72 +msgid "Failed to read private ip" +msgstr "" + +#: ../nova/api/openstack/servers.py:79 +msgid "Failed to read public ip(s)" +msgstr "" + +#: ../nova/api/openstack/servers.py:152 +#, python-format +msgid "%(param)s property not found for image %(_image_id)s" +msgstr "" + +#: ../nova/api/openstack/servers.py:168 +msgid "No keypairs defined" +msgstr "" + +#: ../nova/api/openstack/servers.py:238 +#, python-format +msgid "Compute.api::lock %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:253 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:267 +#, python-format +msgid "Compute.api::get_lock %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:281 +#, python-format +msgid "Compute.api::reset_network %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:292 +#, python-format +msgid "Compute.api::pause %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:303 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:314 +#, python-format +msgid "compute.api::suspend %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:325 +#, python-format +msgid "compute.api::resume %s" +msgstr "" + +#: ../nova/twistd.py:157 +msgid "Wrong number of arguments." 
+msgstr "" + +#: ../nova/twistd.py:209 +#, python-format +msgid "pidfile %s does not exist. Daemon not running?\n" +msgstr "" + +#: ../nova/twistd.py:221 +msgid "No such process" +msgstr "" + +#: ../nova/twistd.py:230 ../nova/service.py:224 +#, python-format +msgid "Serving %s" +msgstr "Обслуговування %s" + +#: ../nova/twistd.py:262 ../nova/service.py:225 +msgid "Full set of FLAGS:" +msgstr "" + +#: ../nova/twistd.py:266 +#, python-format +msgid "Starting %s" +msgstr "Запускається %s" + +#: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101 +#: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741 +#: ../nova/api/ec2/__init__.py:317 +#, python-format +msgid "Instance %s not found" +msgstr "" + +#. NOTE: No Resource Pool concept so far +#: ../nova/virt/xenapi/volumeops.py:51 +#, python-format +msgid "Attach_volume: %(instance_name)s, %(device_path)s, %(mountpoint)s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:69 +#, python-format +msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:80 +#, python-format +msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:91 +#, python-format +msgid "Unable to attach volume to instance %s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:93 +#, python-format +msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" +msgstr "" + +#. 
Detach VBD from VM +#: ../nova/virt/xenapi/volumeops.py:104 +#, python-format +msgid "Detach_volume: %(instance_name)s, %(mountpoint)s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:112 +#, python-format +msgid "Unable to locate volume %s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:120 +#, python-format +msgid "Unable to detach volume %s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:127 +#, python-format +msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" +msgstr "" + +#: ../nova/compute/instance_types.py:41 +#, python-format +msgid "Unknown instance type: %s" +msgstr "" + +#: ../nova/crypto.py:46 msgid "Filename of root CA" msgstr "" -#: nova/crypto.py:49 +#: ../nova/crypto.py:49 msgid "Filename of private key" msgstr "Ім'я файлу секретного ключа" -#: nova/crypto.py:51 +#: ../nova/crypto.py:51 msgid "Filename of root Certificate Revokation List" msgstr "" -#: nova/crypto.py:53 +#: ../nova/crypto.py:53 msgid "Where we keep our keys" msgstr "Шлях до збережених ключів" -#: nova/crypto.py:55 +#: ../nova/crypto.py:55 msgid "Where we keep our root CA" msgstr "" -#: nova/crypto.py:57 +#: ../nova/crypto.py:57 msgid "Should we use a CA for each project?" msgstr "" -#: nova/crypto.py:61 +#: ../nova/crypto.py:61 #, python-format msgid "Subject for certificate for users, %s for project, user, timestamp" msgstr "" -#: nova/crypto.py:66 +#: ../nova/crypto.py:66 #, python-format msgid "Subject for certificate for projects, %s for project, timestamp" msgstr "" -#: nova/crypto.py:71 +#: ../nova/crypto.py:71 #, python-format msgid "Subject for certificate for vpns, %s for project, timestamp" msgstr "" -#: nova/crypto.py:258 +#: ../nova/crypto.py:258 #, python-format msgid "Flags path: %s" msgstr "" -#: nova/exception.py:33 -msgid "Unexpected error while running command." -msgstr "Неочікувана помилка при виконанні команди." 
+#: ../nova/scheduler/manager.py:69 +#, python-format +msgid "Casting to %(topic)s %(host)s for %(method)s" +msgstr "" -#: nova/exception.py:36 +#: ../nova/compute/manager.py:78 #, python-format -msgid "" -"%s\n" -"Command: %s\n" -"Exit code: %s\n" -"Stdout: %r\n" -"Stderr: %r" -msgstr "" -"%s\n" -"Команда: %s\n" -"Код завершення: %s\n" -"Stdout: %r\n" -"Stderr: %r" - -#: nova/exception.py:86 -msgid "Uncaught exception" -msgstr "Необроблене виключення" +msgid "check_instance_lock: decorating: |%s|" +msgstr "" -#: nova/fakerabbit.py:48 +#: ../nova/compute/manager.py:80 #, python-format -msgid "(%s) publish (key: %s) %s" +msgid "" +"check_instance_lock: arguments: |%(self)s| |%(context)s| |%(instance_id)s|" msgstr "" -#: nova/fakerabbit.py:53 +#: ../nova/compute/manager.py:84 #, python-format -msgid "Publishing to route %s" +msgid "check_instance_lock: locked: |%s|" msgstr "" -#: nova/fakerabbit.py:83 +#: ../nova/compute/manager.py:86 #, python-format -msgid "Declaring queue %s" -msgstr "Оголошення черги %s" +msgid "check_instance_lock: admin: |%s|" +msgstr "" -#: nova/fakerabbit.py:89 +#: ../nova/compute/manager.py:91 #, python-format -msgid "Declaring exchange %s" -msgstr "Оголошення точки обміну %s" +msgid "check_instance_lock: executing: |%s|" +msgstr "" -#: nova/fakerabbit.py:95 +#: ../nova/compute/manager.py:95 #, python-format -msgid "Binding %s to %s with key %s" +msgid "check_instance_lock: not executing |%s|" +msgstr "" + +#: ../nova/compute/manager.py:179 +msgid "Instance has already been created" msgstr "" -#: nova/fakerabbit.py:120 +#: ../nova/compute/manager.py:180 #, python-format -msgid "Getting from %s: %s" -msgstr "Отримання з %s: %s" +msgid "instance %s: starting..." +msgstr "" -#: nova/rpc.py:92 +#. pylint: disable=W0702 +#: ../nova/compute/manager.py:219 #, python-format -msgid "AMQP server on %s:%d is unreachable. Trying again in %d seconds." -msgstr "AMQP сервер %s:%d недоступний. Спроба під'єднання через %d секунд." 
+msgid "instance %s: Failed to spawn" +msgstr "" -#: nova/rpc.py:99 +#: ../nova/compute/manager.py:233 ../nova/tests/test_cloud.py:286 #, python-format -msgid "Unable to connect to AMQP server after %d tries. Shutting down." -msgstr "Не вдалось під'єднатися до серверу AMQP після %d спроб. Вимкнення." +msgid "Terminating instance %s" +msgstr "" -#: nova/rpc.py:118 -msgid "Reconnected to queue" -msgstr "Оновлено з'єднання до черги" +#: ../nova/compute/manager.py:255 +#, python-format +msgid "Deallocating address %s" +msgstr "" -#: nova/rpc.py:125 -msgid "Failed to fetch message from queue" +#: ../nova/compute/manager.py:268 +#, python-format +msgid "trying to destroy already destroyed instance: %s" msgstr "" -#: nova/rpc.py:155 +#: ../nova/compute/manager.py:282 #, python-format -msgid "Initing the Adapter Consumer for %s" +msgid "Rebooting instance %s" msgstr "" -#: nova/rpc.py:170 +#: ../nova/compute/manager.py:287 #, python-format -msgid "received %s" -msgstr "отримано %s" +msgid "" +"trying to reboot a non-running instance: %(instance_id)s (state: %(state)s " +"expected: %(running)s)" +msgstr "" -#: nova/rpc.py:183 +#: ../nova/compute/manager.py:311 #, python-format -msgid "no method for message: %s" -msgstr "без порядку для повідомлень: %s" +msgid "instance %s: snapshotting" +msgstr "" -#: nova/rpc.py:184 +#: ../nova/compute/manager.py:316 #, python-format -msgid "No method for message: %s" -msgstr "Без порядку для повідомлень: %s" +msgid "" +"trying to snapshot a non-running instance: %(instance_id)s (state: %(state)s " +"expected: %(running)s)" +msgstr "" -#: nova/rpc.py:245 +#: ../nova/compute/manager.py:332 #, python-format -msgid "Returning exception %s to caller" +msgid "" +"trying to reset the password on a non-running instance: %(instance_id)s " +"(state: %(instance_state)s expected: %(expected_state)s)" msgstr "" -#: nova/rpc.py:286 +#: ../nova/compute/manager.py:335 #, python-format -msgid "unpacked context: %s" +msgid "instance %s: setting admin 
password" msgstr "" -#: nova/rpc.py:305 -msgid "Making asynchronous call..." -msgstr "Створення асинхронного виклику..." +#: ../nova/compute/manager.py:353 +#, python-format +msgid "" +"trying to inject a file into a non-running instance: %(instance_id)s (state: " +"%(instance_state)s expected: %(expected_state)s)" +msgstr "" -#: nova/rpc.py:308 +#: ../nova/compute/manager.py:362 #, python-format -msgid "MSG_ID is %s" -msgstr "MSG_ID %s" +msgid "instance %(nm)s: injecting file to %(plain_path)s" +msgstr "" -#: nova/rpc.py:356 +#: ../nova/compute/manager.py:372 #, python-format -msgid "response %s" -msgstr "відповідь %s" +msgid "instance %s: rescuing" +msgstr "" -#: nova/rpc.py:365 +#: ../nova/compute/manager.py:387 #, python-format -msgid "topic is %s" -msgstr "заголовок %s" +msgid "instance %s: unrescuing" +msgstr "" -#: nova/rpc.py:366 +#: ../nova/compute/manager.py:406 #, python-format -msgid "message %s" -msgstr "повідомлення %s" +msgid "instance %s: pausing" +msgstr "" -#: nova/service.py:157 +#: ../nova/compute/manager.py:423 #, python-format -msgid "Starting %s node" +msgid "instance %s: unpausing" msgstr "" -#: nova/service.py:169 -msgid "Service killed that has no database entry" +#: ../nova/compute/manager.py:440 +#, python-format +msgid "instance %s: retrieving diagnostics" msgstr "" -#: nova/service.py:190 -msgid "The service database object disappeared, Recreating it." +#: ../nova/compute/manager.py:453 +#, python-format +msgid "instance %s: suspending" msgstr "" -#: nova/service.py:202 -msgid "Recovered model server connection!" +#: ../nova/compute/manager.py:472 +#, python-format +msgid "instance %s: resuming" msgstr "" -#: nova/service.py:208 -msgid "model server went away" +#: ../nova/compute/manager.py:491 +#, python-format +msgid "instance %s: locking" msgstr "" -#: nova/service.py:217 nova/db/sqlalchemy/__init__.py:43 +#: ../nova/compute/manager.py:503 #, python-format -msgid "Data store %s is unreachable. Trying again in %d seconds." 
+msgid "instance %s: unlocking" msgstr "" -#: nova/service.py:232 nova/twistd.py:232 +#: ../nova/compute/manager.py:513 #, python-format -msgid "Serving %s" -msgstr "Обслуговування %s" +msgid "instance %s: getting locked state" +msgstr "" -#: nova/service.py:234 nova/twistd.py:264 -msgid "Full set of FLAGS:" +#: ../nova/compute/manager.py:526 +#, python-format +msgid "instance %s: reset network" msgstr "" -#: nova/twistd.py:211 +#: ../nova/compute/manager.py:535 ../nova/api/ec2/cloud.py:515 #, python-format -msgid "pidfile %s does not exist. Daemon not running?\n" +msgid "Get console output for instance %s" msgstr "" -#: nova/twistd.py:268 +#: ../nova/compute/manager.py:543 #, python-format -msgid "Starting %s" -msgstr "Запускається %s" +msgid "instance %s: getting ajax console" +msgstr "" -#: nova/utils.py:53 +#: ../nova/compute/manager.py:553 #, python-format -msgid "Inner Exception: %s" +msgid "" +"instance %(instance_id)s: attaching volume %(volume_id)s to %(mountpoint)s" msgstr "" -#: nova/utils.py:54 +#. pylint: disable=W0702 +#. NOTE(vish): The inline callback eats the exception info so we +#. log the traceback here and reraise the same +#. ecxception below. 
+#: ../nova/compute/manager.py:569 #, python-format -msgid "Class %s cannot be found" +msgid "instance %(instance_id)s: attach failed %(mountpoint)s, removing" msgstr "" -#: nova/utils.py:113 +#: ../nova/compute/manager.py:585 #, python-format -msgid "Fetching %s" +msgid "" +"Detach volume %(volume_id)s from mountpoint %(mp)s on instance " +"%(instance_id)s" msgstr "" -#: nova/utils.py:125 +#: ../nova/compute/manager.py:588 #, python-format -msgid "Running cmd (subprocess): %s" +msgid "Detaching volume from unknown instance %s" msgstr "" -#: nova/utils.py:138 +#: ../nova/scheduler/simple.py:53 #, python-format -msgid "Result was %s" +msgid "Host %s is not alive" +msgstr "" + +#: ../nova/scheduler/simple.py:65 +msgid "All hosts have too many cores" msgstr "" -#: nova/utils.py:171 +#: ../nova/scheduler/simple.py:87 #, python-format -msgid "debug in callback: %s" +msgid "Host %s not available" +msgstr "" + +#: ../nova/scheduler/simple.py:99 +msgid "All hosts have too many gigabytes" msgstr "" -#: nova/utils.py:176 +#: ../nova/scheduler/simple.py:119 +msgid "All hosts have too many networks" +msgstr "" + +#: ../nova/volume/manager.py:85 #, python-format -msgid "Running %s" -msgstr "Запускається %s" +msgid "Re-exporting %s volumes" +msgstr "" -#: nova/utils.py:207 +#: ../nova/volume/manager.py:90 #, python-format -msgid "Couldn't get IP, using 127.0.0.1 %s" -msgstr "Не вдалось отримати IP, використовуючи 127.0.0.1 %s" +msgid "volume %s: skipping export" +msgstr "" -#: nova/utils.py:289 +#: ../nova/volume/manager.py:96 #, python-format -msgid "Invalid backend: %s" +msgid "volume %s: creating" msgstr "" -#: nova/utils.py:300 +#: ../nova/volume/manager.py:108 #, python-format -msgid "backend %s" +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" msgstr "" -#: nova/api/ec2/__init__.py:133 -msgid "Too many failed authentications." -msgstr "Занадто багато невдалих аутентифікацій." 
+#: ../nova/volume/manager.py:112 +#, python-format +msgid "volume %s: creating export" +msgstr "" -#: nova/api/ec2/__init__.py:142 +#: ../nova/volume/manager.py:123 #, python-format -msgid "" -"Access key %s has had %d failed authentications and will be locked out for " -"%d minutes." +msgid "volume %s: created successfully" +msgstr "" + +#: ../nova/volume/manager.py:131 +msgid "Volume is still attached" +msgstr "" + +#: ../nova/volume/manager.py:133 +msgid "Volume is not local to this node" msgstr "" -#: nova/api/ec2/__init__.py:179 nova/objectstore/handler.py:140 +#: ../nova/volume/manager.py:136 #, python-format -msgid "Authentication Failure: %s" +msgid "volume %s: removing export" msgstr "" -#: nova/api/ec2/__init__.py:190 +#: ../nova/volume/manager.py:138 #, python-format -msgid "Authenticated Request For %s:%s)" +msgid "volume %s: deleting" msgstr "" -#: nova/api/ec2/__init__.py:227 +#: ../nova/volume/manager.py:147 #, python-format -msgid "action: %s" +msgid "volume %s: deleted successfully" msgstr "" -#: nova/api/ec2/__init__.py:229 +#: ../nova/virt/xenapi/fake.py:74 #, python-format -msgid "arg: %s\t\tval: %s" +msgid "%(text)s: _db_content => %(content)s" msgstr "" -#: nova/api/ec2/__init__.py:301 +#: ../nova/virt/xenapi/fake.py:304 ../nova/virt/xenapi/fake.py:404 +#: ../nova/virt/xenapi/fake.py:422 ../nova/virt/xenapi/fake.py:478 +msgid "Raising NotImplemented" +msgstr "" + +#: ../nova/virt/xenapi/fake.py:306 #, python-format -msgid "Unauthorized request for controller=%s and action=%s" +msgid "xenapi.fake does not have an implementation for %s" msgstr "" -#: nova/api/ec2/__init__.py:339 +#: ../nova/virt/xenapi/fake.py:341 #, python-format -msgid "NotFound raised: %s" +msgid "Calling %(localname)s %(impl)s" msgstr "" -#: nova/api/ec2/__init__.py:342 +#: ../nova/virt/xenapi/fake.py:346 #, python-format -msgid "ApiError raised: %s" +msgid "Calling getter %s" msgstr "" -#: nova/api/ec2/__init__.py:349 +#: ../nova/virt/xenapi/fake.py:406 #, python-format 
-msgid "Unexpected error raised: %s" +msgid "" +"xenapi.fake does not have an implementation for %s or it has been called " +"with the wrong number of arguments" msgstr "" -#: nova/api/ec2/__init__.py:354 -msgid "An unknown error has occurred. Please try your request again." +#: ../nova/tests/test_cloud.py:256 +msgid "Can't test instances without a real virtual env." msgstr "" -#: nova/api/ec2/admin.py:84 +#: ../nova/tests/test_cloud.py:268 #, python-format -msgid "Creating new user: %s" +msgid "Need to watch instance %s until it's running..." msgstr "" -#: nova/api/ec2/admin.py:92 -#, python-format -msgid "Deleting user: %s" +#: ../nova/virt/connection.py:73 +msgid "Failed to open connection to the hypervisor" msgstr "" -#: nova/api/ec2/admin.py:114 +#: ../nova/network/linux_net.py:187 #, python-format -msgid "Adding role %s to user %s for project %s" +msgid "Starting VLAN inteface %s" msgstr "" -#: nova/api/ec2/admin.py:117 nova/auth/manager.py:415 +#: ../nova/network/linux_net.py:208 #, python-format -msgid "Adding sitewide role %s to user %s" +msgid "Starting Bridge interface for %s" msgstr "" -#: nova/api/ec2/admin.py:122 +#. pylint: disable=W0703 +#: ../nova/network/linux_net.py:314 #, python-format -msgid "Removing role %s from user %s for project %s" +msgid "Hupping dnsmasq threw %s" msgstr "" -#: nova/api/ec2/admin.py:125 nova/auth/manager.py:441 +#: ../nova/network/linux_net.py:316 #, python-format -msgid "Removing sitewide role %s from user %s" +msgid "Pid %d is stale, relaunching dnsmasq" msgstr "" -#: nova/api/ec2/admin.py:129 nova/api/ec2/admin.py:192 -msgid "operation must be add or remove" +#. pylint: disable=W0703 +#: ../nova/network/linux_net.py:358 +#, python-format +msgid "killing radvd threw %s" msgstr "" -#: nova/api/ec2/admin.py:142 +#: ../nova/network/linux_net.py:360 #, python-format -msgid "Getting x509 for user: %s on project: %s" +msgid "Pid %d is stale, relaunching radvd" msgstr "" -#: nova/api/ec2/admin.py:159 +#. 
pylint: disable=W0703 +#: ../nova/network/linux_net.py:449 #, python-format -msgid "Create project %s managed by %s" +msgid "Killing dnsmasq threw %s" msgstr "" -#: nova/api/ec2/admin.py:170 +#: ../nova/utils.py:58 #, python-format -msgid "Delete project: %s" -msgstr "Вилучити проект: %s" +msgid "Inner Exception: %s" +msgstr "" -#: nova/api/ec2/admin.py:184 nova/auth/manager.py:533 +#: ../nova/utils.py:59 #, python-format -msgid "Adding user %s to project %s" -msgstr "Долучення користувача %s до проекту %s" +msgid "Class %s cannot be found" +msgstr "" -#: nova/api/ec2/admin.py:188 +#: ../nova/utils.py:118 #, python-format -msgid "Removing user %s from project %s" -msgstr "Вилучення користувача %s з проекту %s" +msgid "Fetching %s" +msgstr "" -#: nova/api/ec2/apirequest.py:95 +#: ../nova/utils.py:130 #, python-format -msgid "Unsupported API request: controller = %s,action = %s" +msgid "Running cmd (subprocess): %s" msgstr "" -#: nova/api/ec2/cloud.py:117 +#: ../nova/utils.py:143 ../nova/utils.py:183 #, python-format -msgid "Generating root CA: %s" +msgid "Result was %s" msgstr "" -#: nova/api/ec2/cloud.py:277 +#: ../nova/utils.py:159 #, python-format -msgid "Create key pair %s" +msgid "Running cmd (SSH): %s" msgstr "" -#: nova/api/ec2/cloud.py:285 +#: ../nova/utils.py:217 #, python-format -msgid "Delete key pair %s" +msgid "debug in callback: %s" msgstr "" -#: nova/api/ec2/cloud.py:357 +#: ../nova/utils.py:222 #, python-format -msgid "%s is not a valid ipProtocol" -msgstr "%s не допустимий ipProtocol" - -#: nova/api/ec2/cloud.py:361 -msgid "Invalid port range" -msgstr "Невірний діапазон портів" +msgid "Running %s" +msgstr "Запускається %s" -#: nova/api/ec2/cloud.py:392 +#: ../nova/utils.py:262 #, python-format -msgid "Revoke security group ingress %s" +msgid "Link Local address is not found.:%s" msgstr "" -#: nova/api/ec2/cloud.py:401 nova/api/ec2/cloud.py:414 -msgid "No rule for the specified parameters." 
+#: ../nova/utils.py:265 +#, python-format +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" msgstr "" -#: nova/api/ec2/cloud.py:421 +#: ../nova/utils.py:363 #, python-format -msgid "Authorize security group ingress %s" +msgid "Invalid backend: %s" msgstr "" -#: nova/api/ec2/cloud.py:432 +#: ../nova/utils.py:374 #, python-format -msgid "This rule already exists in group %s" -msgstr "Це правило вже існує в групі %s" +msgid "backend %s" +msgstr "" -#: nova/api/ec2/cloud.py:460 +#: ../nova/fakerabbit.py:49 #, python-format -msgid "Create Security Group %s" +msgid "(%(nm)s) publish (key: %(routing_key)s) %(message)s" msgstr "" -#: nova/api/ec2/cloud.py:463 +#: ../nova/fakerabbit.py:54 #, python-format -msgid "group %s already exists" +msgid "Publishing to route %s" msgstr "" -#: nova/api/ec2/cloud.py:475 +#: ../nova/fakerabbit.py:84 #, python-format -msgid "Delete security group %s" -msgstr "Вилучити групу безпеки %s" +msgid "Declaring queue %s" +msgstr "Оголошення черги %s" -#: nova/api/ec2/cloud.py:483 nova/compute/manager.py:452 +#: ../nova/fakerabbit.py:90 #, python-format -msgid "Get console output for instance %s" -msgstr "" +msgid "Declaring exchange %s" +msgstr "Оголошення точки обміну %s" -#: nova/api/ec2/cloud.py:543 +#: ../nova/fakerabbit.py:96 #, python-format -msgid "Create volume of %s GB" -msgstr "Створити розділ на %s ГБ" +msgid "Binding %(queue)s to %(exchange)s with key %(routing_key)s" +msgstr "" -#: nova/api/ec2/cloud.py:567 +#: ../nova/fakerabbit.py:121 #, python-format -msgid "Attach volume %s to instacne %s at %s" +msgid "Getting from %(queue)s: %(message)s" msgstr "" -#: nova/api/ec2/cloud.py:579 +#: ../nova/virt/xenapi/vm_utils.py:135 ../nova/virt/hyperv.py:171 #, python-format -msgid "Detach volume %s" -msgstr "Від'єднати том %s" +msgid "Created VM %s..." +msgstr "" -#: nova/api/ec2/cloud.py:686 -msgid "Allocate address" +#: ../nova/virt/xenapi/vm_utils.py:138 +#, python-format +msgid "Created VM %(instance_name)s as %(vm_ref)s." 
msgstr "" -#: nova/api/ec2/cloud.py:691 +#: ../nova/virt/xenapi/vm_utils.py:168 #, python-format -msgid "Release address %s" +msgid "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... " msgstr "" -#: nova/api/ec2/cloud.py:696 +#: ../nova/virt/xenapi/vm_utils.py:171 #, python-format -msgid "Associate address %s to instance %s" +msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." msgstr "" -#: nova/api/ec2/cloud.py:703 +#: ../nova/virt/xenapi/vm_utils.py:187 #, python-format -msgid "Disassociate address %s" +msgid "VBD not found in instance %s" msgstr "" -#: nova/api/ec2/cloud.py:730 -msgid "Going to start terminating instances" +#: ../nova/virt/xenapi/vm_utils.py:197 +#, python-format +msgid "Unable to unplug VBD %s" msgstr "" -#: nova/api/ec2/cloud.py:738 +#: ../nova/virt/xenapi/vm_utils.py:209 #, python-format -msgid "Reboot instance %r" +msgid "Unable to destroy VBD %s" msgstr "" -#: nova/api/ec2/cloud.py:775 +#: ../nova/virt/xenapi/vm_utils.py:224 #, python-format -msgid "De-registering image %s" +msgid "Creating VIF for VM %(vm_ref)s, network %(network_ref)s." msgstr "" -#: nova/api/ec2/cloud.py:783 +#: ../nova/virt/xenapi/vm_utils.py:227 #, python-format -msgid "Registered image %s with id %s" +msgid "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s." msgstr "" -#: nova/api/ec2/cloud.py:789 nova/api/ec2/cloud.py:804 +#: ../nova/virt/xenapi/vm_utils.py:246 #, python-format -msgid "attribute not supported: %s" +msgid "" +"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s) on " +"%(sr_ref)s." msgstr "" -#: nova/api/ec2/cloud.py:794 +#. TODO(sirp): Add quiesce and VSS locking support when Windows support +#. is added +#: ../nova/virt/xenapi/vm_utils.py:258 #, python-format -msgid "invalid id: %s" +msgid "Snapshotting VM %(vm_ref)s with label '%(label)s'..." 
msgstr "" -#: nova/api/ec2/cloud.py:807 -msgid "user or group not specified" +#: ../nova/virt/xenapi/vm_utils.py:272 +#, python-format +msgid "Created snapshot %(template_vm_ref)s from VM %(vm_ref)s." msgstr "" -#: nova/api/ec2/cloud.py:809 -msgid "only group \"all\" is supported" -msgstr "лише група \"всі\" підтримується" +#: ../nova/virt/xenapi/vm_utils.py:286 +#, python-format +msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s" +msgstr "" -#: nova/api/ec2/cloud.py:811 -msgid "operation_type must be add or remove" +#: ../nova/virt/xenapi/vm_utils.py:327 +#, python-format +msgid "Size for image %(image)s:%(virtual_size)d" msgstr "" -#: nova/api/ec2/cloud.py:812 +#: ../nova/virt/xenapi/vm_utils.py:332 #, python-format -msgid "Updating image %s publicity" +msgid "Glance image %s" msgstr "" -#: nova/api/ec2/metadatarequesthandler.py:75 +#. we need to invoke a plugin for copying VDI's +#. content into proper path +#: ../nova/virt/xenapi/vm_utils.py:342 #, python-format -msgid "Failed to get metadata for ip: %s" +msgid "Copying VDI %s to /boot/guest on dom0" msgstr "" -#: nova/api/openstack/__init__.py:70 +#: ../nova/virt/xenapi/vm_utils.py:352 #, python-format -msgid "Caught error: %s" +msgid "Kernel/Ramdisk VDI %s destroyed" msgstr "" -#: nova/api/openstack/__init__.py:86 -msgid "Including admin operations in API." 
+#: ../nova/virt/xenapi/vm_utils.py:361 +#, python-format +msgid "Asking xapi to fetch %(url)s as %(access)s" msgstr "" -#: nova/api/openstack/servers.py:184 +#: ../nova/virt/xenapi/vm_utils.py:386 ../nova/virt/xenapi/vm_utils.py:402 #, python-format -msgid "Compute.api::lock %s" +msgid "Looking up vdi %s for PV kernel" msgstr "" -#: nova/api/openstack/servers.py:199 +#: ../nova/virt/xenapi/vm_utils.py:397 #, python-format -msgid "Compute.api::unlock %s" +msgid "PV Kernel in VDI:%s" msgstr "" -#: nova/api/openstack/servers.py:213 +#: ../nova/virt/xenapi/vm_utils.py:405 #, python-format -msgid "Compute.api::get_lock %s" +msgid "Running pygrub against %s" msgstr "" -#: nova/api/openstack/servers.py:224 +#: ../nova/virt/xenapi/vm_utils.py:411 #, python-format -msgid "Compute.api::pause %s" +msgid "Found Xen kernel %s" msgstr "" -#: nova/api/openstack/servers.py:235 +#: ../nova/virt/xenapi/vm_utils.py:413 +msgid "No Xen kernel found. Booting HVM." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:425 ../nova/virt/hyperv.py:431 #, python-format -msgid "Compute.api::unpause %s" +msgid "duplicate name found: %s" msgstr "" -#: nova/api/openstack/servers.py:246 +#: ../nova/virt/xenapi/vm_utils.py:442 #, python-format -msgid "compute.api::suspend %s" +msgid "VDI %s is still available" msgstr "" -#: nova/api/openstack/servers.py:257 +#: ../nova/virt/xenapi/vm_utils.py:463 #, python-format -msgid "compute.api::resume %s" +msgid "(VM_UTILS) xenserver vm state -> |%s|" msgstr "" -#: nova/auth/dbdriver.py:84 +#: ../nova/virt/xenapi/vm_utils.py:465 #, python-format -msgid "User %s already exists" -msgstr "Користувач %s вже існує" +msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgstr "" -#: nova/auth/dbdriver.py:106 nova/auth/ldapdriver.py:207 +#: ../nova/virt/xenapi/vm_utils.py:525 #, python-format -msgid "Project can't be created because manager %s doesn't exist" +msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s" msgstr "" -#: nova/auth/dbdriver.py:135 
nova/auth/ldapdriver.py:204 +#: ../nova/virt/xenapi/vm_utils.py:542 #, python-format -msgid "Project can't be created because project %s already exists" +msgid "Re-scanning SR %s" msgstr "" -#: nova/auth/dbdriver.py:157 nova/auth/ldapdriver.py:241 +#: ../nova/virt/xenapi/vm_utils.py:567 #, python-format -msgid "Project can't be modified because manager %s doesn't exist" +msgid "" +"VHD coalesce attempts exceeded (%(counter)d > %(max_attempts)d), giving up..." msgstr "" -#: nova/auth/dbdriver.py:245 +#: ../nova/virt/xenapi/vm_utils.py:574 #, python-format -msgid "User \"%s\" not found" -msgstr "Користувач \"%s\" не знайдено" +msgid "" +"Parent %(parent_uuid)s doesn't match original parent " +"%(original_parent_uuid)s, waiting for coalesce..." +msgstr "" -#: nova/auth/dbdriver.py:248 +#: ../nova/virt/xenapi/vm_utils.py:590 #, python-format -msgid "Project \"%s\" not found" -msgstr "Проект \"%s\" не знайдено" +msgid "No VDIs found for VM %s" +msgstr "" -#: nova/auth/fakeldap.py:33 -msgid "Attempted to instantiate singleton" +#: ../nova/virt/xenapi/vm_utils.py:594 +#, python-format +msgid "Unexpected number of VDIs (%(num_vdis)s) found for VM %(vm_ref)s" msgstr "" -#: nova/auth/ldapdriver.py:181 +#: ../nova/virt/xenapi/vm_utils.py:653 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:188 #, python-format -msgid "LDAP object for %s doesn't exist" +msgid "Creating VBD for VDI %s ... " msgstr "" -#: nova/auth/ldapdriver.py:218 +#: ../nova/virt/xenapi/vm_utils.py:655 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:190 #, python-format -msgid "Project can't be created because user %s doesn't exist" +msgid "Creating VBD for VDI %s done." msgstr "" -#: nova/auth/ldapdriver.py:478 +#: ../nova/virt/xenapi/vm_utils.py:657 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:192 #, python-format -msgid "User %s is already a member of the group %s" +msgid "Plugging VBD %s ... 
" msgstr "" -#: nova/auth/ldapdriver.py:507 +#: ../nova/virt/xenapi/vm_utils.py:659 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:194 #, python-format -msgid "" -"Attempted to remove the last member of a group. Deleting the group at %s " -"instead." +msgid "Plugging VBD %s done." msgstr "" -#: nova/auth/ldapdriver.py:528 +#: ../nova/virt/xenapi/vm_utils.py:661 #, python-format -msgid "Group at dn %s doesn't exist" +msgid "VBD %(vbd)s plugged as %(orig_dev)s" msgstr "" -#: nova/auth/manager.py:259 +#: ../nova/virt/xenapi/vm_utils.py:664 #, python-format -msgid "Looking up user: %r" +msgid "VBD %(vbd)s plugged into wrong dev, remapping to %(dev)s" msgstr "" -#: nova/auth/manager.py:263 +#: ../nova/virt/xenapi/vm_utils.py:668 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:197 #, python-format -msgid "Failed authorization for access key %s" +msgid "Destroying VBD for VDI %s ... " msgstr "" -#: nova/auth/manager.py:264 +#: ../nova/virt/xenapi/vm_utils.py:671 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:200 #, python-format -msgid "No user found for access key %s" +msgid "Destroying VBD for VDI %s done." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:683 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:211 +msgid "VBD.unplug successful first time." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:688 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:216 +msgid "VBD.unplug rejected: retrying..." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:692 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:220 +msgid "VBD.unplug successful eventually." 
msgstr "" -#: nova/auth/manager.py:270 +#: ../nova/virt/xenapi/vm_utils.py:695 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:223 #, python-format -msgid "Using project name = user name (%s)" +msgid "Ignoring XenAPI.Failure in VBD.unplug: %s" msgstr "" -#: nova/auth/manager.py:275 +#: ../nova/virt/xenapi/vm_utils.py:704 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:66 #, python-format -msgid "failed authorization: no project named %s (user=%s)" +msgid "Ignoring XenAPI.Failure %s" msgstr "" -#: nova/auth/manager.py:277 +#: ../nova/virt/xenapi/vm_utils.py:735 #, python-format -msgid "No project called %s could be found" +msgid "" +"Writing partition table %(primary_first)d %(primary_last)d to %(dest)s..." msgstr "" -#: nova/auth/manager.py:281 +#: ../nova/virt/xenapi/vm_utils.py:747 #, python-format -msgid "Failed authorization: user %s not admin and not member of project %s" +msgid "Writing partition table %s done." msgstr "" -#: nova/auth/manager.py:283 +#: ../nova/tests/test_rpc.py:89 #, python-format -msgid "User %s is not a member of project %s" +msgid "Nested received %(queue)s, %(value)s" msgstr "" -#: nova/auth/manager.py:292 nova/auth/manager.py:303 +#: ../nova/tests/test_rpc.py:95 #, python-format -msgid "Invalid signature for user %s" +msgid "Nested return %s" msgstr "" -#: nova/auth/manager.py:293 nova/auth/manager.py:304 -msgid "Signature does not match" +#: ../nova/tests/test_rpc.py:120 ../nova/tests/test_rpc.py:126 +#, python-format +msgid "Received %s" msgstr "" -#: nova/auth/manager.py:374 -msgid "Must specify project" +#: ../nova/db/sqlalchemy/api.py:44 +msgid "Use of empty request context is deprecated" msgstr "" -#: nova/auth/manager.py:408 +#: ../nova/db/sqlalchemy/api.py:133 #, python-format -msgid "The %s role can not be found" +msgid "No service for id %s" msgstr "" -#: nova/auth/manager.py:410 +#: ../nova/db/sqlalchemy/api.py:251 #, python-format -msgid "The %s role is global only" +msgid "No 
service for %(host)s, %(binary)s" msgstr "" -#: nova/auth/manager.py:412 -#, python-format -msgid "Adding role %s to user %s in project %s" +#: ../nova/db/sqlalchemy/api.py:592 +msgid "No fixed ips defined" msgstr "" -#: nova/auth/manager.py:438 +#: ../nova/db/sqlalchemy/api.py:608 #, python-format -msgid "Removing role %s from user %s on project %s" +msgid "No floating ip for address %s" msgstr "" -#: nova/auth/manager.py:505 +#: ../nova/db/sqlalchemy/api.py:629 #, python-format -msgid "Created project %s with manager %s" +msgid "No address for instance %s" msgstr "" -#: nova/auth/manager.py:523 +#: ../nova/db/sqlalchemy/api.py:961 #, python-format -msgid "modifying project %s" +msgid "no keypair for user %(user_id)s, name %(name)s" msgstr "" -#: nova/auth/manager.py:553 +#: ../nova/db/sqlalchemy/api.py:1076 ../nova/db/sqlalchemy/api.py:1156 #, python-format -msgid "Remove user %s from project %s" +msgid "No network for id %s" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1086 +msgid "No networks defined" msgstr "" -#: nova/auth/manager.py:581 +#: ../nova/db/sqlalchemy/api.py:1115 #, python-format -msgid "Deleting project %s" +msgid "No network for bridge %s" msgstr "" -#: nova/auth/manager.py:637 +#: ../nova/db/sqlalchemy/api.py:1129 ../nova/db/sqlalchemy/api.py:1142 #, python-format -msgid "Created user %s (admin: %r)" +msgid "No network for instance %s" msgstr "" -#: nova/auth/manager.py:645 +#: ../nova/db/sqlalchemy/api.py:1277 #, python-format -msgid "Deleting user %s" +msgid "Token %s does not exist" msgstr "" -#: nova/auth/manager.py:655 +#: ../nova/db/sqlalchemy/api.py:1302 #, python-format -msgid "Access Key change for user %s" +msgid "No quota for project_id %s" msgstr "" -#: nova/auth/manager.py:657 +#: ../nova/db/sqlalchemy/api.py:1455 ../nova/db/sqlalchemy/api.py:1501 +#: ../nova/api/ec2/__init__.py:323 #, python-format -msgid "Secret Key change for user %s" +msgid "Volume %s not found" msgstr "" -#: nova/auth/manager.py:659 +#: 
../nova/db/sqlalchemy/api.py:1514 #, python-format -msgid "Admin status set to %r for user %s" +msgid "No export device found for volume %s" msgstr "" -#: nova/auth/manager.py:708 +#: ../nova/db/sqlalchemy/api.py:1527 #, python-format -msgid "No vpn data for project %s" +msgid "No target id found for volume %s" msgstr "" -#: nova/cloudpipe/pipelib.py:45 -msgid "Template for script to run on cloudpipe instance boot" +#: ../nova/db/sqlalchemy/api.py:1572 +#, python-format +msgid "No security group with id %s" msgstr "" -#: nova/cloudpipe/pipelib.py:48 -msgid "Network to push into openvpn config" +#: ../nova/db/sqlalchemy/api.py:1589 +#, python-format +msgid "No security group named %(group_name)s for project: %(project_id)s" msgstr "" -#: nova/cloudpipe/pipelib.py:51 -msgid "Netmask to push into openvpn config" +#: ../nova/db/sqlalchemy/api.py:1682 +#, python-format +msgid "No secuity group rule with id %s" msgstr "" -#: nova/cloudpipe/pipelib.py:97 +#: ../nova/db/sqlalchemy/api.py:1756 #, python-format -msgid "Launching VPN for %s" +msgid "No user for id %s" msgstr "" -#: nova/compute/api.py:67 +#: ../nova/db/sqlalchemy/api.py:1772 #, python-format -msgid "Instance %d was not found in get_network_topic" +msgid "No user for access key %s" msgstr "" -#: nova/compute/api.py:73 +#: ../nova/db/sqlalchemy/api.py:1834 #, python-format -msgid "Instance %d has no host" +msgid "No project with id %s" msgstr "" -#: nova/compute/api.py:92 +#: ../nova/db/sqlalchemy/api.py:1979 #, python-format -msgid "Quota exceeeded for %s, tried to run %s instances" +msgid "No console pool with id %(pool_id)s" msgstr "" -#: nova/compute/api.py:94 +#: ../nova/db/sqlalchemy/api.py:1996 #, python-format msgid "" -"Instance quota exceeded. You can only run %s more instances of this type." 
+"No console pool of type %(console_type)s for compute host %(compute_host)s " +"on proxy host %(host)s" msgstr "" -#: nova/compute/api.py:109 -msgid "Creating a raw instance" +#: ../nova/db/sqlalchemy/api.py:2035 +#, python-format +msgid "No console for instance %(instance_id)s in pool %(pool_id)s" msgstr "" -#: nova/compute/api.py:156 +#: ../nova/db/sqlalchemy/api.py:2057 #, python-format -msgid "Going to run %s instances..." +msgid "on instance %s" msgstr "" -#: nova/compute/api.py:180 +#: ../nova/db/sqlalchemy/api.py:2058 #, python-format -msgid "Casting to scheduler for %s/%s's instance %s" +msgid "No console with id %(console_id)s %(idesc)s" msgstr "" -#: nova/compute/api.py:279 +#: ../nova/db/sqlalchemy/api.py:2078 ../nova/db/sqlalchemy/api.py:2097 #, python-format -msgid "Going to try and terminate %s" +msgid "No zone with id %(zone_id)s" msgstr "" -#: nova/compute/api.py:283 +#: ../nova/virt/libvirt_conn.py:160 #, python-format -msgid "Instance %d was not found during terminate" +msgid "Checking state of %s" msgstr "" -#: nova/compute/api.py:288 +#: ../nova/virt/libvirt_conn.py:165 #, python-format -msgid "Instance %d is already being terminated" +msgid "Current state of %(name)s was %(state)s." msgstr "" -#: nova/compute/api.py:450 +#: ../nova/virt/libvirt_conn.py:183 #, python-format -msgid "Invalid device specified: %s. Example device: /dev/vdb" +msgid "Connecting to libvirt: %s" msgstr "" -#: nova/compute/api.py:465 -msgid "Volume isn't attached to anything!" 
+#: ../nova/virt/libvirt_conn.py:196 +msgid "Connection to libvirt broke" msgstr "" -#: nova/compute/disk.py:71 +#: ../nova/virt/libvirt_conn.py:258 #, python-format -msgid "Input partition size not evenly divisible by sector size: %d / %d" +msgid "instance %(instance_name)s: deleting instance files %(target)s" msgstr "" -#: nova/compute/disk.py:75 +#: ../nova/virt/libvirt_conn.py:283 #, python-format -msgid "Bytes for local storage not evenly divisible by sector size: %d / %d" +msgid "Invalid device path %s" msgstr "" -#: nova/compute/disk.py:128 +#: ../nova/virt/libvirt_conn.py:313 #, python-format -msgid "Could not attach image to loopback: %s" +msgid "No disk at %s" msgstr "" -#: nova/compute/disk.py:136 +#: ../nova/virt/libvirt_conn.py:320 +msgid "Instance snapshotting is not supported for libvirtat this time" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:336 #, python-format -msgid "Failed to load partition: %s" +msgid "instance %s: rebooted" msgstr "" -#: nova/compute/disk.py:158 +#: ../nova/virt/libvirt_conn.py:339 #, python-format -msgid "Failed to mount filesystem: %s" +msgid "_wait_for_reboot failed: %s" msgstr "" -#: nova/compute/instance_types.py:41 +#: ../nova/virt/libvirt_conn.py:382 #, python-format -msgid "Unknown instance type: %s" +msgid "instance %s: rescued" msgstr "" -#: nova/compute/manager.py:69 +#: ../nova/virt/libvirt_conn.py:385 #, python-format -msgid "check_instance_lock: decorating: |%s|" +msgid "_wait_for_rescue failed: %s" msgstr "" -#: nova/compute/manager.py:71 +#: ../nova/virt/libvirt_conn.py:411 #, python-format -msgid "check_instance_lock: arguments: |%s| |%s| |%s|" +msgid "instance %s: is running" msgstr "" -#: nova/compute/manager.py:75 +#: ../nova/virt/libvirt_conn.py:422 #, python-format -msgid "check_instance_lock: locked: |%s|" +msgid "instance %s: booted" msgstr "" -#: nova/compute/manager.py:77 +#: ../nova/virt/libvirt_conn.py:425 ../nova/virt/xenapi/vmops.py:186 #, python-format -msgid "check_instance_lock: admin: |%s|" 
+msgid "instance %s: failed to boot" msgstr "" -#: nova/compute/manager.py:82 +#: ../nova/virt/libvirt_conn.py:436 #, python-format -msgid "check_instance_lock: executing: |%s|" +msgid "virsh said: %r" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:440 +msgid "cool, it's a device" msgstr "" -#: nova/compute/manager.py:86 +#: ../nova/virt/libvirt_conn.py:448 #, python-format -msgid "check_instance_lock: not executing |%s|" +msgid "data: %(data)r, fpath: %(fpath)r" msgstr "" -#: nova/compute/manager.py:157 -msgid "Instance has already been created" +#: ../nova/virt/libvirt_conn.py:456 +#, python-format +msgid "Contents of file %(fpath)s: %(contents)r" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:489 +msgid "Unable to find an open port" msgstr "" -#: nova/compute/manager.py:158 +#: ../nova/virt/libvirt_conn.py:563 #, python-format -msgid "instance %s: starting..." +msgid "instance %s: Creating image" msgstr "" -#: nova/compute/manager.py:197 +#: ../nova/virt/libvirt_conn.py:646 #, python-format -msgid "instance %s: Failed to spawn" +msgid "instance %(inst_name)s: injecting key into image %(img_id)s" msgstr "" -#: nova/compute/manager.py:211 nova/tests/test_cloud.py:228 +#: ../nova/virt/libvirt_conn.py:649 #, python-format -msgid "Terminating instance %s" +msgid "instance %(inst_name)s: injecting net into image %(img_id)s" msgstr "" -#: nova/compute/manager.py:217 +#. This could be a windows image, or a vmdk format disk +#: ../nova/virt/libvirt_conn.py:657 #, python-format -msgid "Disassociating address %s" +msgid "" +"instance %(inst_name)s: ignoring error injecting data into image %(img_id)s " +"(%(e)s)" msgstr "" -#: nova/compute/manager.py:230 +#. TODO(termie): cache? 
+#: ../nova/virt/libvirt_conn.py:665 #, python-format -msgid "Deallocating address %s" +msgid "instance %s: starting toXML method" msgstr "" -#: nova/compute/manager.py:243 +#: ../nova/virt/libvirt_conn.py:732 #, python-format -msgid "trying to destroy already destroyed instance: %s" +msgid "instance %s: finished toXML method" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:751 +msgid "diagnostics are not supported for libvirt" msgstr "" -#: nova/compute/manager.py:257 +#: ../nova/virt/libvirt_conn.py:1225 #, python-format -msgid "Rebooting instance %s" +msgid "Attempted to unfilter instance %s which is not filtered" msgstr "" -#: nova/compute/manager.py:260 +#: ../nova/api/ec2/metadatarequesthandler.py:76 #, python-format -msgid "trying to reboot a non-running instance: %s (state: %s excepted: %s)" +msgid "Failed to get metadata for ip: %s" +msgstr "" + +#: ../nova/auth/fakeldap.py:33 +msgid "Attempted to instantiate singleton" msgstr "" -#: nova/compute/manager.py:286 +#: ../nova/network/api.py:39 #, python-format -msgid "instance %s: snapshotting" +msgid "Quota exceeeded for %s, tried to allocate address" +msgstr "" + +#: ../nova/network/api.py:42 +msgid "Address quota exceeded. You cannot allocate any more addresses" msgstr "" -#: nova/compute/manager.py:289 +#: ../nova/tests/test_volume.py:162 #, python-format -msgid "" -"trying to snapshot a non-running instance: %s (state: %s excepted: %s)" +msgid "Target %s allocated" msgstr "" -#: nova/compute/manager.py:301 +#: ../nova/virt/images.py:70 #, python-format -msgid "instance %s: rescuing" +msgid "Finished retreving %(url)s -- placed in %(path)s" msgstr "" -#: nova/compute/manager.py:316 +#: ../nova/scheduler/driver.py:66 +msgid "Must implement a fallback schedule" +msgstr "" + +#: ../nova/console/manager.py:70 +msgid "Adding console" +msgstr "" + +#: ../nova/console/manager.py:90 #, python-format -msgid "instance %s: unrescuing" +msgid "Tried to remove non-existant console %(console_id)s." 
+msgstr "" + +#: ../nova/api/direct.py:149 +msgid "not available" msgstr "" -#: nova/compute/manager.py:335 +#: ../nova/api/ec2/cloud.py:62 #, python-format -msgid "instance %s: pausing" +msgid "The key_pair %s already exists" msgstr "" -#: nova/compute/manager.py:352 +#. TODO(vish): Do this with M2Crypto instead +#: ../nova/api/ec2/cloud.py:118 #, python-format -msgid "instance %s: unpausing" +msgid "Generating root CA: %s" msgstr "" -#: nova/compute/manager.py:369 +#: ../nova/api/ec2/cloud.py:303 #, python-format -msgid "instance %s: retrieving diagnostics" +msgid "Create key pair %s" msgstr "" -#: nova/compute/manager.py:382 +#: ../nova/api/ec2/cloud.py:311 #, python-format -msgid "instance %s: suspending" +msgid "Delete key pair %s" msgstr "" -#: nova/compute/manager.py:401 +#: ../nova/api/ec2/cloud.py:386 #, python-format -msgid "instance %s: resuming" +msgid "%s is not a valid ipProtocol" +msgstr "%s не допустимий ipProtocol" + +#: ../nova/api/ec2/cloud.py:390 +msgid "Invalid port range" +msgstr "Невірний діапазон портів" + +#: ../nova/api/ec2/cloud.py:421 +#, python-format +msgid "Revoke security group ingress %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:430 ../nova/api/ec2/cloud.py:459 +msgid "Not enough parameters to build a valid rule." msgstr "" -#: nova/compute/manager.py:420 +#: ../nova/api/ec2/cloud.py:443 +msgid "No rule for the specified parameters." 
+msgstr "" + +#: ../nova/api/ec2/cloud.py:450 #, python-format -msgid "instance %s: locking" +msgid "Authorize security group ingress %s" msgstr "" -#: nova/compute/manager.py:432 +#: ../nova/api/ec2/cloud.py:464 #, python-format -msgid "instance %s: unlocking" +msgid "This rule already exists in group %s" +msgstr "Це правило вже існує в групі %s" + +#: ../nova/api/ec2/cloud.py:492 +#, python-format +msgid "Create Security Group %s" msgstr "" -#: nova/compute/manager.py:442 +#: ../nova/api/ec2/cloud.py:495 #, python-format -msgid "instance %s: getting locked state" +msgid "group %s already exists" msgstr "" -#: nova/compute/manager.py:462 +#: ../nova/api/ec2/cloud.py:507 #, python-format -msgid "instance %s: attaching volume %s to %s" +msgid "Delete security group %s" +msgstr "Вилучити групу безпеки %s" + +#: ../nova/api/ec2/cloud.py:584 +#, python-format +msgid "Create volume of %s GB" +msgstr "Створити розділ на %s ГБ" + +#: ../nova/api/ec2/cloud.py:612 +#, python-format +msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" msgstr "" -#: nova/compute/manager.py:478 +#: ../nova/api/ec2/cloud.py:629 #, python-format -msgid "instance %s: attach failed %s, removing" +msgid "Detach volume %s" +msgstr "Від'єднати том %s" + +#: ../nova/api/ec2/cloud.py:761 +msgid "Allocate address" msgstr "" -#: nova/compute/manager.py:493 +#: ../nova/api/ec2/cloud.py:766 #, python-format -msgid "Detach volume %s from mountpoint %s on instance %s" +msgid "Release address %s" msgstr "" -#: nova/compute/manager.py:497 +#: ../nova/api/ec2/cloud.py:771 #, python-format -msgid "Detaching volume from unknown instance %s" +msgid "Associate address %(public_ip)s to instance %(instance_id)s" msgstr "" -#: nova/compute/monitor.py:259 +#: ../nova/api/ec2/cloud.py:780 #, python-format -msgid "updating %s..." 
+msgid "Disassociate address %s" msgstr "" -#: nova/compute/monitor.py:289 -msgid "unexpected error during update" +#: ../nova/api/ec2/cloud.py:807 +msgid "Going to start terminating instances" msgstr "" -#: nova/compute/monitor.py:355 +#: ../nova/api/ec2/cloud.py:815 #, python-format -msgid "Cannot get blockstats for \"%s\" on \"%s\"" +msgid "Reboot instance %r" msgstr "" -#: nova/compute/monitor.py:377 +#: ../nova/api/ec2/cloud.py:867 #, python-format -msgid "Cannot get ifstats for \"%s\" on \"%s\"" +msgid "De-registering image %s" msgstr "" -#: nova/compute/monitor.py:412 -msgid "unexpected exception getting connection" +#: ../nova/api/ec2/cloud.py:875 +#, python-format +msgid "Registered image %(image_location)s with id %(image_id)s" msgstr "" -#: nova/compute/monitor.py:427 +#: ../nova/api/ec2/cloud.py:882 ../nova/api/ec2/cloud.py:900 #, python-format -msgid "Found instance: %s" +msgid "attribute not supported: %s" msgstr "" -#: nova/db/sqlalchemy/api.py:43 -msgid "Use of empty request context is deprecated" +#: ../nova/api/ec2/cloud.py:890 +#, python-format +msgid "invalid id: %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:903 +msgid "user or group not specified" +msgstr "" + +#: ../nova/api/ec2/cloud.py:905 +msgid "only group \"all\" is supported" +msgstr "лише група \"всі\" підтримується" + +#: ../nova/api/ec2/cloud.py:907 +msgid "operation_type must be add or remove" msgstr "" -#: nova/db/sqlalchemy/api.py:132 +#: ../nova/api/ec2/cloud.py:908 #, python-format -msgid "No service for id %s" +msgid "Updating image %s publicity" msgstr "" -#: nova/db/sqlalchemy/api.py:229 +#: ../bin/nova-api.py:52 #, python-format -msgid "No service for %s, %s" +msgid "Using paste.deploy config at: %s" msgstr "" -#: nova/db/sqlalchemy/api.py:574 +#: ../bin/nova-api.py:57 #, python-format -msgid "No floating ip for address %s" +msgid "No paste configuration for app: %s" msgstr "" -#: nova/db/sqlalchemy/api.py:668 +#: ../bin/nova-api.py:59 #, python-format -msgid "No instance for 
id %s" +msgid "" +"App Config: %(api)s\n" +"%(config)r" msgstr "" -#: nova/db/sqlalchemy/api.py:758 nova/virt/libvirt_conn.py:598 -#: nova/virt/xenapi/volumeops.py:48 nova/virt/xenapi/volumeops.py:103 +#: ../bin/nova-api.py:64 #, python-format -msgid "Instance %s not found" +msgid "Running %s API" msgstr "" -#: nova/db/sqlalchemy/api.py:891 +#: ../bin/nova-api.py:69 #, python-format -msgid "no keypair for user %s, name %s" +msgid "No known API applications configured in %s." msgstr "" -#: nova/db/sqlalchemy/api.py:1006 nova/db/sqlalchemy/api.py:1064 +#: ../bin/nova-api.py:83 #, python-format -msgid "No network for id %s" +msgid "Starting nova-api node (version %s)" msgstr "" -#: nova/db/sqlalchemy/api.py:1036 +#: ../bin/nova-api.py:89 #, python-format -msgid "No network for bridge %s" +msgid "No paste configuration found for: %s" msgstr "" -#: nova/db/sqlalchemy/api.py:1050 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:84 #, python-format -msgid "No network for instance %s" +msgid "Argument %(key)s value %(value)s is too short." msgstr "" -#: nova/db/sqlalchemy/api.py:1180 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:89 #, python-format -msgid "Token %s does not exist" +msgid "Argument %(key)s value %(value)s contains invalid characters." msgstr "" -#: nova/db/sqlalchemy/api.py:1205 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:94 #, python-format -msgid "No quota for project_id %s" +msgid "Argument %(key)s value %(value)s starts with a hyphen." msgstr "" -#: nova/db/sqlalchemy/api.py:1356 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:102 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:130 #, python-format -msgid "No volume for id %s" +msgid "Argument %s is required." 
msgstr "" -#: nova/db/sqlalchemy/api.py:1401 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:117 #, python-format -msgid "Volume %s not found" +msgid "" +"Argument %(key)s may not take value %(value)s. Valid values are ['true', " +"'false']." msgstr "" -#: nova/db/sqlalchemy/api.py:1413 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:163 #, python-format -msgid "No export device found for volume %s" +msgid "" +"Created VDI %(vdi_ref)s (%(label)s, %(size)s, %(read_only)s) on %(sr_ref)s." msgstr "" -#: nova/db/sqlalchemy/api.py:1426 +#: ../nova/virt/xenapi/vmops.py:67 #, python-format -msgid "No target id found for volume %s" +msgid "Attempted to create non-unique name %s" msgstr "" -#: nova/db/sqlalchemy/api.py:1471 +#: ../nova/virt/xenapi/vmops.py:73 #, python-format -msgid "No security group with id %s" +msgid "instance %(name)s: not enough free memory" msgstr "" -#: nova/db/sqlalchemy/api.py:1488 +#: ../nova/virt/xenapi/vmops.py:148 #, python-format -msgid "No security group named %s for project: %s" +msgid "Starting VM %s..." msgstr "" -#: nova/db/sqlalchemy/api.py:1576 +#: ../nova/virt/xenapi/vmops.py:151 #, python-format -msgid "No secuity group rule with id %s" +msgid "Spawning VM %(instance_name)s created %(vm_ref)s." 
msgstr "" -#: nova/db/sqlalchemy/api.py:1650 +#: ../nova/virt/xenapi/vmops.py:162 #, python-format -msgid "No user for id %s" +msgid "Invalid value for onset_files: '%s'" msgstr "" -#: nova/db/sqlalchemy/api.py:1666 +#: ../nova/virt/xenapi/vmops.py:167 #, python-format -msgid "No user for access key %s" +msgid "Injecting file path: '%s'" msgstr "" -#: nova/db/sqlalchemy/api.py:1728 +#: ../nova/virt/xenapi/vmops.py:180 #, python-format -msgid "No project with id %s" +msgid "Instance %s: booted" msgstr "" -#: nova/image/glance.py:78 +#: ../nova/virt/xenapi/vmops.py:232 #, python-format -msgid "Parallax returned HTTP error %d from request for /images" +msgid "Instance not present %s" msgstr "" -#: nova/image/glance.py:97 +#. TODO(sirp): Add quiesce and VSS locking support when Windows support +#. is added +#: ../nova/virt/xenapi/vmops.py:261 #, python-format -msgid "Parallax returned HTTP error %d from request for /images/detail" +msgid "Starting snapshot for VM %s" msgstr "" -#: nova/image/s3.py:82 +#: ../nova/virt/xenapi/vmops.py:269 #, python-format -msgid "Image %s could not be found" +msgid "Unable to Snapshot %(vm_ref)s: %(exc)s" msgstr "" -#: nova/network/api.py:39 +#: ../nova/virt/xenapi/vmops.py:280 #, python-format -msgid "Quota exceeeded for %s, tried to allocate address" +msgid "Finished snapshot and upload for VM %s" msgstr "" -#: nova/network/api.py:42 -msgid "Address quota exceeded. You cannot allocate any more addresses" +#: ../nova/virt/xenapi/vmops.py:356 +#, python-format +msgid "VM %(vm)s already halted, skipping shutdown..." +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:389 +msgid "Removing kernel/ramdisk files" +msgstr "" + +#: ../nova/virt/xenapi/vmops.py:399 +msgid "kernel/ramdisk files removed" msgstr "" -#: nova/network/linux_net.py:176 +#: ../nova/virt/xenapi/vmops.py:561 #, python-format -msgid "Starting VLAN inteface %s" +msgid "" +"TIMEOUT: The call to %(method)s timed out. 
VM id=%(instance_id)s; " +"args=%(strargs)s" msgstr "" -#: nova/network/linux_net.py:186 +#: ../nova/virt/xenapi/vmops.py:564 #, python-format -msgid "Starting Bridge interface for %s" +msgid "" +"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. VM " +"id=%(instance_id)s; args=%(strargs)s" msgstr "" -#: nova/network/linux_net.py:254 +#: ../nova/virt/xenapi/vmops.py:569 #, python-format -msgid "Hupping dnsmasq threw %s" +msgid "" +"The call to %(method)s returned an error: %(e)s. VM id=%(instance_id)s; " +"args=%(strargs)s" msgstr "" -#: nova/network/linux_net.py:256 +#: ../nova/virt/xenapi/vmops.py:760 #, python-format -msgid "Pid %d is stale, relaunching dnsmasq" +msgid "OpenSSL error: %s" msgstr "" -#: nova/network/linux_net.py:334 +#: ../nova/tests/test_compute.py:148 #, python-format -msgid "Killing dnsmasq threw %s" +msgid "Running instances: %s" msgstr "" -#: nova/network/manager.py:135 -msgid "setting network host" +#: ../nova/tests/test_compute.py:154 +#, python-format +msgid "After terminating instances: %s" +msgstr "" + +#: ../nova/cloudpipe/pipelib.py:45 +msgid "Template for script to run on cloudpipe instance boot" msgstr "" -#: nova/network/manager.py:190 +#: ../nova/cloudpipe/pipelib.py:48 +msgid "Network to push into openvpn config" +msgstr "" + +#: ../nova/cloudpipe/pipelib.py:51 +msgid "Netmask to push into openvpn config" +msgstr "" + +#: ../nova/cloudpipe/pipelib.py:97 #, python-format -msgid "Leasing IP %s" +msgid "Launching VPN for %s" +msgstr "" + +#: ../nova/db/sqlalchemy/migration.py:35 +msgid "python-migrate is not installed. Exiting." msgstr "" -#: nova/network/manager.py:194 +#: ../nova/image/s3.py:99 #, python-format -msgid "IP %s leased that isn't associated" +msgid "Image %s could not be found" msgstr "" -#: nova/network/manager.py:197 +#: ../nova/api/ec2/__init__.py:121 +msgid "Too many failed authentications." +msgstr "Занадто багато невдалих аутентифікацій." 
+ +#: ../nova/api/ec2/__init__.py:131 #, python-format -msgid "IP %s leased to bad mac %s vs %s" +msgid "" +"Access key %(access_key)s has had %(failures)d failed authentications and " +"will be locked out for %(lock_mins)d minutes." msgstr "" -#: nova/network/manager.py:205 +#: ../nova/api/ec2/__init__.py:169 ../nova/objectstore/handler.py:140 #, python-format -msgid "IP %s leased that was already deallocated" +msgid "Authentication Failure: %s" msgstr "" -#: nova/network/manager.py:214 +#: ../nova/api/ec2/__init__.py:182 #, python-format -msgid "IP %s released that isn't associated" +msgid "Authenticated Request For %(uname)s:%(pname)s)" msgstr "" -#: nova/network/manager.py:217 +#: ../nova/api/ec2/__init__.py:207 #, python-format -msgid "IP %s released from bad mac %s vs %s" +msgid "action: %s" msgstr "" -#: nova/network/manager.py:220 +#: ../nova/api/ec2/__init__.py:209 #, python-format -msgid "IP %s released that was not leased" +msgid "arg: %(key)s\t\tval: %(value)s" msgstr "" -#: nova/network/manager.py:442 +#: ../nova/api/ec2/__init__.py:281 #, python-format -msgid "Dissassociated %s stale fixed ip(s)" +msgid "" +"Unauthorized request for controller=%(controller)s and action=%(action)s" msgstr "" -#: nova/objectstore/handler.py:106 +#: ../nova/api/ec2/__init__.py:314 #, python-format -msgid "Unknown S3 value type %r" +msgid "InstanceNotFound raised: %s" msgstr "" -#: nova/objectstore/handler.py:137 -msgid "Authenticated request" +#: ../nova/api/ec2/__init__.py:320 +#, python-format +msgid "VolumeNotFound raised: %s" msgstr "" -#: nova/objectstore/handler.py:182 -msgid "List of buckets requested" +#: ../nova/api/ec2/__init__.py:326 +#, python-format +msgid "NotFound raised: %s" msgstr "" -#: nova/objectstore/handler.py:209 +#: ../nova/api/ec2/__init__.py:329 #, python-format -msgid "List keys for bucket %s" +msgid "ApiError raised: %s" msgstr "" -#: nova/objectstore/handler.py:217 +#: ../nova/api/ec2/__init__.py:338 #, python-format -msgid "Unauthorized 
attempt to access bucket %s" +msgid "Unexpected error raised: %s" +msgstr "" + +#: ../nova/api/ec2/__init__.py:343 +msgid "An unknown error has occurred. Please try your request again." msgstr "" -#: nova/objectstore/handler.py:235 +#: ../nova/auth/dbdriver.py:84 #, python-format -msgid "Creating bucket %s" +msgid "User %s already exists" +msgstr "Користувач %s вже існує" + +#: ../nova/auth/dbdriver.py:106 ../nova/auth/ldapdriver.py:232 +#, python-format +msgid "Project can't be created because manager %s doesn't exist" msgstr "" -#: nova/objectstore/handler.py:245 +#: ../nova/auth/dbdriver.py:122 ../nova/auth/ldapdriver.py:243 #, python-format -msgid "Deleting bucket %s" +msgid "Project can't be created because user %s doesn't exist" msgstr "" -#: nova/objectstore/handler.py:249 +#: ../nova/auth/dbdriver.py:135 ../nova/auth/ldapdriver.py:229 #, python-format -msgid "Unauthorized attempt to delete bucket %s" +msgid "Project can't be created because project %s already exists" msgstr "" -#: nova/objectstore/handler.py:271 +#: ../nova/auth/dbdriver.py:157 ../nova/auth/ldapdriver.py:268 #, python-format -msgid "Getting object: %s / %s" +msgid "Project can't be modified because manager %s doesn't exist" msgstr "" -#: nova/objectstore/handler.py:274 +#: ../nova/auth/dbdriver.py:245 +#, python-format +msgid "User \"%s\" not found" +msgstr "Користувач \"%s\" не знайдено" + +#: ../nova/auth/dbdriver.py:248 #, python-format -msgid "Unauthorized attempt to get object %s from bucket %s" +msgid "Project \"%s\" not found" +msgstr "Проект \"%s\" не знайдено" + +#: ../nova/virt/xenapi_conn.py:129 +msgid "" +"Must specify xenapi_connection_url, xenapi_connection_username (optionally), " +"and xenapi_connection_password to use connection_type=xenapi" msgstr "" -#: nova/objectstore/handler.py:292 +#: ../nova/virt/xenapi_conn.py:311 #, python-format -msgid "Putting object: %s / %s" +msgid "Task [%(name)s] %(task)s status: success %(result)s" msgstr "" -#: 
nova/objectstore/handler.py:295 +#: ../nova/virt/xenapi_conn.py:317 #, python-format -msgid "Unauthorized attempt to upload object %s to bucket %s" +msgid "Task [%(name)s] %(task)s status: %(status)s %(error_info)s" msgstr "" -#: nova/objectstore/handler.py:314 +#: ../nova/virt/xenapi_conn.py:331 ../nova/virt/xenapi_conn.py:344 #, python-format -msgid "Deleting object: %s / %s" +msgid "Got exception: %s" msgstr "" -#: nova/objectstore/handler.py:393 +#: ../nova/compute/monitor.py:259 #, python-format -msgid "Not authorized to upload image: invalid directory %s" +msgid "updating %s..." msgstr "" -#: nova/objectstore/handler.py:401 +#: ../nova/compute/monitor.py:289 +msgid "unexpected error during update" +msgstr "" + +#: ../nova/compute/monitor.py:356 #, python-format -msgid "Not authorized to upload image: unauthorized bucket %s" +msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\"" msgstr "" -#: nova/objectstore/handler.py:406 +#: ../nova/compute/monitor.py:379 #, python-format -msgid "Starting image upload: %s" +msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\"" msgstr "" -#: nova/objectstore/handler.py:420 +#: ../nova/compute/monitor.py:414 +msgid "unexpected exception getting connection" +msgstr "" + +#: ../nova/compute/monitor.py:429 #, python-format -msgid "Not authorized to update attributes of image %s" +msgid "Found instance: %s" msgstr "" -#: nova/objectstore/handler.py:428 +#: ../nova/volume/san.py:67 #, python-format -msgid "Toggling publicity flag of image %s %r" +msgid "Could not find iSCSI export for volume %s" msgstr "" -#: nova/objectstore/handler.py:433 +#: ../nova/api/ec2/apirequest.py:100 #, python-format -msgid "Updating user fields on image %s" +msgid "" +"Unsupported API request: controller = %(controller)s, action = %(action)s" msgstr "" -#: nova/objectstore/handler.py:447 +#: ../nova/api/openstack/__init__.py:55 #, python-format -msgid "Unauthorized attempt to delete image %s" +msgid "Caught error: %s" +msgstr "" + +#: 
../nova/api/openstack/__init__.py:76 +msgid "Including admin operations in API." msgstr "" -#: nova/objectstore/handler.py:452 +#: ../nova/console/xvp.py:99 +msgid "Rebuilding xvp conf" +msgstr "" + +#: ../nova/console/xvp.py:116 #, python-format -msgid "Deleted image: %s" +msgid "Re-wrote %s" msgstr "" -#: nova/scheduler/chance.py:37 nova/scheduler/simple.py:73 -#: nova/scheduler/simple.py:106 nova/scheduler/simple.py:118 -msgid "No hosts found" +#: ../nova/console/xvp.py:121 +msgid "Stopping xvp" msgstr "" -#: nova/scheduler/driver.py:66 -msgid "Must implement a fallback schedule" +#: ../nova/console/xvp.py:134 +msgid "Starting xvp" msgstr "" -#: nova/scheduler/manager.py:69 +#: ../nova/console/xvp.py:141 #, python-format -msgid "Casting to %s %s for %s" +msgid "Error starting xvp: %s" msgstr "" -#: nova/scheduler/simple.py:63 -msgid "All hosts have too many cores" +#: ../nova/console/xvp.py:144 +msgid "Restarting xvp" msgstr "" -#: nova/scheduler/simple.py:95 -msgid "All hosts have too many gigabytes" +#: ../nova/console/xvp.py:146 +msgid "xvp not running..." msgstr "" -#: nova/scheduler/simple.py:115 -msgid "All hosts have too many networks" +#: ../bin/nova-manage.py:272 +msgid "" +"The above error may show that the database has not been created.\n" +"Please create a database using nova-manage sync db before running this " +"command." msgstr "" -#: nova/tests/test_cloud.py:198 -msgid "Can't test instances without a real virtual env." +#: ../bin/nova-manage.py:426 +msgid "" +"No more networks available. If this is a new installation, you need\n" +"to call something like this:\n" +"\n" +" nova-manage network create 10.0.0.0/8 10 64\n" +"\n" msgstr "" -#: nova/tests/test_cloud.py:210 -#, python-format -msgid "Need to watch instance %s until it's running..." +#: ../bin/nova-manage.py:431 +msgid "" +"The above error may show that the certificate db has not been created.\n" +"Please create a database by running a nova-api server on this host." 
msgstr "" -#: nova/tests/test_compute.py:104 -#, python-format -msgid "Running instances: %s" +#: ../bin/nova-manage.py:447 ../bin/nova-manage.py:536 +msgid "network" msgstr "" -#: nova/tests/test_compute.py:110 -#, python-format -msgid "After terminating instances: %s" +#: ../bin/nova-manage.py:448 +msgid "IP address" +msgstr "" + +#: ../bin/nova-manage.py:449 +msgid "MAC address" +msgstr "" + +#: ../bin/nova-manage.py:450 +msgid "hostname" +msgstr "" + +#: ../bin/nova-manage.py:451 +msgid "host" msgstr "" -#: nova/tests/test_rpc.py:89 +#: ../bin/nova-manage.py:537 +msgid "netmask" +msgstr "" + +#: ../bin/nova-manage.py:538 +msgid "start address" +msgstr "" + +#: ../nova/virt/disk.py:69 #, python-format -msgid "Nested received %s, %s" +msgid "Failed to load partition: %s" msgstr "" -#: nova/tests/test_rpc.py:94 +#: ../nova/virt/disk.py:91 #, python-format -msgid "Nested return %s" +msgid "Failed to mount filesystem: %s" msgstr "" -#: nova/tests/test_rpc.py:119 nova/tests/test_rpc.py:125 +#: ../nova/virt/disk.py:124 #, python-format -msgid "Received %s" +msgid "nbd device %s did not show up" msgstr "" -#: nova/tests/test_volume.py:162 +#: ../nova/virt/disk.py:128 #, python-format -msgid "Target %s allocated" +msgid "Could not attach image to loopback: %s" msgstr "" -#: nova/virt/connection.py:73 -msgid "Failed to open connection to the hypervisor" +#: ../nova/virt/disk.py:151 +msgid "No free nbd devices" msgstr "" -#: nova/virt/fake.py:210 +#: ../doc/ext/nova_todo.py:46 #, python-format -msgid "Instance %s Not Found" +msgid "%(filename)s, line %(line_info)d" msgstr "" -#: nova/virt/hyperv.py:118 +#. 
FIXME(chiradeep): implement this +#: ../nova/virt/hyperv.py:118 msgid "In init host" msgstr "" -#: nova/virt/hyperv.py:131 +#: ../nova/virt/hyperv.py:131 #, python-format msgid "Attempt to create duplicate vm %s" msgstr "" -#: nova/virt/hyperv.py:148 +#: ../nova/virt/hyperv.py:148 #, python-format msgid "Starting VM %s " msgstr "" -#: nova/virt/hyperv.py:150 +#: ../nova/virt/hyperv.py:150 #, python-format msgid "Started VM %s " msgstr "" -#: nova/virt/hyperv.py:152 +#: ../nova/virt/hyperv.py:152 #, python-format msgid "spawn vm failed: %s" msgstr "" -#: nova/virt/hyperv.py:169 +#: ../nova/virt/hyperv.py:169 #, python-format msgid "Failed to create VM %s" msgstr "" -#: nova/virt/hyperv.py:171 nova/virt/xenapi/vm_utils.py:125 -#, python-format -msgid "Created VM %s..." -msgstr "" - -#: nova/virt/hyperv.py:188 +#: ../nova/virt/hyperv.py:188 #, python-format msgid "Set memory for vm %s..." msgstr "" -#: nova/virt/hyperv.py:198 +#: ../nova/virt/hyperv.py:198 #, python-format msgid "Set vcpus for vm %s..." 
msgstr "" -#: nova/virt/hyperv.py:202 +#: ../nova/virt/hyperv.py:202 #, python-format -msgid "Creating disk for %s by attaching disk file %s" +msgid "Creating disk for %(vm_name)s by attaching disk file %(vhdfile)s" msgstr "" -#: nova/virt/hyperv.py:227 +#: ../nova/virt/hyperv.py:227 #, python-format msgid "Failed to add diskdrive to VM %s" msgstr "" -#: nova/virt/hyperv.py:230 +#: ../nova/virt/hyperv.py:230 #, python-format msgid "New disk drive path is %s" msgstr "" -#: nova/virt/hyperv.py:247 +#: ../nova/virt/hyperv.py:247 #, python-format msgid "Failed to add vhd file to VM %s" msgstr "" -#: nova/virt/hyperv.py:249 +#: ../nova/virt/hyperv.py:249 #, python-format msgid "Created disk for %s" msgstr "" -#: nova/virt/hyperv.py:253 +#: ../nova/virt/hyperv.py:253 #, python-format msgid "Creating nic for %s " msgstr "" -#: nova/virt/hyperv.py:272 +#: ../nova/virt/hyperv.py:272 msgid "Failed creating a port on the external vswitch" msgstr "" -#: nova/virt/hyperv.py:273 +#: ../nova/virt/hyperv.py:273 #, python-format msgid "Failed creating port for %s" msgstr "" -#: nova/virt/hyperv.py:275 +#: ../nova/virt/hyperv.py:276 #, python-format -msgid "Created switch port %s on switch %s" +msgid "Created switch port %(vm_name)s on switch %(ext_path)s" msgstr "" -#: nova/virt/hyperv.py:285 +#: ../nova/virt/hyperv.py:286 #, python-format msgid "Failed to add nic to VM %s" msgstr "" -#: nova/virt/hyperv.py:287 +#: ../nova/virt/hyperv.py:288 #, python-format msgid "Created nic for %s " msgstr "" -#: nova/virt/hyperv.py:320 +#: ../nova/virt/hyperv.py:321 #, python-format msgid "WMI job failed: %s" msgstr "" -#: nova/virt/hyperv.py:322 +#: ../nova/virt/hyperv.py:325 #, python-format -msgid "WMI job succeeded: %s, Elapsed=%s " +msgid "WMI job succeeded: %(desc)s, Elapsed=%(elap)s " msgstr "" -#: nova/virt/hyperv.py:358 +#: ../nova/virt/hyperv.py:361 #, python-format msgid "Got request to destroy vm %s" msgstr "" -#: nova/virt/hyperv.py:383 +#: ../nova/virt/hyperv.py:386 #, 
python-format msgid "Failed to destroy vm %s" msgstr "" -#: nova/virt/hyperv.py:389 +#: ../nova/virt/hyperv.py:393 #, python-format -msgid "Del: disk %s vm %s" +msgid "Del: disk %(vhdfile)s vm %(instance_name)s" msgstr "" -#: nova/virt/hyperv.py:405 +#: ../nova/virt/hyperv.py:415 #, python-format msgid "" -"Got Info for vm %s: state=%s, mem=%s, num_cpu=%s, " -"cpu_time=%s" +"Got Info for vm %(instance_id)s: state=%(state)s, mem=%(memusage)s, " +"num_cpu=%(numprocs)s, cpu_time=%(uptime)s" msgstr "" -#: nova/virt/hyperv.py:424 nova/virt/xenapi/vm_utils.py:301 +#: ../nova/virt/hyperv.py:451 #, python-format -msgid "duplicate name found: %s" +msgid "Successfully changed vm state of %(vm_name)s to %(req_state)s" msgstr "" -#: nova/virt/hyperv.py:444 +#: ../nova/virt/hyperv.py:454 #, python-format -msgid "Successfully changed vm state of %s to %s" +msgid "Failed to change vm state of %(vm_name)s to %(req_state)s" msgstr "" -#: nova/virt/hyperv.py:447 nova/virt/hyperv.py:449 +#: ../nova/compute/api.py:71 #, python-format -msgid "Failed to change vm state of %s to %s" +msgid "Instance %d was not found in get_network_topic" msgstr "" -#: nova/virt/images.py:70 +#: ../nova/compute/api.py:77 #, python-format -msgid "Finished retreving %s -- placed in %s" +msgid "Instance %d has no host" msgstr "" -#: nova/virt/libvirt_conn.py:144 +#: ../nova/compute/api.py:97 #, python-format -msgid "Connecting to libvirt: %s" +msgid "Quota exceeeded for %(pid)s, tried to run %(min_count)s instances" msgstr "" -#: nova/virt/libvirt_conn.py:157 -msgid "Connection to libvirt broke" +#: ../nova/compute/api.py:99 +#, python-format +msgid "" +"Instance quota exceeded. You can only run %s more instances of this type." 
msgstr "" -#: nova/virt/libvirt_conn.py:229 -#, python-format -msgid "instance %s: deleting instance files %s" +#: ../nova/compute/api.py:112 +msgid "Creating a raw instance" msgstr "" -#: nova/virt/libvirt_conn.py:271 +#: ../nova/compute/api.py:160 #, python-format -msgid "No disk at %s" +msgid "Going to run %s instances..." msgstr "" -#: nova/virt/libvirt_conn.py:278 -msgid "Instance snapshotting is not supported for libvirtat this time" +#: ../nova/compute/api.py:187 +#, python-format +msgid "Casting to scheduler for %(pid)s/%(uid)s's instance %(instance_id)s" msgstr "" -#: nova/virt/libvirt_conn.py:294 +#: ../nova/compute/api.py:292 #, python-format -msgid "instance %s: rebooted" +msgid "Going to try to terminate %s" msgstr "" -#: nova/virt/libvirt_conn.py:297 +#: ../nova/compute/api.py:296 #, python-format -msgid "_wait_for_reboot failed: %s" +msgid "Instance %d was not found during terminate" msgstr "" -#: nova/virt/libvirt_conn.py:340 +#: ../nova/compute/api.py:301 #, python-format -msgid "instance %s: rescued" +msgid "Instance %d is already being terminated" msgstr "" -#: nova/virt/libvirt_conn.py:343 +#: ../nova/compute/api.py:481 #, python-format -msgid "_wait_for_rescue failed: %s" +msgid "Invalid device specified: %s. Example device: /dev/vdb" +msgstr "" + +#: ../nova/compute/api.py:496 +msgid "Volume isn't attached to anything!" msgstr "" -#: nova/virt/libvirt_conn.py:370 +#: ../nova/rpc.py:98 #, python-format -msgid "instance %s: is running" +msgid "" +"AMQP server on %(fl_host)s:%(fl_port)d is unreachable. Trying again in " +"%(fl_intv)d seconds." msgstr "" -#: nova/virt/libvirt_conn.py:381 +#: ../nova/rpc.py:103 #, python-format -msgid "instance %s: booted" +msgid "Unable to connect to AMQP server after %d tries. Shutting down." +msgstr "Не вдалось під'єднатися до серверу AMQP після %d спроб. Вимкнення." 
+ +#: ../nova/rpc.py:122 +msgid "Reconnected to queue" +msgstr "Оновлено з'єднання до черги" + +#: ../nova/rpc.py:129 +msgid "Failed to fetch message from queue" msgstr "" -#: nova/virt/libvirt_conn.py:384 nova/virt/xenapi/vmops.py:116 +#: ../nova/rpc.py:159 #, python-format -msgid "instance %s: failed to boot" +msgid "Initing the Adapter Consumer for %s" msgstr "" -#: nova/virt/libvirt_conn.py:395 +#: ../nova/rpc.py:178 #, python-format -msgid "virsh said: %r" +msgid "received %s" +msgstr "отримано %s" + +#. NOTE(vish): we may not want to ack here, but that means that bad +#. messages stay in the queue indefinitely, so for now +#. we just log the message and send an error string +#. back to the caller +#: ../nova/rpc.py:191 +#, python-format +msgid "no method for message: %s" +msgstr "без порядку для повідомлень: %s" + +#: ../nova/rpc.py:192 +#, python-format +msgid "No method for message: %s" +msgstr "Без порядку для повідомлень: %s" + +#: ../nova/rpc.py:253 +#, python-format +msgid "Returning exception %s to caller" msgstr "" -#: nova/virt/libvirt_conn.py:399 -msgid "cool, it's a device" +#: ../nova/rpc.py:294 +#, python-format +msgid "unpacked context: %s" msgstr "" -#: nova/virt/libvirt_conn.py:407 +#: ../nova/rpc.py:313 +msgid "Making asynchronous call..." +msgstr "Створення асинхронного виклику..." + +#: ../nova/rpc.py:316 #, python-format -msgid "data: %r, fpath: %r" +msgid "MSG_ID is %s" +msgstr "MSG_ID %s" + +#: ../nova/rpc.py:354 +msgid "Making asynchronous cast..." msgstr "" -#: nova/virt/libvirt_conn.py:415 +#: ../nova/rpc.py:364 #, python-format -msgid "Contents of file %s: %r" +msgid "response %s" +msgstr "відповідь %s" + +#: ../nova/rpc.py:373 +#, python-format +msgid "topic is %s" +msgstr "заголовок %s" + +#: ../nova/rpc.py:374 +#, python-format +msgid "message %s" +msgstr "повідомлення %s" + +#: ../nova/volume/driver.py:78 +#, python-format +msgid "Recovering from a failed execute. 
Try number %s" msgstr "" -#: nova/virt/libvirt_conn.py:449 +#: ../nova/volume/driver.py:87 #, python-format -msgid "instance %s: Creating image" +msgid "volume group %s doesn't exist" msgstr "" -#: nova/virt/libvirt_conn.py:505 +#: ../nova/volume/driver.py:220 #, python-format -msgid "instance %s: injecting key into image %s" +msgid "FAKE AOE: %s" +msgstr "" + +#: ../nova/volume/driver.py:233 +msgid "Skipping ensure_export. No iscsi_target " msgstr "" -#: nova/virt/libvirt_conn.py:508 +#: ../nova/volume/driver.py:279 ../nova/volume/driver.py:288 +msgid "Skipping remove_export. No iscsi_target " +msgstr "" + +#: ../nova/volume/driver.py:347 #, python-format -msgid "instance %s: injecting net into image %s" +msgid "FAKE ISCSI: %s" msgstr "" -#: nova/virt/libvirt_conn.py:516 +#: ../nova/volume/driver.py:359 #, python-format -msgid "instance %s: ignoring error injecting data into image %s (%s)" +msgid "rbd has no pool %s" msgstr "" -#: nova/virt/libvirt_conn.py:544 nova/virt/libvirt_conn.py:547 +#: ../nova/volume/driver.py:414 #, python-format -msgid "instance %s: starting toXML method" +msgid "Sheepdog is not working: %s" msgstr "" -#: nova/virt/libvirt_conn.py:589 +#: ../nova/volume/driver.py:416 +msgid "Sheepdog is not working" +msgstr "" + +#: ../nova/wsgi.py:68 #, python-format -msgid "instance %s: finished toXML method" +msgid "Starting %(arg0)s on %(host)s:%(port)s" msgstr "" -#: nova/virt/xenapi_conn.py:113 +#: ../nova/wsgi.py:147 +msgid "You must implement __call__" +msgstr "" + +#: ../bin/nova-instancemonitor.py:55 +msgid "Starting instance monitor" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:58 +msgid "leasing ip" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:73 +msgid "Adopted old lease or got a change of mac/hostname" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:80 +msgid "releasing ip" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:123 +#, python-format msgid "" -"Must specify xenapi_connection_url, xenapi_connection_username (optionally), " -"and 
xenapi_connection_password to use connection_type=xenapi" +"Called %(action)s for mac %(mac)s with ip %(ip)s and hostname %(hostname)s " +"on interface %(interface)s" +msgstr "" + +#: ../nova/virt/fake.py:239 +#, python-format +msgid "Instance %s Not Found" msgstr "" -#: nova/virt/xenapi_conn.py:263 +#: ../nova/network/manager.py:153 #, python-format -msgid "Task [%s] %s status: success %s" +msgid "Dissassociated %s stale fixed ip(s)" msgstr "" -#: nova/virt/xenapi_conn.py:271 +#: ../nova/network/manager.py:157 +msgid "setting network host" +msgstr "" + +#: ../nova/network/manager.py:212 #, python-format -msgid "Task [%s] %s status: %s %s" +msgid "Leasing IP %s" msgstr "" -#: nova/virt/xenapi_conn.py:287 nova/virt/xenapi_conn.py:300 +#: ../nova/network/manager.py:216 #, python-format -msgid "Got exception: %s" +msgid "IP %s leased that isn't associated" msgstr "" -#: nova/virt/xenapi/fake.py:72 +#: ../nova/network/manager.py:220 #, python-format -msgid "%s: _db_content => %s" +msgid "IP %(address)s leased to bad mac %(inst_addr)s vs %(mac)s" msgstr "" -#: nova/virt/xenapi/fake.py:247 nova/virt/xenapi/fake.py:338 -#: nova/virt/xenapi/fake.py:356 nova/virt/xenapi/fake.py:404 -msgid "Raising NotImplemented" +#: ../nova/network/manager.py:228 +#, python-format +msgid "IP %s leased that was already deallocated" msgstr "" -#: nova/virt/xenapi/fake.py:249 +#: ../nova/network/manager.py:233 #, python-format -msgid "xenapi.fake does not have an implementation for %s" +msgid "Releasing IP %s" msgstr "" -#: nova/virt/xenapi/fake.py:283 +#: ../nova/network/manager.py:237 #, python-format -msgid "Calling %s %s" +msgid "IP %s released that isn't associated" msgstr "" -#: nova/virt/xenapi/fake.py:288 +#: ../nova/network/manager.py:241 #, python-format -msgid "Calling getter %s" +msgid "IP %(address)s released from bad mac %(inst_addr)s vs %(mac)s" msgstr "" -#: nova/virt/xenapi/fake.py:340 +#: ../nova/network/manager.py:244 #, python-format +msgid "IP %s released that was not 
leased" +msgstr "" + +#: ../nova/network/manager.py:519 msgid "" -"xenapi.fake does not have an implementation for %s or it has been called " -"with the wrong number of arguments" +"The sum between the number of networks and the vlan start cannot be greater " +"than 4094" msgstr "" -#: nova/virt/xenapi/network_utils.py:40 +#: ../nova/virt/xenapi/volume_utils.py:57 #, python-format -msgid "Found non-unique network for bridge %s" +msgid "Introducing %s..." msgstr "" -#: nova/virt/xenapi/network_utils.py:43 +#: ../nova/virt/xenapi/volume_utils.py:74 #, python-format -msgid "Found no network for bridge %s" +msgid "Introduced %(label)s as %(sr_ref)s." msgstr "" -#: nova/virt/xenapi/vm_utils.py:127 +#: ../nova/virt/xenapi/volume_utils.py:78 +msgid "Unable to create Storage Repository" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:90 #, python-format -msgid "Created VM %s as %s." +msgid "Unable to find SR from VBD %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:147 +#: ../nova/virt/xenapi/volume_utils.py:96 #, python-format -msgid "Creating VBD for VM %s, VDI %s ... " +msgid "Forgetting SR %s ... " msgstr "" -#: nova/virt/xenapi/vm_utils.py:149 +#: ../nova/virt/xenapi/volume_utils.py:101 #, python-format -msgid "Created VBD %s for VM %s, VDI %s." +msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:165 +#: ../nova/virt/xenapi/volume_utils.py:107 #, python-format -msgid "VBD not found in instance %s" +msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:175 +#: ../nova/virt/xenapi/volume_utils.py:111 #, python-format -msgid "Unable to unplug VBD %s" +msgid "Forgetting SR %s done." 
msgstr "" -#: nova/virt/xenapi/vm_utils.py:187 +#: ../nova/virt/xenapi/volume_utils.py:113 #, python-format -msgid "Unable to destroy VBD %s" +msgid "Ignoring exception %(exc)s when forgetting SR %(sr_ref)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:202 +#: ../nova/virt/xenapi/volume_utils.py:123 #, python-format -msgid "Creating VIF for VM %s, network %s." +msgid "Unable to introduce VDI on SR %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:205 +#: ../nova/virt/xenapi/volume_utils.py:128 #, python-format -msgid "Created VIF %s for VM %s, network %s." +msgid "Unable to get record of VDI %s on" msgstr "" -#: nova/virt/xenapi/vm_utils.py:216 +#: ../nova/virt/xenapi/volume_utils.py:146 #, python-format -msgid "Snapshotting VM %s with label '%s'..." +msgid "Unable to introduce VDI for SR %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:229 +#: ../nova/virt/xenapi/volume_utils.py:175 #, python-format -msgid "Created snapshot %s from VM %s." +msgid "Unable to obtain target information %(device_path)s, %(mountpoint)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:243 +#: ../nova/virt/xenapi/volume_utils.py:197 #, python-format -msgid "Asking xapi to upload %s as '%s'" +msgid "Mountpoint cannot be translated: %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:261 +#: ../nova/objectstore/image.py:262 #, python-format -msgid "Asking xapi to fetch %s as %s" +msgid "Failed to decrypt private key: %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:279 +#: ../nova/objectstore/image.py:269 #, python-format -msgid "Looking up vdi %s for PV kernel" +msgid "Failed to decrypt initialization vector: %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:290 +#: ../nova/objectstore/image.py:277 #, python-format -msgid "PV Kernel in VDI:%d" +msgid "Failed to decrypt image file %(image_file)s: %(err)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:318 +#: ../nova/objectstore/handler.py:106 #, python-format -msgid "VDI %s is still available" +msgid "Unknown S3 value type %r" +msgstr "" + +#: 
../nova/objectstore/handler.py:137 +msgid "Authenticated request" +msgstr "" + +#: ../nova/objectstore/handler.py:182 +msgid "List of buckets requested" msgstr "" -#: nova/virt/xenapi/vm_utils.py:331 +#: ../nova/objectstore/handler.py:209 #, python-format -msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgid "List keys for bucket %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:333 +#: ../nova/objectstore/handler.py:217 #, python-format -msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgid "Unauthorized attempt to access bucket %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:390 +#: ../nova/objectstore/handler.py:235 #, python-format -msgid "VHD %s has parent %s" +msgid "Creating bucket %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:407 +#: ../nova/objectstore/handler.py:245 #, python-format -msgid "Re-scanning SR %s" +msgid "Deleting bucket %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:431 +#: ../nova/objectstore/handler.py:249 #, python-format -msgid "Parent %s doesn't match original parent %s, waiting for coalesce..." +msgid "Unauthorized attempt to delete bucket %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:448 +#: ../nova/objectstore/handler.py:273 #, python-format -msgid "No VDIs found for VM %s" +msgid "Getting object: %(bname)s / %(nm)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:452 +#: ../nova/objectstore/handler.py:276 #, python-format -msgid "Unexpected number of VDIs (%s) found for VM %s" +msgid "Unauthorized attempt to get object %(nm)s from bucket %(bname)s" msgstr "" -#: nova/virt/xenapi/vmops.py:62 +#: ../nova/objectstore/handler.py:296 #, python-format -msgid "Attempted to create non-unique name %s" +msgid "Putting object: %(bname)s / %(nm)s" msgstr "" -#: nova/virt/xenapi/vmops.py:99 +#: ../nova/objectstore/handler.py:299 #, python-format -msgid "Starting VM %s..." 
+msgid "Unauthorized attempt to upload object %(nm)s to bucket %(bname)s" msgstr "" -#: nova/virt/xenapi/vmops.py:101 +#: ../nova/objectstore/handler.py:318 #, python-format -msgid "Spawning VM %s created %s." +msgid "Deleting object: %(bname)s / %(nm)s" msgstr "" -#: nova/virt/xenapi/vmops.py:112 +#: ../nova/objectstore/handler.py:322 #, python-format -msgid "Instance %s: booted" +msgid "Unauthorized attempt to delete object %(nm)s from bucket %(bname)s" msgstr "" -#: nova/virt/xenapi/vmops.py:137 +#: ../nova/objectstore/handler.py:396 #, python-format -msgid "Instance not present %s" +msgid "Not authorized to upload image: invalid directory %s" msgstr "" -#: nova/virt/xenapi/vmops.py:166 +#: ../nova/objectstore/handler.py:404 #, python-format -msgid "Starting snapshot for VM %s" +msgid "Not authorized to upload image: unauthorized bucket %s" msgstr "" -#: nova/virt/xenapi/vmops.py:174 +#: ../nova/objectstore/handler.py:409 #, python-format -msgid "Unable to Snapshot %s: %s" +msgid "Starting image upload: %s" msgstr "" -#: nova/virt/xenapi/vmops.py:184 +#: ../nova/objectstore/handler.py:423 #, python-format -msgid "Finished snapshot and upload for VM %s" +msgid "Not authorized to update attributes of image %s" msgstr "" -#: nova/virt/xenapi/vmops.py:252 +#: ../nova/objectstore/handler.py:431 #, python-format -msgid "suspend: instance not present %s" +msgid "Toggling publicity flag of image %(image_id)s %(newstatus)r" msgstr "" -#: nova/virt/xenapi/vmops.py:262 +#. other attributes imply update +#: ../nova/objectstore/handler.py:436 #, python-format -msgid "resume: instance not present %s" +msgid "Updating user fields on image %s" msgstr "" -#: nova/virt/xenapi/vmops.py:271 +#: ../nova/objectstore/handler.py:450 #, python-format -msgid "Instance not found %s" +msgid "Unauthorized attempt to delete image %s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:57 +#: ../nova/objectstore/handler.py:455 #, python-format -msgid "Introducing %s..." 
+msgid "Deleted image: %s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:74 +#: ../nova/auth/manager.py:259 #, python-format -msgid "Introduced %s as %s." +msgid "Looking up user: %r" msgstr "" -#: nova/virt/xenapi/volume_utils.py:78 -msgid "Unable to create Storage Repository" +#: ../nova/auth/manager.py:263 +#, python-format +msgid "Failed authorization for access key %s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:90 +#: ../nova/auth/manager.py:264 #, python-format -msgid "Unable to find SR from VBD %s" +msgid "No user found for access key %s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:96 +#: ../nova/auth/manager.py:270 #, python-format -msgid "Forgetting SR %s ... " +msgid "Using project name = user name (%s)" msgstr "" -#: nova/virt/xenapi/volume_utils.py:101 +#: ../nova/auth/manager.py:277 #, python-format -msgid "Ignoring exception %s when getting PBDs for %s" +msgid "failed authorization: no project named %(pjid)s (user=%(uname)s)" msgstr "" -#: nova/virt/xenapi/volume_utils.py:107 +#: ../nova/auth/manager.py:279 #, python-format -msgid "Ignoring exception %s when unplugging PBD %s" +msgid "No project called %s could be found" msgstr "" -#: nova/virt/xenapi/volume_utils.py:111 +#: ../nova/auth/manager.py:287 #, python-format -msgid "Forgetting SR %s done." 
+msgid "" +"Failed authorization: user %(uname)s not admin and not member of project " +"%(pjname)s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:113 +#: ../nova/auth/manager.py:289 #, python-format -msgid "Ignoring exception %s when forgetting SR %s" +msgid "User %(uid)s is not a member of project %(pjid)s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:123 +#: ../nova/auth/manager.py:298 ../nova/auth/manager.py:309 #, python-format -msgid "Unable to introduce VDI on SR %s" +msgid "Invalid signature for user %s" +msgstr "" + +#: ../nova/auth/manager.py:299 ../nova/auth/manager.py:310 +msgid "Signature does not match" +msgstr "" + +#: ../nova/auth/manager.py:380 +msgid "Must specify project" msgstr "" -#: nova/virt/xenapi/volume_utils.py:128 +#: ../nova/auth/manager.py:414 #, python-format -msgid "Unable to get record of VDI %s on" +msgid "The %s role can not be found" msgstr "" -#: nova/virt/xenapi/volume_utils.py:146 +#: ../nova/auth/manager.py:416 #, python-format -msgid "Unable to introduce VDI for SR %s" +msgid "The %s role is global only" msgstr "" -#: nova/virt/xenapi/volume_utils.py:175 +#: ../nova/auth/manager.py:420 #, python-format -msgid "Unable to obtain target information %s, %s" +msgid "Adding role %(role)s to user %(uid)s in project %(pid)s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:197 +#: ../nova/auth/manager.py:423 #, python-format -msgid "Mountpoint cannot be translated: %s" +msgid "Adding sitewide role %(role)s to user %(uid)s" msgstr "" -#: nova/virt/xenapi/volumeops.py:51 +#: ../nova/auth/manager.py:448 #, python-format -msgid "Attach_volume: %s, %s, %s" +msgid "Removing role %(role)s from user %(uid)s on project %(pid)s" msgstr "" -#: nova/virt/xenapi/volumeops.py:69 +#: ../nova/auth/manager.py:451 #, python-format -msgid "Unable to create VDI on SR %s for instance %s" +msgid "Removing sitewide role %(role)s from user %(uid)s" msgstr "" -#: nova/virt/xenapi/volumeops.py:81 +#: ../nova/auth/manager.py:515 #, python-format -msgid 
"Unable to use SR %s for instance %s" +msgid "Created project %(name)s with manager %(manager_user)s" msgstr "" -#: nova/virt/xenapi/volumeops.py:93 +#: ../nova/auth/manager.py:533 #, python-format -msgid "Unable to attach volume to instance %s" +msgid "modifying project %s" msgstr "" -#: nova/virt/xenapi/volumeops.py:95 +#: ../nova/auth/manager.py:545 #, python-format -msgid "Mountpoint %s attached to instance %s" +msgid "Adding user %(uid)s to project %(pid)s" msgstr "" -#: nova/virt/xenapi/volumeops.py:106 +#: ../nova/auth/manager.py:566 #, python-format -msgid "Detach_volume: %s, %s" +msgid "Remove user %(uid)s from project %(pid)s" msgstr "" -#: nova/virt/xenapi/volumeops.py:113 +#: ../nova/auth/manager.py:592 #, python-format -msgid "Unable to locate volume %s" +msgid "Deleting project %s" msgstr "" -#: nova/virt/xenapi/volumeops.py:121 +#: ../nova/auth/manager.py:650 #, python-format -msgid "Unable to detach volume %s" +msgid "Created user %(rvname)s (admin: %(rvadmin)r)" msgstr "" -#: nova/virt/xenapi/volumeops.py:128 +#: ../nova/auth/manager.py:659 #, python-format -msgid "Mountpoint %s detached from instance %s" +msgid "Deleting user %s" msgstr "" -#: nova/volume/api.py:44 +#: ../nova/auth/manager.py:669 #, python-format -msgid "Quota exceeeded for %s, tried to create %sG volume" +msgid "Access Key change for user %s" msgstr "" -#: nova/volume/api.py:46 +#: ../nova/auth/manager.py:671 #, python-format -msgid "Volume quota exceeded. 
You cannot create a volume of size %s" +msgid "Secret Key change for user %s" msgstr "" -#: nova/volume/api.py:70 nova/volume/api.py:95 -msgid "Volume status must be available" +#: ../nova/auth/manager.py:673 +#, python-format +msgid "Admin status set to %(admin)r for user %(uid)s" msgstr "" -#: nova/volume/api.py:97 -msgid "Volume is already attached" +#: ../nova/auth/manager.py:722 +#, python-format +msgid "No vpn data for project %s" msgstr "" -#: nova/volume/api.py:103 -msgid "Volume is already detached" +#: ../nova/service.py:161 +#, python-format +msgid "Starting %(topic)s node (version %(vcs_string)s)" msgstr "" -#: nova/volume/driver.py:76 +#: ../nova/service.py:174 +msgid "Service killed that has no database entry" +msgstr "" + +#: ../nova/service.py:195 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: ../nova/service.py:207 +msgid "Recovered model server connection!" +msgstr "" + +#: ../nova/service.py:213 +msgid "model server went away" +msgstr "" + +#: ../nova/auth/ldapdriver.py:174 #, python-format -msgid "Recovering from a failed execute. 
Try number %s" +msgid "LDAP user %s already exists" msgstr "" -#: nova/volume/driver.py:85 +#: ../nova/auth/ldapdriver.py:205 #, python-format -msgid "volume group %s doesn't exist" +msgid "LDAP object for %s doesn't exist" msgstr "" -#: nova/volume/driver.py:210 +#: ../nova/auth/ldapdriver.py:348 #, python-format -msgid "FAKE AOE: %s" +msgid "User %s doesn't exist" msgstr "" -#: nova/volume/driver.py:315 +#: ../nova/auth/ldapdriver.py:472 #, python-format -msgid "FAKE ISCSI: %s" +msgid "Group can't be created because group %s already exists" msgstr "" -#: nova/volume/manager.py:85 +#: ../nova/auth/ldapdriver.py:478 #, python-format -msgid "Re-exporting %s volumes" +msgid "Group can't be created because user %s doesn't exist" msgstr "" -#: nova/volume/manager.py:93 +#: ../nova/auth/ldapdriver.py:495 #, python-format -msgid "volume %s: creating" +msgid "User %s can't be searched in group because the user doesn't exist" msgstr "" -#: nova/volume/manager.py:102 +#: ../nova/auth/ldapdriver.py:507 #, python-format -msgid "volume %s: creating lv of size %sG" +msgid "User %s can't be added to the group because the user doesn't exist" msgstr "" -#: nova/volume/manager.py:106 +#: ../nova/auth/ldapdriver.py:510 ../nova/auth/ldapdriver.py:521 #, python-format -msgid "volume %s: creating export" +msgid "The group at dn %s doesn't exist" msgstr "" -#: nova/volume/manager.py:113 +#: ../nova/auth/ldapdriver.py:513 #, python-format -msgid "volume %s: created successfully" +msgid "User %(uid)s is already a member of the group %(group_dn)s" msgstr "" -#: nova/volume/manager.py:121 -msgid "Volume is still attached" +#: ../nova/auth/ldapdriver.py:524 +#, python-format +msgid "" +"User %s can't be removed from the group because the user doesn't exist" msgstr "" -#: nova/volume/manager.py:123 -msgid "Volume is not local to this node" +#: ../nova/auth/ldapdriver.py:528 +#, python-format +msgid "User %s is not a member of the group" msgstr "" -#: nova/volume/manager.py:124 +#: 
../nova/auth/ldapdriver.py:542 #, python-format -msgid "volume %s: removing export" +msgid "" +"Attempted to remove the last member of a group. Deleting the group at %s " +"instead." msgstr "" -#: nova/volume/manager.py:126 +#: ../nova/auth/ldapdriver.py:549 #, python-format -msgid "volume %s: deleting" +msgid "User %s can't be removed from all because the user doesn't exist" msgstr "" -#: nova/volume/manager.py:129 +#: ../nova/auth/ldapdriver.py:564 #, python-format -msgid "volume %s: deleted successfully" +msgid "Group at dn %s doesn't exist" +msgstr "" + +#: ../nova/virt/xenapi/network_utils.py:40 +#, python-format +msgid "Found non-unique network for bridge %s" +msgstr "" + +#: ../nova/virt/xenapi/network_utils.py:43 +#, python-format +msgid "Found no network for bridge %s" +msgstr "" + +#: ../nova/api/ec2/admin.py:97 +#, python-format +msgid "Creating new user: %s" +msgstr "" + +#: ../nova/api/ec2/admin.py:105 +#, python-format +msgid "Deleting user: %s" +msgstr "" + +#: ../nova/api/ec2/admin.py:127 +#, python-format +msgid "Adding role %(role)s to user %(user)s for project %(project)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:131 +#, python-format +msgid "Adding sitewide role %(role)s to user %(user)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:137 +#, python-format +msgid "Removing role %(role)s from user %(user)s for project %(project)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:141 +#, python-format +msgid "Removing sitewide role %(role)s from user %(user)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:146 ../nova/api/ec2/admin.py:223 +msgid "operation must be add or remove" +msgstr "" + +#: ../nova/api/ec2/admin.py:159 +#, python-format +msgid "Getting x509 for user: %(name)s on project: %(project)s" msgstr "" + +#: ../nova/api/ec2/admin.py:177 +#, python-format +msgid "Create project %(name)s managed by %(manager_user)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:190 +#, python-format +msgid "Modify project: %(name)s managed by %(manager_user)s" +msgstr "" + 
+#: ../nova/api/ec2/admin.py:200 +#, python-format +msgid "Delete project: %s" +msgstr "Вилучити проект: %s" + +#: ../nova/api/ec2/admin.py:214 +#, python-format +msgid "Adding user %(user)s to project %(project)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:218 +#, python-format +msgid "Removing user %(user)s from project %(project)s" +msgstr "" + +#, python-format +#~ msgid "AMQP server on %s:%d is unreachable. Trying again in %d seconds." +#~ msgstr "AMQP сервер %s:%d недоступний. Спроба під'єднання через %d секунд." + +#, python-format +#~ msgid "Couldn't get IP, using 127.0.0.1 %s" +#~ msgstr "Не вдалось отримати IP, використовуючи 127.0.0.1 %s" + +#, python-format +#~ msgid "Removing user %s from project %s" +#~ msgstr "Вилучення користувача %s з проекту %s" + +#, python-format +#~ msgid "Adding user %s to project %s" +#~ msgstr "Долучення користувача %s до проекту %s" + +#, python-format +#~ msgid "" +#~ "%s\n" +#~ "Command: %s\n" +#~ "Exit code: %s\n" +#~ "Stdout: %r\n" +#~ "Stderr: %r" +#~ msgstr "" +#~ "%s\n" +#~ "Команда: %s\n" +#~ "Код завершення: %s\n" +#~ "Stdout: %r\n" +#~ "Stderr: %r" + +#, python-format +#~ msgid "Getting from %s: %s" +#~ msgstr "Отримання з %s: %s" diff --git a/po/zh_CN.po b/po/zh_CN.po index a39383497..9690356f5 100644 --- a/po/zh_CN.po +++ b/po/zh_CN.po @@ -7,2129 +7,2934 @@ msgid "" msgstr "" "Project-Id-Version: nova\n" "Report-Msgid-Bugs-To: FULL NAME <EMAIL@ADDRESS>\n" -"POT-Creation-Date: 2011-01-10 11:25-0800\n" -"PO-Revision-Date: 2011-02-14 02:26+0000\n" -"Last-Translator: Winston Dillon <Unknown>\n" +"POT-Creation-Date: 2011-02-21 10:03-0500\n" +"PO-Revision-Date: 2011-04-07 05:01+0000\n" +"Last-Translator: ben <Unknown>\n" "Language-Team: Chinese (Simplified) <zh_CN@li.org>\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"X-Launchpad-Export-Date: 2011-02-15 05:12+0000\n" -"X-Generator: Launchpad (build 12351)\n" +"X-Launchpad-Export-Date: 2011-04-08 05:28+0000\n" 
+"X-Generator: Launchpad (build 12735)\n" -#: nova/twistd.py:268 +#: ../nova/twistd.py:266 #, python-format msgid "Starting %s" -msgstr "正在启动 %s" +msgstr "启动 %s 中" -#: nova/crypto.py:46 +#: ../nova/scheduler/chance.py:37 ../nova/scheduler/zone.py:55 +#: ../nova/scheduler/simple.py:75 ../nova/scheduler/simple.py:110 +#: ../nova/scheduler/simple.py:122 +msgid "No hosts found" +msgstr "未找到主机" + +#: ../nova/exception.py:33 +msgid "Unexpected error while running command." +msgstr "运行命令时出现错误" + +#: ../nova/exception.py:36 +#, python-format +msgid "" +"%(description)s\n" +"Command: %(cmd)s\n" +"Exit code: %(exit_code)s\n" +"Stdout: %(stdout)r\n" +"Stderr: %(stderr)r" +msgstr "" + +#: ../nova/exception.py:107 +msgid "DB exception wrapped" +msgstr "" + +#. exc_type, exc_value, exc_traceback = sys.exc_info() +#: ../nova/exception.py:120 +msgid "Uncaught exception" +msgstr "未捕获异常" + +#: ../nova/volume/api.py:45 +#, python-format +msgid "Quota exceeeded for %(pid)s, tried to create %(size)sG volume" +msgstr "" + +#: ../nova/volume/api.py:47 +#, python-format +msgid "Volume quota exceeded. 
You cannot create a volume of size %sG" +msgstr "卷磁盘配额已耗尽,不能创建 %sG 大小的卷" + +#: ../nova/volume/api.py:71 ../nova/volume/api.py:96 +msgid "Volume status must be available" +msgstr "卷组状态必须可获取" + +#: ../nova/volume/api.py:98 +msgid "Volume is already attached" +msgstr "卷已挂载" + +#: ../nova/volume/api.py:104 +msgid "Volume is already detached" +msgstr "卷已卸载" + +#: ../nova/api/openstack/servers.py:72 +msgid "Failed to read private ip" +msgstr "获取内网IP失败" + +#: ../nova/api/openstack/servers.py:79 +msgid "Failed to read public ip(s)" +msgstr "获取外网IP失败" + +#: ../nova/api/openstack/servers.py:152 +#, python-format +msgid "%(param)s property not found for image %(_image_id)s" +msgstr "" + +#: ../nova/api/openstack/servers.py:168 +msgid "No keypairs defined" +msgstr "未定义密钥对" + +#: ../nova/api/openstack/servers.py:238 +#, python-format +msgid "Compute.api::lock %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:253 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:267 +#, python-format +msgid "Compute.api::get_lock %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:281 +#, python-format +msgid "Compute.api::reset_network %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:292 +#, python-format +msgid "Compute.api::pause %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:303 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:314 +#, python-format +msgid "compute.api::suspend %s" +msgstr "" + +#: ../nova/api/openstack/servers.py:325 +#, python-format +msgid "compute.api::resume %s" +msgstr "" + +#: ../nova/twistd.py:157 +msgid "Wrong number of arguments." +msgstr "错误参数个数。" + +#: ../nova/twistd.py:209 +#, python-format +msgid "pidfile %s does not exist. 
Daemon not running?\n" +msgstr "pidfile %s 不存在,守护进程是否运行?\n" + +#: ../nova/twistd.py:221 +msgid "No such process" +msgstr "没有该进程" + +#: ../nova/twistd.py:230 ../nova/service.py:224 +#, python-format +msgid "Serving %s" +msgstr "正在为 %s 服务" + +#: ../nova/twistd.py:262 ../nova/service.py:225 +msgid "Full set of FLAGS:" +msgstr "FLAGS全集:" + +#: ../nova/virt/xenapi/volumeops.py:48 ../nova/virt/xenapi/volumeops.py:101 +#: ../nova/db/sqlalchemy/api.py:731 ../nova/virt/libvirt_conn.py:741 +#: ../nova/api/ec2/__init__.py:317 +#, python-format +msgid "Instance %s not found" +msgstr "" + +#. NOTE: No Resource Pool concept so far +#: ../nova/virt/xenapi/volumeops.py:51 +#, python-format +msgid "Attach_volume: %(instance_name)s, %(device_path)s, %(mountpoint)s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:69 +#, python-format +msgid "Unable to create VDI on SR %(sr_ref)s for instance %(instance_name)s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:80 +#, python-format +msgid "Unable to use SR %(sr_ref)s for instance %(instance_name)s" +msgstr "" + +#: ../nova/virt/xenapi/volumeops.py:91 +#, python-format +msgid "Unable to attach volume to instance %s" +msgstr "无法挂载卷到虚拟机 %s" + +#: ../nova/virt/xenapi/volumeops.py:93 +#, python-format +msgid "Mountpoint %(mountpoint)s attached to instance %(instance_name)s" +msgstr "挂载点 %(mountpoint)s 挂载到虚拟机 %(instance_name)s" + +#. 
Detach VBD from VM +#: ../nova/virt/xenapi/volumeops.py:104 +#, python-format +msgid "Detach_volume: %(instance_name)s, %(mountpoint)s" +msgstr "卸载_volume: %(instance_name)s, %(mountpoint)s" + +#: ../nova/virt/xenapi/volumeops.py:112 +#, python-format +msgid "Unable to locate volume %s" +msgstr "无法找到 %s 卷" + +#: ../nova/virt/xenapi/volumeops.py:120 +#, python-format +msgid "Unable to detach volume %s" +msgstr "无法卸载 %s 卷" + +#: ../nova/virt/xenapi/volumeops.py:127 +#, python-format +msgid "Mountpoint %(mountpoint)s detached from instance %(instance_name)s" +msgstr "挂载点 %(mountpoint)s 从虚拟机 %(instance_name)s 卸载" + +#: ../nova/compute/instance_types.py:41 +#, python-format +msgid "Unknown instance type: %s" +msgstr "未知的虚拟机类型:%s" + +#: ../nova/crypto.py:46 msgid "Filename of root CA" msgstr "根证书文件名" -#: nova/crypto.py:49 +#: ../nova/crypto.py:49 msgid "Filename of private key" msgstr "私钥文件名" -#: nova/crypto.py:51 +#: ../nova/crypto.py:51 msgid "Filename of root Certificate Revokation List" msgstr "" -#: nova/crypto.py:53 +#: ../nova/crypto.py:53 msgid "Where we keep our keys" msgstr "保存密钥的位置" -#: nova/crypto.py:55 +#: ../nova/crypto.py:55 msgid "Where we keep our root CA" msgstr "保存根证书的位置" -#: nova/crypto.py:57 +#: ../nova/crypto.py:57 msgid "Should we use a CA for each project?" msgstr "是否所有项目都是用证书授权(CA)?" 
-#: nova/crypto.py:61 +#: ../nova/crypto.py:61 #, python-format msgid "Subject for certificate for users, %s for project, user, timestamp" msgstr "用户证书的标题,%s依次分别为项目,用户,时间戳" -#: nova/crypto.py:66 +#: ../nova/crypto.py:66 #, python-format msgid "Subject for certificate for projects, %s for project, timestamp" msgstr "项目证书的标题,%s依次分别为项目,时间戳" -#: nova/crypto.py:71 +#: ../nova/crypto.py:71 #, python-format msgid "Subject for certificate for vpns, %s for project, timestamp" msgstr "VPN证书的标题,%s依次分别为项目,时间戳" -#: nova/crypto.py:258 +#: ../nova/crypto.py:258 #, python-format msgid "Flags path: %s" msgstr "Flag所在路径:%s" -#: nova/exception.py:33 -msgid "Unexpected error while running command." -msgstr "运行命令时出现了意外错误。" +#: ../nova/scheduler/manager.py:69 +#, python-format +msgid "Casting to %(topic)s %(host)s for %(method)s" +msgstr "" -#: nova/exception.py:36 +#: ../nova/compute/manager.py:78 +#, python-format +msgid "check_instance_lock: decorating: |%s|" +msgstr "" + +#: ../nova/compute/manager.py:80 #, python-format msgid "" -"%s\n" -"Command: %s\n" -"Exit code: %s\n" -"Stdout: %r\n" -"Stderr: %r" -msgstr "" -"%s\n" -"命令:%s\n" -"退出代码:%s\n" -"标准输出(stdout):%r\n" -"标准错误(stderr):%r" - -#: nova/exception.py:86 -msgid "Uncaught exception" -msgstr "未捕获异常" +"check_instance_lock: arguments: |%(self)s| |%(context)s| |%(instance_id)s|" +msgstr "" -#: nova/fakerabbit.py:48 +#: ../nova/compute/manager.py:84 #, python-format -msgid "(%s) publish (key: %s) %s" -msgstr "(%s)发布(键值:%s)%s" +msgid "check_instance_lock: locked: |%s|" +msgstr "" -#: nova/fakerabbit.py:53 +#: ../nova/compute/manager.py:86 #, python-format -msgid "Publishing to route %s" -msgstr "发布并路由到 %s" +msgid "check_instance_lock: admin: |%s|" +msgstr "" -#: nova/fakerabbit.py:83 +#: ../nova/compute/manager.py:91 #, python-format -msgid "Declaring queue %s" -msgstr "正在声明队列%s" +msgid "check_instance_lock: executing: |%s|" +msgstr "" -#: nova/fakerabbit.py:89 +#: ../nova/compute/manager.py:95 #, python-format -msgid "Declaring 
exchange %s" -msgstr "正在声明交换(exchange)%s" +msgid "check_instance_lock: not executing |%s|" +msgstr "" -#: nova/fakerabbit.py:95 +#: ../nova/compute/manager.py:179 +msgid "Instance has already been created" +msgstr "虚拟机已经创建" + +#: ../nova/compute/manager.py:180 #, python-format -msgid "Binding %s to %s with key %s" -msgstr "将%s绑定到%s(以%s键值)" +msgid "instance %s: starting..." +msgstr "虚拟机 %s :启动" -#: nova/fakerabbit.py:120 +#. pylint: disable=W0702 +#: ../nova/compute/manager.py:219 #, python-format -msgid "Getting from %s: %s" -msgstr "从%s获得如下内容:%s" +msgid "instance %s: Failed to spawn" +msgstr "" -#: nova/rpc.py:92 +#: ../nova/compute/manager.py:233 ../nova/tests/test_cloud.py:286 #, python-format -msgid "AMQP server on %s:%d is unreachable. Trying again in %d seconds." -msgstr "位于%s:%d的AMQP服务器不可用。%d秒后重试。" +msgid "Terminating instance %s" +msgstr "" -#: nova/rpc.py:99 +#: ../nova/compute/manager.py:255 #, python-format -msgid "Unable to connect to AMQP server after %d tries. Shutting down." 
-msgstr "已尝试%d次,均无法连接到AMQP服务器。关闭中。" +msgid "Deallocating address %s" +msgstr "" -#: nova/rpc.py:118 -msgid "Reconnected to queue" -msgstr "重新与队列建立连接" +#: ../nova/compute/manager.py:268 +#, python-format +msgid "trying to destroy already destroyed instance: %s" +msgstr "" -#: nova/rpc.py:125 -msgid "Failed to fetch message from queue" -msgstr "从队列获取数据失败" +#: ../nova/compute/manager.py:282 +#, python-format +msgid "Rebooting instance %s" +msgstr "重启虚拟机 %s" -#: nova/rpc.py:155 +#: ../nova/compute/manager.py:287 #, python-format -msgid "Initing the Adapter Consumer for %s" +msgid "" +"trying to reboot a non-running instance: %(instance_id)s (state: %(state)s " +"expected: %(running)s)" msgstr "" -#: nova/rpc.py:170 +#: ../nova/compute/manager.py:311 #, python-format -msgid "received %s" -msgstr "已接收 %s" +msgid "instance %s: snapshotting" +msgstr "" -#: nova/rpc.py:183 +#: ../nova/compute/manager.py:316 #, python-format -msgid "no method for message: %s" -msgstr "没有适用于消息%s的方法" +msgid "" +"trying to snapshot a non-running instance: %(instance_id)s (state: %(state)s " +"expected: %(running)s)" +msgstr "" -#: nova/rpc.py:184 +#: ../nova/compute/manager.py:332 #, python-format -msgid "No method for message: %s" -msgstr "没有适用于消息%s的方法" +msgid "" +"trying to reset the password on a non-running instance: %(instance_id)s " +"(state: %(instance_state)s expected: %(expected_state)s)" +msgstr "" -#: nova/rpc.py:245 +#: ../nova/compute/manager.py:335 #, python-format -msgid "Returning exception %s to caller" -msgstr "返回%s异常给调用者" +msgid "instance %s: setting admin password" +msgstr "虚拟机 %s:设置管理员密码" -#: nova/rpc.py:286 +#: ../nova/compute/manager.py:353 #, python-format -msgid "unpacked context: %s" +msgid "" +"trying to inject a file into a non-running instance: %(instance_id)s (state: " +"%(instance_state)s expected: %(expected_state)s)" msgstr "" -#: nova/rpc.py:305 -msgid "Making asynchronous call..." 
-msgstr "产生异步调用中……" +#: ../nova/compute/manager.py:362 +#, python-format +msgid "instance %(nm)s: injecting file to %(plain_path)s" +msgstr "" -#: nova/rpc.py:308 +#: ../nova/compute/manager.py:372 #, python-format -msgid "MSG_ID is %s" -msgstr "消息ID(MSG_ID)是 %s" +msgid "instance %s: rescuing" +msgstr "" -#: nova/rpc.py:356 +#: ../nova/compute/manager.py:387 #, python-format -msgid "response %s" -msgstr "回复 %s" +msgid "instance %s: unrescuing" +msgstr "" -#: nova/rpc.py:365 +#: ../nova/compute/manager.py:406 #, python-format -msgid "topic is %s" -msgstr "话题是 %s" +msgid "instance %s: pausing" +msgstr "" -#: nova/rpc.py:366 +#: ../nova/compute/manager.py:423 #, python-format -msgid "message %s" -msgstr "消息 %s" +msgid "instance %s: unpausing" +msgstr "" -#: nova/service.py:157 +#: ../nova/compute/manager.py:440 #, python-format -msgid "Starting %s node" -msgstr "启动%s节点" +msgid "instance %s: retrieving diagnostics" +msgstr "" -#: nova/service.py:169 -msgid "Service killed that has no database entry" -msgstr "因无数据库记录,服务已被中止" +#: ../nova/compute/manager.py:453 +#, python-format +msgid "instance %s: suspending" +msgstr "虚拟机 %s:挂起" -#: nova/service.py:190 -msgid "The service database object disappeared, Recreating it." +#: ../nova/compute/manager.py:472 +#, python-format +msgid "instance %s: resuming" msgstr "" -#: nova/service.py:202 -msgid "Recovered model server connection!" -msgstr "与模型服务器(model server)的连接已恢复!" +#: ../nova/compute/manager.py:491 +#, python-format +msgid "instance %s: locking" +msgstr "" -#: nova/service.py:208 -msgid "model server went away" -msgstr "失去与模型服务器的连接" +#: ../nova/compute/manager.py:503 +#, python-format +msgid "instance %s: unlocking" +msgstr "" -#: nova/service.py:217 nova/db/sqlalchemy/__init__.py:43 +#: ../nova/compute/manager.py:513 #, python-format -msgid "Data store %s is unreachable. Trying again in %d seconds." 
-msgstr "数据储存服务%s不可用。%d秒之后继续尝试。" +msgid "instance %s: getting locked state" +msgstr "" -#: nova/service.py:232 nova/twistd.py:232 +#: ../nova/compute/manager.py:526 #, python-format -msgid "Serving %s" -msgstr "正在为%s服务" +msgid "instance %s: reset network" +msgstr "" -#: nova/service.py:234 nova/twistd.py:264 -msgid "Full set of FLAGS:" -msgstr "FLAGS全集:" +#: ../nova/compute/manager.py:535 ../nova/api/ec2/cloud.py:515 +#, python-format +msgid "Get console output for instance %s" +msgstr "获取虚拟机 %s 控制台输出" -#: nova/twistd.py:211 +#: ../nova/compute/manager.py:543 #, python-format -msgid "pidfile %s does not exist. Daemon not running?\n" -msgstr "pidfile %s不存在。后台服务没有运行?\n" +msgid "instance %s: getting ajax console" +msgstr "虚拟机 %s :获取ajax控制台" -#: nova/utils.py:53 +#: ../nova/compute/manager.py:553 #, python-format -msgid "Inner Exception: %s" -msgstr "内层异常:%s" +msgid "" +"instance %(instance_id)s: attaching volume %(volume_id)s to %(mountpoint)s" +msgstr "" -#: nova/utils.py:54 +#. pylint: disable=W0702 +#. NOTE(vish): The inline callback eats the exception info so we +#. log the traceback here and reraise the same +#. ecxception below. 
+#: ../nova/compute/manager.py:569 #, python-format -msgid "Class %s cannot be found" -msgstr "无法找到%s类" +msgid "instance %(instance_id)s: attach failed %(mountpoint)s, removing" +msgstr "" -#: nova/utils.py:113 +#: ../nova/compute/manager.py:585 #, python-format -msgid "Fetching %s" -msgstr "正在抓取%s" +msgid "" +"Detach volume %(volume_id)s from mountpoint %(mp)s on instance " +"%(instance_id)s" +msgstr "" -#: nova/utils.py:125 +#: ../nova/compute/manager.py:588 #, python-format -msgid "Running cmd (subprocess): %s" -msgstr "正在运行(在子进程中)运行命令:%s" +msgid "Detaching volume from unknown instance %s" +msgstr "" -#: nova/utils.py:138 +#: ../nova/scheduler/simple.py:53 #, python-format -msgid "Result was %s" -msgstr "运行结果为 %s" +msgid "Host %s is not alive" +msgstr "" -#: nova/utils.py:171 +#: ../nova/scheduler/simple.py:65 +msgid "All hosts have too many cores" +msgstr "" + +#: ../nova/scheduler/simple.py:87 #, python-format -msgid "debug in callback: %s" -msgstr "回调中debug:%s" +msgid "Host %s not available" +msgstr "" -#: nova/utils.py:176 +#: ../nova/scheduler/simple.py:99 +msgid "All hosts have too many gigabytes" +msgstr "" + +#: ../nova/scheduler/simple.py:119 +msgid "All hosts have too many networks" +msgstr "" + +#: ../nova/volume/manager.py:85 #, python-format -msgid "Running %s" -msgstr "正在运行 %s" +msgid "Re-exporting %s volumes" +msgstr "" -#: nova/utils.py:207 +#: ../nova/volume/manager.py:90 #, python-format -msgid "Couldn't get IP, using 127.0.0.1 %s" -msgstr "不能获取IP,将使用 127.0.0.1 %s" +msgid "volume %s: skipping export" +msgstr "" -#: nova/utils.py:289 +#: ../nova/volume/manager.py:96 #, python-format -msgid "Invalid backend: %s" -msgstr "无效的后台:%s" +msgid "volume %s: creating" +msgstr "" -#: nova/utils.py:300 +#: ../nova/volume/manager.py:108 #, python-format -msgid "backend %s" -msgstr "后台 %s" +msgid "volume %(vol_name)s: creating lv of size %(vol_size)sG" +msgstr "" -#: nova/api/ec2/__init__.py:133 -msgid "Too many failed authentications." 
-msgstr "较多失败的认证" +#: ../nova/volume/manager.py:112 +#, python-format +msgid "volume %s: creating export" +msgstr "" -#: nova/api/ec2/__init__.py:142 +#: ../nova/volume/manager.py:123 #, python-format -msgid "" -"Access key %s has had %d failed authentications and will be locked out for " -"%d minutes." -msgstr "访问键 %s时,存在%d个失败的认证,将于%d分钟后解锁" +msgid "volume %s: created successfully" +msgstr "" + +#: ../nova/volume/manager.py:131 +msgid "Volume is still attached" +msgstr "" + +#: ../nova/volume/manager.py:133 +msgid "Volume is not local to this node" +msgstr "" -#: nova/api/ec2/__init__.py:179 nova/objectstore/handler.py:140 +#: ../nova/volume/manager.py:136 #, python-format -msgid "Authentication Failure: %s" -msgstr "认证失败:%s" +msgid "volume %s: removing export" +msgstr "" -#: nova/api/ec2/__init__.py:190 +#: ../nova/volume/manager.py:138 #, python-format -msgid "Authenticated Request For %s:%s)" -msgstr "为%s:%s申请认证" +msgid "volume %s: deleting" +msgstr "" -#: nova/api/ec2/__init__.py:227 +#: ../nova/volume/manager.py:147 #, python-format -msgid "action: %s" -msgstr "执行: %s" +msgid "volume %s: deleted successfully" +msgstr "" -#: nova/api/ec2/__init__.py:229 +#: ../nova/virt/xenapi/fake.py:74 #, python-format -msgid "arg: %s\t\tval: %s" -msgstr "键为: %s\t\t值为: %s" +msgid "%(text)s: _db_content => %(content)s" +msgstr "" + +#: ../nova/virt/xenapi/fake.py:304 ../nova/virt/xenapi/fake.py:404 +#: ../nova/virt/xenapi/fake.py:422 ../nova/virt/xenapi/fake.py:478 +msgid "Raising NotImplemented" +msgstr "" -#: nova/api/ec2/__init__.py:301 +#: ../nova/virt/xenapi/fake.py:306 #, python-format -msgid "Unauthorized request for controller=%s and action=%s" -msgstr "对控制器=%s及动作=%s未经授权" +msgid "xenapi.fake does not have an implementation for %s" +msgstr "" -#: nova/api/ec2/__init__.py:339 +#: ../nova/virt/xenapi/fake.py:341 #, python-format -msgid "NotFound raised: %s" -msgstr "引起没有找到的错误: %s" +msgid "Calling %(localname)s %(impl)s" +msgstr "" -#: nova/api/ec2/__init__.py:342 +#: 
../nova/virt/xenapi/fake.py:346 #, python-format -msgid "ApiError raised: %s" -msgstr "引发了Api错误: %s" +msgid "Calling getter %s" +msgstr "" -#: nova/api/ec2/__init__.py:349 +#: ../nova/virt/xenapi/fake.py:406 #, python-format -msgid "Unexpected error raised: %s" -msgstr "引发了意外的错误:%s" +msgid "" +"xenapi.fake does not have an implementation for %s or it has been called " +"with the wrong number of arguments" +msgstr "" -#: nova/api/ec2/__init__.py:354 -msgid "An unknown error has occurred. Please try your request again." -msgstr "发生了一个未知的错误. 请重试你的请求." +#: ../nova/tests/test_cloud.py:256 +msgid "Can't test instances without a real virtual env." +msgstr "" -#: nova/api/ec2/admin.py:84 +#: ../nova/tests/test_cloud.py:268 #, python-format -msgid "Creating new user: %s" -msgstr "创建新用户: %s" +msgid "Need to watch instance %s until it's running..." +msgstr "" -#: nova/api/ec2/admin.py:92 -#, python-format -msgid "Deleting user: %s" -msgstr "删除用户: %s" +#: ../nova/virt/connection.py:73 +msgid "Failed to open connection to the hypervisor" +msgstr "" -#: nova/api/ec2/admin.py:114 +#: ../nova/network/linux_net.py:187 #, python-format -msgid "Adding role %s to user %s for project %s" -msgstr "正将%s角色赋予用户%s(在工程%s中)" +msgid "Starting VLAN inteface %s" +msgstr "" -#: nova/api/ec2/admin.py:117 nova/auth/manager.py:415 +#: ../nova/network/linux_net.py:208 #, python-format -msgid "Adding sitewide role %s to user %s" -msgstr "增加站点范围的 %s角色给用户 %s" +msgid "Starting Bridge interface for %s" +msgstr "" -#: nova/api/ec2/admin.py:122 +#. 
pylint: disable=W0703 +#: ../nova/network/linux_net.py:314 #, python-format -msgid "Removing role %s from user %s for project %s" -msgstr "正将角色%s从用户%s在工程%s中移除" +msgid "Hupping dnsmasq threw %s" +msgstr "" -#: nova/api/ec2/admin.py:125 nova/auth/manager.py:441 +#: ../nova/network/linux_net.py:316 #, python-format -msgid "Removing sitewide role %s from user %s" +msgid "Pid %d is stale, relaunching dnsmasq" msgstr "" -#: nova/api/ec2/admin.py:129 nova/api/ec2/admin.py:192 -msgid "operation must be add or remove" -msgstr "操作必须为增加或删除" +#. pylint: disable=W0703 +#: ../nova/network/linux_net.py:358 +#, python-format +msgid "killing radvd threw %s" +msgstr "" -#: nova/api/ec2/admin.py:142 +#: ../nova/network/linux_net.py:360 #, python-format -msgid "Getting x509 for user: %s on project: %s" -msgstr "为用户 %s从工程%s中获取 x509" +msgid "Pid %d is stale, relaunching radvd" +msgstr "" -#: nova/api/ec2/admin.py:159 +#. pylint: disable=W0703 +#: ../nova/network/linux_net.py:449 #, python-format -msgid "Create project %s managed by %s" -msgstr "创建工程%s,此工程由%s管理" +msgid "Killing dnsmasq threw %s" +msgstr "" -#: nova/api/ec2/admin.py:170 +#: ../nova/utils.py:58 #, python-format -msgid "Delete project: %s" -msgstr "删除工程%s" +msgid "Inner Exception: %s" +msgstr "内层异常:%s" -#: nova/api/ec2/admin.py:184 nova/auth/manager.py:533 +#: ../nova/utils.py:59 #, python-format -msgid "Adding user %s to project %s" -msgstr "增加用户%s到%s工程" +msgid "Class %s cannot be found" +msgstr "无法找到 %s 类" -#: nova/api/ec2/admin.py:188 +#: ../nova/utils.py:118 #, python-format -msgid "Removing user %s from project %s" -msgstr "正将用户%s从工程%s中移除" +msgid "Fetching %s" +msgstr "正在抓取 %s" -#: nova/api/ec2/apirequest.py:95 +#: ../nova/utils.py:130 #, python-format -msgid "Unsupported API request: controller = %s,action = %s" -msgstr "不支持的API请求: 控制器 = %s,执行 = %s" +msgid "Running cmd (subprocess): %s" +msgstr "正在运行(在子进程中)运行命令:%s" -#: nova/api/ec2/cloud.py:117 +#: ../nova/utils.py:143 ../nova/utils.py:183 #, python-format -msgid 
"Generating root CA: %s" -msgstr "生成根证书: %s" +msgid "Result was %s" +msgstr "运行结果为 %s" -#: nova/api/ec2/cloud.py:277 +#: ../nova/utils.py:159 #, python-format -msgid "Create key pair %s" -msgstr "创建键值对 %s" +msgid "Running cmd (SSH): %s" +msgstr "" -#: nova/api/ec2/cloud.py:285 +#: ../nova/utils.py:217 #, python-format -msgid "Delete key pair %s" -msgstr "删除键值对 %s" +msgid "debug in callback: %s" +msgstr "回调中debug:%s" -#: nova/api/ec2/cloud.py:357 +#: ../nova/utils.py:222 #, python-format -msgid "%s is not a valid ipProtocol" -msgstr "%s是无效的IP协议" +msgid "Running %s" +msgstr "正在运行 %s" -#: nova/api/ec2/cloud.py:361 -msgid "Invalid port range" -msgstr "端口范围无效" +#: ../nova/utils.py:262 +#, python-format +msgid "Link Local address is not found.:%s" +msgstr "" -#: nova/api/ec2/cloud.py:392 +#: ../nova/utils.py:265 #, python-format -msgid "Revoke security group ingress %s" -msgstr "撤销输入安全组 %s" +msgid "Couldn't get Link Local IP of %(interface)s :%(ex)s" +msgstr "" -#: nova/api/ec2/cloud.py:401 nova/api/ec2/cloud.py:414 -msgid "No rule for the specified parameters." 
-msgstr "对给定的参数无特定规则。" +#: ../nova/utils.py:363 +#, python-format +msgid "Invalid backend: %s" +msgstr "无效的后台:%s" -#: nova/api/ec2/cloud.py:421 +#: ../nova/utils.py:374 #, python-format -msgid "Authorize security group ingress %s" -msgstr "验证输入安全组 %s" +msgid "backend %s" +msgstr "后台 %s" -#: nova/api/ec2/cloud.py:432 +#: ../nova/fakerabbit.py:49 #, python-format -msgid "This rule already exists in group %s" -msgstr "这条规则已经存在安全组%s中。" +msgid "(%(nm)s) publish (key: %(routing_key)s) %(message)s" +msgstr "" -#: nova/api/ec2/cloud.py:460 +#: ../nova/fakerabbit.py:54 #, python-format -msgid "Create Security Group %s" -msgstr "创建安全组%s" +msgid "Publishing to route %s" +msgstr "发布并路由到 %s" -#: nova/api/ec2/cloud.py:463 +#: ../nova/fakerabbit.py:84 #, python-format -msgid "group %s already exists" -msgstr "安全组%s已经存在" +msgid "Declaring queue %s" +msgstr "正在声明队列 %s" -#: nova/api/ec2/cloud.py:475 +#: ../nova/fakerabbit.py:90 #, python-format -msgid "Delete security group %s" -msgstr "删除安全组 %s" +msgid "Declaring exchange %s" +msgstr "正在声明交换(exchange) %s" -#: nova/api/ec2/cloud.py:483 nova/compute/manager.py:452 +#: ../nova/fakerabbit.py:96 #, python-format -msgid "Get console output for instance %s" +msgid "Binding %(queue)s to %(exchange)s with key %(routing_key)s" msgstr "" -#: nova/api/ec2/cloud.py:543 +#: ../nova/fakerabbit.py:121 #, python-format -msgid "Create volume of %s GB" +msgid "Getting from %(queue)s: %(message)s" msgstr "" -#: nova/api/ec2/cloud.py:567 +#: ../nova/virt/xenapi/vm_utils.py:135 ../nova/virt/hyperv.py:171 #, python-format -msgid "Attach volume %s to instacne %s at %s" +msgid "Created VM %s..." msgstr "" -#: nova/api/ec2/cloud.py:579 +#: ../nova/virt/xenapi/vm_utils.py:138 #, python-format -msgid "Detach volume %s" +msgid "Created VM %(instance_name)s as %(vm_ref)s." msgstr "" -#: nova/api/ec2/cloud.py:686 -msgid "Allocate address" +#: ../nova/virt/xenapi/vm_utils.py:168 +#, python-format +msgid "Creating VBD for VM %(vm_ref)s, VDI %(vdi_ref)s ... 
" msgstr "" -#: nova/api/ec2/cloud.py:691 +#: ../nova/virt/xenapi/vm_utils.py:171 #, python-format -msgid "Release address %s" +msgid "Created VBD %(vbd_ref)s for VM %(vm_ref)s, VDI %(vdi_ref)s." msgstr "" -#: nova/api/ec2/cloud.py:696 +#: ../nova/virt/xenapi/vm_utils.py:187 #, python-format -msgid "Associate address %s to instance %s" +msgid "VBD not found in instance %s" msgstr "" -#: nova/api/ec2/cloud.py:703 +#: ../nova/virt/xenapi/vm_utils.py:197 #, python-format -msgid "Disassociate address %s" +msgid "Unable to unplug VBD %s" msgstr "" -#: nova/api/ec2/cloud.py:730 -msgid "Going to start terminating instances" +#: ../nova/virt/xenapi/vm_utils.py:209 +#, python-format +msgid "Unable to destroy VBD %s" msgstr "" -#: nova/api/ec2/cloud.py:738 +#: ../nova/virt/xenapi/vm_utils.py:224 #, python-format -msgid "Reboot instance %r" +msgid "Creating VIF for VM %(vm_ref)s, network %(network_ref)s." msgstr "" -#: nova/api/ec2/cloud.py:775 +#: ../nova/virt/xenapi/vm_utils.py:227 #, python-format -msgid "De-registering image %s" +msgid "Created VIF %(vif_ref)s for VM %(vm_ref)s, network %(network_ref)s." msgstr "" -#: nova/api/ec2/cloud.py:783 +#: ../nova/virt/xenapi/vm_utils.py:246 #, python-format -msgid "Registered image %s with id %s" +msgid "" +"Created VDI %(vdi_ref)s (%(name_label)s, %(virtual_size)s, %(read_only)s) on " +"%(sr_ref)s." msgstr "" -#: nova/api/ec2/cloud.py:789 nova/api/ec2/cloud.py:804 +#. TODO(sirp): Add quiesce and VSS locking support when Windows support +#. is added +#: ../nova/virt/xenapi/vm_utils.py:258 #, python-format -msgid "attribute not supported: %s" +msgid "Snapshotting VM %(vm_ref)s with label '%(label)s'..." msgstr "" -#: nova/api/ec2/cloud.py:794 +#: ../nova/virt/xenapi/vm_utils.py:272 #, python-format -msgid "invalid id: %s" +msgid "Created snapshot %(template_vm_ref)s from VM %(vm_ref)s." 
msgstr "" -#: nova/api/ec2/cloud.py:807 -msgid "user or group not specified" +#: ../nova/virt/xenapi/vm_utils.py:286 +#, python-format +msgid "Asking xapi to upload %(vdi_uuids)s as ID %(image_id)s" msgstr "" -#: nova/api/ec2/cloud.py:809 -msgid "only group \"all\" is supported" +#: ../nova/virt/xenapi/vm_utils.py:327 +#, python-format +msgid "Size for image %(image)s:%(virtual_size)d" msgstr "" -#: nova/api/ec2/cloud.py:811 -msgid "operation_type must be add or remove" +#: ../nova/virt/xenapi/vm_utils.py:332 +#, python-format +msgid "Glance image %s" msgstr "" -#: nova/api/ec2/cloud.py:812 +#. we need to invoke a plugin for copying VDI's +#. content into proper path +#: ../nova/virt/xenapi/vm_utils.py:342 #, python-format -msgid "Updating image %s publicity" +msgid "Copying VDI %s to /boot/guest on dom0" msgstr "" -#: nova/api/ec2/metadatarequesthandler.py:75 +#: ../nova/virt/xenapi/vm_utils.py:352 #, python-format -msgid "Failed to get metadata for ip: %s" +msgid "Kernel/Ramdisk VDI %s destroyed" msgstr "" -#: nova/api/openstack/__init__.py:70 +#: ../nova/virt/xenapi/vm_utils.py:361 #, python-format -msgid "Caught error: %s" +msgid "Asking xapi to fetch %(url)s as %(access)s" msgstr "" -#: nova/api/openstack/__init__.py:86 -msgid "Including admin operations in API." 
+#: ../nova/virt/xenapi/vm_utils.py:386 ../nova/virt/xenapi/vm_utils.py:402 +#, python-format +msgid "Looking up vdi %s for PV kernel" msgstr "" -#: nova/api/openstack/servers.py:184 +#: ../nova/virt/xenapi/vm_utils.py:397 #, python-format -msgid "Compute.api::lock %s" +msgid "PV Kernel in VDI:%s" msgstr "" -#: nova/api/openstack/servers.py:199 +#: ../nova/virt/xenapi/vm_utils.py:405 #, python-format -msgid "Compute.api::unlock %s" +msgid "Running pygrub against %s" msgstr "" -#: nova/api/openstack/servers.py:213 +#: ../nova/virt/xenapi/vm_utils.py:411 #, python-format -msgid "Compute.api::get_lock %s" +msgid "Found Xen kernel %s" msgstr "" -#: nova/api/openstack/servers.py:224 -#, python-format -msgid "Compute.api::pause %s" +#: ../nova/virt/xenapi/vm_utils.py:413 +msgid "No Xen kernel found. Booting HVM." msgstr "" -#: nova/api/openstack/servers.py:235 +#: ../nova/virt/xenapi/vm_utils.py:425 ../nova/virt/hyperv.py:431 #, python-format -msgid "Compute.api::unpause %s" +msgid "duplicate name found: %s" msgstr "" -#: nova/api/openstack/servers.py:246 +#: ../nova/virt/xenapi/vm_utils.py:442 #, python-format -msgid "compute.api::suspend %s" +msgid "VDI %s is still available" msgstr "" -#: nova/api/openstack/servers.py:257 +#: ../nova/virt/xenapi/vm_utils.py:463 #, python-format -msgid "compute.api::resume %s" +msgid "(VM_UTILS) xenserver vm state -> |%s|" msgstr "" -#: nova/auth/dbdriver.py:84 +#: ../nova/virt/xenapi/vm_utils.py:465 #, python-format -msgid "User %s already exists" +msgid "(VM_UTILS) xenapi power_state -> |%s|" msgstr "" -#: nova/auth/dbdriver.py:106 nova/auth/ldapdriver.py:207 +#: ../nova/virt/xenapi/vm_utils.py:525 #, python-format -msgid "Project can't be created because manager %s doesn't exist" +msgid "VHD %(vdi_uuid)s has parent %(parent_ref)s" msgstr "" -#: nova/auth/dbdriver.py:135 nova/auth/ldapdriver.py:204 +#: ../nova/virt/xenapi/vm_utils.py:542 #, python-format -msgid "Project can't be created because project %s already exists" +msgid 
"Re-scanning SR %s" msgstr "" -#: nova/auth/dbdriver.py:157 nova/auth/ldapdriver.py:241 +#: ../nova/virt/xenapi/vm_utils.py:567 #, python-format -msgid "Project can't be modified because manager %s doesn't exist" +msgid "" +"VHD coalesce attempts exceeded (%(counter)d > %(max_attempts)d), giving up..." msgstr "" -#: nova/auth/dbdriver.py:245 +#: ../nova/virt/xenapi/vm_utils.py:574 #, python-format -msgid "User \"%s\" not found" +msgid "" +"Parent %(parent_uuid)s doesn't match original parent " +"%(original_parent_uuid)s, waiting for coalesce..." msgstr "" -#: nova/auth/dbdriver.py:248 +#: ../nova/virt/xenapi/vm_utils.py:590 #, python-format -msgid "Project \"%s\" not found" +msgid "No VDIs found for VM %s" msgstr "" -#: nova/auth/fakeldap.py:33 -msgid "Attempted to instantiate singleton" +#: ../nova/virt/xenapi/vm_utils.py:594 +#, python-format +msgid "Unexpected number of VDIs (%(num_vdis)s) found for VM %(vm_ref)s" msgstr "" -#: nova/auth/ldapdriver.py:181 +#: ../nova/virt/xenapi/vm_utils.py:653 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:188 #, python-format -msgid "LDAP object for %s doesn't exist" +msgid "Creating VBD for VDI %s ... " msgstr "" -#: nova/auth/ldapdriver.py:218 +#: ../nova/virt/xenapi/vm_utils.py:655 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:190 #, python-format -msgid "Project can't be created because user %s doesn't exist" +msgid "Creating VBD for VDI %s done." msgstr "" -#: nova/auth/ldapdriver.py:478 +#: ../nova/virt/xenapi/vm_utils.py:657 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:192 #, python-format -msgid "User %s is already a member of the group %s" +msgid "Plugging VBD %s ... " msgstr "" -#: nova/auth/ldapdriver.py:507 +#: ../nova/virt/xenapi/vm_utils.py:659 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:194 #, python-format -msgid "" -"Attempted to remove the last member of a group. Deleting the group at %s " -"instead." 
+msgid "Plugging VBD %s done." msgstr "" -#: nova/auth/ldapdriver.py:528 +#: ../nova/virt/xenapi/vm_utils.py:661 #, python-format -msgid "Group at dn %s doesn't exist" +msgid "VBD %(vbd)s plugged as %(orig_dev)s" msgstr "" -#: nova/auth/manager.py:259 +#: ../nova/virt/xenapi/vm_utils.py:664 #, python-format -msgid "Looking up user: %r" +msgid "VBD %(vbd)s plugged into wrong dev, remapping to %(dev)s" msgstr "" -#: nova/auth/manager.py:263 +#: ../nova/virt/xenapi/vm_utils.py:668 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:197 #, python-format -msgid "Failed authorization for access key %s" +msgid "Destroying VBD for VDI %s ... " msgstr "" -#: nova/auth/manager.py:264 +#: ../nova/virt/xenapi/vm_utils.py:671 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:200 #, python-format -msgid "No user found for access key %s" +msgid "Destroying VBD for VDI %s done." msgstr "" -#: nova/auth/manager.py:270 +#: ../nova/virt/xenapi/vm_utils.py:683 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:211 +msgid "VBD.unplug successful first time." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:688 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:216 +msgid "VBD.unplug rejected: retrying..." +msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:692 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:220 +msgid "VBD.unplug successful eventually." 
+msgstr "" + +#: ../nova/virt/xenapi/vm_utils.py:695 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:223 #, python-format -msgid "Using project name = user name (%s)" +msgid "Ignoring XenAPI.Failure in VBD.unplug: %s" msgstr "" -#: nova/auth/manager.py:275 +#: ../nova/virt/xenapi/vm_utils.py:704 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:66 #, python-format -msgid "failed authorization: no project named %s (user=%s)" +msgid "Ignoring XenAPI.Failure %s" msgstr "" -#: nova/auth/manager.py:277 +#: ../nova/virt/xenapi/vm_utils.py:735 #, python-format -msgid "No project called %s could be found" +msgid "" +"Writing partition table %(primary_first)d %(primary_last)d to %(dest)s..." msgstr "" -#: nova/auth/manager.py:281 +#: ../nova/virt/xenapi/vm_utils.py:747 #, python-format -msgid "Failed authorization: user %s not admin and not member of project %s" +msgid "Writing partition table %s done." msgstr "" -#: nova/auth/manager.py:283 +#: ../nova/tests/test_rpc.py:89 #, python-format -msgid "User %s is not a member of project %s" +msgid "Nested received %(queue)s, %(value)s" msgstr "" -#: nova/auth/manager.py:292 nova/auth/manager.py:303 +#: ../nova/tests/test_rpc.py:95 #, python-format -msgid "Invalid signature for user %s" +msgid "Nested return %s" msgstr "" -#: nova/auth/manager.py:293 nova/auth/manager.py:304 -msgid "Signature does not match" +#: ../nova/tests/test_rpc.py:120 ../nova/tests/test_rpc.py:126 +#, python-format +msgid "Received %s" msgstr "" -#: nova/auth/manager.py:374 -msgid "Must specify project" +#: ../nova/db/sqlalchemy/api.py:44 +msgid "Use of empty request context is deprecated" msgstr "" -#: nova/auth/manager.py:408 +#: ../nova/db/sqlalchemy/api.py:133 #, python-format -msgid "The %s role can not be found" +msgid "No service for id %s" msgstr "" -#: nova/auth/manager.py:410 +#: ../nova/db/sqlalchemy/api.py:251 #, python-format -msgid "The %s role is global only" +msgid "No service for %(host)s, 
%(binary)s" msgstr "" -#: nova/auth/manager.py:412 -#, python-format -msgid "Adding role %s to user %s in project %s" +#: ../nova/db/sqlalchemy/api.py:592 +msgid "No fixed ips defined" msgstr "" -#: nova/auth/manager.py:438 +#: ../nova/db/sqlalchemy/api.py:608 #, python-format -msgid "Removing role %s from user %s on project %s" +msgid "No floating ip for address %s" msgstr "" -#: nova/auth/manager.py:505 +#: ../nova/db/sqlalchemy/api.py:629 #, python-format -msgid "Created project %s with manager %s" +msgid "No address for instance %s" msgstr "" -#: nova/auth/manager.py:523 +#: ../nova/db/sqlalchemy/api.py:961 #, python-format -msgid "modifying project %s" +msgid "no keypair for user %(user_id)s, name %(name)s" msgstr "" -#: nova/auth/manager.py:553 +#: ../nova/db/sqlalchemy/api.py:1076 ../nova/db/sqlalchemy/api.py:1156 #, python-format -msgid "Remove user %s from project %s" +msgid "No network for id %s" msgstr "" -#: nova/auth/manager.py:581 +#: ../nova/db/sqlalchemy/api.py:1086 +msgid "No networks defined" +msgstr "" + +#: ../nova/db/sqlalchemy/api.py:1115 #, python-format -msgid "Deleting project %s" +msgid "No network for bridge %s" msgstr "" -#: nova/auth/manager.py:637 +#: ../nova/db/sqlalchemy/api.py:1129 ../nova/db/sqlalchemy/api.py:1142 #, python-format -msgid "Created user %s (admin: %r)" +msgid "No network for instance %s" msgstr "" -#: nova/auth/manager.py:645 +#: ../nova/db/sqlalchemy/api.py:1277 #, python-format -msgid "Deleting user %s" +msgid "Token %s does not exist" msgstr "" -#: nova/auth/manager.py:655 +#: ../nova/db/sqlalchemy/api.py:1302 #, python-format -msgid "Access Key change for user %s" +msgid "No quota for project_id %s" msgstr "" -#: nova/auth/manager.py:657 +#: ../nova/db/sqlalchemy/api.py:1455 ../nova/db/sqlalchemy/api.py:1501 +#: ../nova/api/ec2/__init__.py:323 #, python-format -msgid "Secret Key change for user %s" +msgid "Volume %s not found" msgstr "" -#: nova/auth/manager.py:659 +#: ../nova/db/sqlalchemy/api.py:1514 #, 
python-format -msgid "Admin status set to %r for user %s" +msgid "No export device found for volume %s" msgstr "" -#: nova/auth/manager.py:708 +#: ../nova/db/sqlalchemy/api.py:1527 #, python-format -msgid "No vpn data for project %s" +msgid "No target id found for volume %s" msgstr "" -#: nova/cloudpipe/pipelib.py:45 -msgid "Template for script to run on cloudpipe instance boot" +#: ../nova/db/sqlalchemy/api.py:1572 +#, python-format +msgid "No security group with id %s" msgstr "" -#: nova/cloudpipe/pipelib.py:48 -msgid "Network to push into openvpn config" +#: ../nova/db/sqlalchemy/api.py:1589 +#, python-format +msgid "No security group named %(group_name)s for project: %(project_id)s" msgstr "" -#: nova/cloudpipe/pipelib.py:51 -msgid "Netmask to push into openvpn config" +#: ../nova/db/sqlalchemy/api.py:1682 +#, python-format +msgid "No secuity group rule with id %s" msgstr "" -#: nova/cloudpipe/pipelib.py:97 +#: ../nova/db/sqlalchemy/api.py:1756 #, python-format -msgid "Launching VPN for %s" +msgid "No user for id %s" msgstr "" -#: nova/compute/api.py:67 +#: ../nova/db/sqlalchemy/api.py:1772 #, python-format -msgid "Instance %d was not found in get_network_topic" +msgid "No user for access key %s" msgstr "" -#: nova/compute/api.py:73 +#: ../nova/db/sqlalchemy/api.py:1834 #, python-format -msgid "Instance %d has no host" +msgid "No project with id %s" msgstr "" -#: nova/compute/api.py:92 +#: ../nova/db/sqlalchemy/api.py:1979 #, python-format -msgid "Quota exceeeded for %s, tried to run %s instances" +msgid "No console pool with id %(pool_id)s" msgstr "" -#: nova/compute/api.py:94 +#: ../nova/db/sqlalchemy/api.py:1996 #, python-format msgid "" -"Instance quota exceeded. You can only run %s more instances of this type." 
+"No console pool of type %(console_type)s for compute host %(compute_host)s " +"on proxy host %(host)s" msgstr "" -#: nova/compute/api.py:109 -msgid "Creating a raw instance" +#: ../nova/db/sqlalchemy/api.py:2035 +#, python-format +msgid "No console for instance %(instance_id)s in pool %(pool_id)s" msgstr "" -#: nova/compute/api.py:156 +#: ../nova/db/sqlalchemy/api.py:2057 #, python-format -msgid "Going to run %s instances..." +msgid "on instance %s" msgstr "" -#: nova/compute/api.py:180 +#: ../nova/db/sqlalchemy/api.py:2058 #, python-format -msgid "Casting to scheduler for %s/%s's instance %s" +msgid "No console with id %(console_id)s %(idesc)s" msgstr "" -#: nova/compute/api.py:279 +#: ../nova/db/sqlalchemy/api.py:2078 ../nova/db/sqlalchemy/api.py:2097 #, python-format -msgid "Going to try and terminate %s" +msgid "No zone with id %(zone_id)s" msgstr "" -#: nova/compute/api.py:283 +#: ../nova/virt/libvirt_conn.py:160 #, python-format -msgid "Instance %d was not found during terminate" +msgid "Checking state of %s" msgstr "" -#: nova/compute/api.py:288 +#: ../nova/virt/libvirt_conn.py:165 #, python-format -msgid "Instance %d is already being terminated" +msgid "Current state of %(name)s was %(state)s." msgstr "" -#: nova/compute/api.py:450 +#: ../nova/virt/libvirt_conn.py:183 #, python-format -msgid "Invalid device specified: %s. Example device: /dev/vdb" +msgid "Connecting to libvirt: %s" msgstr "" -#: nova/compute/api.py:465 -msgid "Volume isn't attached to anything!" 
+#: ../nova/virt/libvirt_conn.py:196 +msgid "Connection to libvirt broke" msgstr "" -#: nova/compute/disk.py:71 +#: ../nova/virt/libvirt_conn.py:258 #, python-format -msgid "Input partition size not evenly divisible by sector size: %d / %d" +msgid "instance %(instance_name)s: deleting instance files %(target)s" msgstr "" -#: nova/compute/disk.py:75 +#: ../nova/virt/libvirt_conn.py:283 #, python-format -msgid "Bytes for local storage not evenly divisible by sector size: %d / %d" +msgid "Invalid device path %s" msgstr "" -#: nova/compute/disk.py:128 +#: ../nova/virt/libvirt_conn.py:313 #, python-format -msgid "Could not attach image to loopback: %s" +msgid "No disk at %s" msgstr "" -#: nova/compute/disk.py:136 -#, python-format -msgid "Failed to load partition: %s" +#: ../nova/virt/libvirt_conn.py:320 +msgid "Instance snapshotting is not supported for libvirtat this time" msgstr "" -#: nova/compute/disk.py:158 +#: ../nova/virt/libvirt_conn.py:336 #, python-format -msgid "Failed to mount filesystem: %s" +msgid "instance %s: rebooted" msgstr "" -#: nova/compute/instance_types.py:41 +#: ../nova/virt/libvirt_conn.py:339 #, python-format -msgid "Unknown instance type: %s" +msgid "_wait_for_reboot failed: %s" msgstr "" -#: nova/compute/manager.py:69 +#: ../nova/virt/libvirt_conn.py:382 #, python-format -msgid "check_instance_lock: decorating: |%s|" +msgid "instance %s: rescued" msgstr "" -#: nova/compute/manager.py:71 +#: ../nova/virt/libvirt_conn.py:385 #, python-format -msgid "check_instance_lock: arguments: |%s| |%s| |%s|" +msgid "_wait_for_rescue failed: %s" msgstr "" -#: nova/compute/manager.py:75 +#: ../nova/virt/libvirt_conn.py:411 #, python-format -msgid "check_instance_lock: locked: |%s|" +msgid "instance %s: is running" msgstr "" -#: nova/compute/manager.py:77 +#: ../nova/virt/libvirt_conn.py:422 #, python-format -msgid "check_instance_lock: admin: |%s|" +msgid "instance %s: booted" msgstr "" -#: nova/compute/manager.py:82 +#: ../nova/virt/libvirt_conn.py:425 
../nova/virt/xenapi/vmops.py:186 #, python-format -msgid "check_instance_lock: executing: |%s|" +msgid "instance %s: failed to boot" msgstr "" -#: nova/compute/manager.py:86 +#: ../nova/virt/libvirt_conn.py:436 #, python-format -msgid "check_instance_lock: not executing |%s|" +msgid "virsh said: %r" msgstr "" -#: nova/compute/manager.py:157 -msgid "Instance has already been created" +#: ../nova/virt/libvirt_conn.py:440 +msgid "cool, it's a device" msgstr "" -#: nova/compute/manager.py:158 +#: ../nova/virt/libvirt_conn.py:448 #, python-format -msgid "instance %s: starting..." +msgid "data: %(data)r, fpath: %(fpath)r" msgstr "" -#: nova/compute/manager.py:197 +#: ../nova/virt/libvirt_conn.py:456 #, python-format -msgid "instance %s: Failed to spawn" +msgid "Contents of file %(fpath)s: %(contents)r" msgstr "" -#: nova/compute/manager.py:211 nova/tests/test_cloud.py:228 -#, python-format -msgid "Terminating instance %s" +#: ../nova/virt/libvirt_conn.py:489 +msgid "Unable to find an open port" msgstr "" -#: nova/compute/manager.py:217 +#: ../nova/virt/libvirt_conn.py:563 #, python-format -msgid "Disassociating address %s" +msgid "instance %s: Creating image" msgstr "" -#: nova/compute/manager.py:230 +#: ../nova/virt/libvirt_conn.py:646 #, python-format -msgid "Deallocating address %s" +msgid "instance %(inst_name)s: injecting key into image %(img_id)s" msgstr "" -#: nova/compute/manager.py:243 +#: ../nova/virt/libvirt_conn.py:649 #, python-format -msgid "trying to destroy already destroyed instance: %s" +msgid "instance %(inst_name)s: injecting net into image %(img_id)s" msgstr "" -#: nova/compute/manager.py:257 +#. This could be a windows image, or a vmdk format disk +#: ../nova/virt/libvirt_conn.py:657 #, python-format -msgid "Rebooting instance %s" +msgid "" +"instance %(inst_name)s: ignoring error injecting data into image %(img_id)s " +"(%(e)s)" msgstr "" -#: nova/compute/manager.py:260 +#. TODO(termie): cache? 
+#: ../nova/virt/libvirt_conn.py:665 #, python-format -msgid "trying to reboot a non-running instance: %s (state: %s excepted: %s)" +msgid "instance %s: starting toXML method" msgstr "" -#: nova/compute/manager.py:286 +#: ../nova/virt/libvirt_conn.py:732 #, python-format -msgid "instance %s: snapshotting" +msgid "instance %s: finished toXML method" msgstr "" -#: nova/compute/manager.py:289 +#: ../nova/virt/libvirt_conn.py:751 +msgid "diagnostics are not supported for libvirt" +msgstr "" + +#: ../nova/virt/libvirt_conn.py:1225 #, python-format -msgid "" -"trying to snapshot a non-running instance: %s (state: %s excepted: %s)" +msgid "Attempted to unfilter instance %s which is not filtered" msgstr "" -#: nova/compute/manager.py:301 +#: ../nova/api/ec2/metadatarequesthandler.py:76 #, python-format -msgid "instance %s: rescuing" +msgid "Failed to get metadata for ip: %s" msgstr "" -#: nova/compute/manager.py:316 +#: ../nova/auth/fakeldap.py:33 +msgid "Attempted to instantiate singleton" +msgstr "" + +#: ../nova/network/api.py:39 #, python-format -msgid "instance %s: unrescuing" +msgid "Quota exceeeded for %s, tried to allocate address" +msgstr "" + +#: ../nova/network/api.py:42 +msgid "Address quota exceeded. You cannot allocate any more addresses" msgstr "" -#: nova/compute/manager.py:335 +#: ../nova/tests/test_volume.py:162 #, python-format -msgid "instance %s: pausing" +msgid "Target %s allocated" msgstr "" -#: nova/compute/manager.py:352 +#: ../nova/virt/images.py:70 #, python-format -msgid "instance %s: unpausing" +msgid "Finished retreving %(url)s -- placed in %(path)s" msgstr "" -#: nova/compute/manager.py:369 +#: ../nova/scheduler/driver.py:66 +msgid "Must implement a fallback schedule" +msgstr "" + +#: ../nova/console/manager.py:70 +msgid "Adding console" +msgstr "" + +#: ../nova/console/manager.py:90 #, python-format -msgid "instance %s: retrieving diagnostics" +msgid "Tried to remove non-existant console %(console_id)s." 
+msgstr "" + +#: ../nova/api/direct.py:149 +msgid "not available" msgstr "" -#: nova/compute/manager.py:382 +#: ../nova/api/ec2/cloud.py:62 #, python-format -msgid "instance %s: suspending" +msgid "The key_pair %s already exists" msgstr "" -#: nova/compute/manager.py:401 +#. TODO(vish): Do this with M2Crypto instead +#: ../nova/api/ec2/cloud.py:118 #, python-format -msgid "instance %s: resuming" +msgid "Generating root CA: %s" +msgstr "生成根证书: %s" + +#: ../nova/api/ec2/cloud.py:303 +#, python-format +msgid "Create key pair %s" +msgstr "创建键值对 %s" + +#: ../nova/api/ec2/cloud.py:311 +#, python-format +msgid "Delete key pair %s" +msgstr "删除键值对 %s" + +#: ../nova/api/ec2/cloud.py:386 +#, python-format +msgid "%s is not a valid ipProtocol" +msgstr "%s 是无效的IP协议" + +#: ../nova/api/ec2/cloud.py:390 +msgid "Invalid port range" +msgstr "端口范围无效" + +#: ../nova/api/ec2/cloud.py:421 +#, python-format +msgid "Revoke security group ingress %s" +msgstr "撤销输入安全组 %s" + +#: ../nova/api/ec2/cloud.py:430 ../nova/api/ec2/cloud.py:459 +msgid "Not enough parameters to build a valid rule." msgstr "" -#: nova/compute/manager.py:420 +#: ../nova/api/ec2/cloud.py:443 +msgid "No rule for the specified parameters." 
+msgstr "对给定的参数无特定规则。" + +#: ../nova/api/ec2/cloud.py:450 #, python-format -msgid "instance %s: locking" +msgid "Authorize security group ingress %s" +msgstr "验证输入安全组 %s" + +#: ../nova/api/ec2/cloud.py:464 +#, python-format +msgid "This rule already exists in group %s" +msgstr "这条规则已经存在安全组 %s 中。" + +#: ../nova/api/ec2/cloud.py:492 +#, python-format +msgid "Create Security Group %s" +msgstr "创建安全组 %s" + +#: ../nova/api/ec2/cloud.py:495 +#, python-format +msgid "group %s already exists" +msgstr "安全组 %s 已经存在" + +#: ../nova/api/ec2/cloud.py:507 +#, python-format +msgid "Delete security group %s" +msgstr "删除安全组 %s" + +#: ../nova/api/ec2/cloud.py:584 +#, python-format +msgid "Create volume of %s GB" msgstr "" -#: nova/compute/manager.py:432 +#: ../nova/api/ec2/cloud.py:612 #, python-format -msgid "instance %s: unlocking" +msgid "Attach volume %(volume_id)s to instance %(instance_id)s at %(device)s" msgstr "" -#: nova/compute/manager.py:442 +#: ../nova/api/ec2/cloud.py:629 #, python-format -msgid "instance %s: getting locked state" +msgid "Detach volume %s" msgstr "" -#: nova/compute/manager.py:462 +#: ../nova/api/ec2/cloud.py:761 +msgid "Allocate address" +msgstr "" + +#: ../nova/api/ec2/cloud.py:766 #, python-format -msgid "instance %s: attaching volume %s to %s" +msgid "Release address %s" msgstr "" -#: nova/compute/manager.py:478 +#: ../nova/api/ec2/cloud.py:771 #, python-format -msgid "instance %s: attach failed %s, removing" +msgid "Associate address %(public_ip)s to instance %(instance_id)s" msgstr "" -#: nova/compute/manager.py:493 +#: ../nova/api/ec2/cloud.py:780 #, python-format -msgid "Detach volume %s from mountpoint %s on instance %s" +msgid "Disassociate address %s" +msgstr "" + +#: ../nova/api/ec2/cloud.py:807 +msgid "Going to start terminating instances" msgstr "" -#: nova/compute/manager.py:497 +#: ../nova/api/ec2/cloud.py:815 #, python-format -msgid "Detaching volume from unknown instance %s" +msgid "Reboot instance %r" msgstr "" -#: 
nova/compute/monitor.py:259 +#: ../nova/api/ec2/cloud.py:867 #, python-format -msgid "updating %s..." +msgid "De-registering image %s" msgstr "" -#: nova/compute/monitor.py:289 -msgid "unexpected error during update" +#: ../nova/api/ec2/cloud.py:875 +#, python-format +msgid "Registered image %(image_location)s with id %(image_id)s" msgstr "" -#: nova/compute/monitor.py:355 +#: ../nova/api/ec2/cloud.py:882 ../nova/api/ec2/cloud.py:900 #, python-format -msgid "Cannot get blockstats for \"%s\" on \"%s\"" +msgid "attribute not supported: %s" msgstr "" -#: nova/compute/monitor.py:377 +#: ../nova/api/ec2/cloud.py:890 #, python-format -msgid "Cannot get ifstats for \"%s\" on \"%s\"" +msgid "invalid id: %s" msgstr "" -#: nova/compute/monitor.py:412 -msgid "unexpected exception getting connection" +#: ../nova/api/ec2/cloud.py:903 +msgid "user or group not specified" msgstr "" -#: nova/compute/monitor.py:427 -#, python-format -msgid "Found instance: %s" +#: ../nova/api/ec2/cloud.py:905 +msgid "only group \"all\" is supported" msgstr "" -#: nova/db/sqlalchemy/api.py:43 -msgid "Use of empty request context is deprecated" +#: ../nova/api/ec2/cloud.py:907 +msgid "operation_type must be add or remove" msgstr "" -#: nova/db/sqlalchemy/api.py:132 +#: ../nova/api/ec2/cloud.py:908 #, python-format -msgid "No service for id %s" +msgid "Updating image %s publicity" msgstr "" -#: nova/db/sqlalchemy/api.py:229 +#: ../bin/nova-api.py:52 #, python-format -msgid "No service for %s, %s" +msgid "Using paste.deploy config at: %s" msgstr "" -#: nova/db/sqlalchemy/api.py:574 +#: ../bin/nova-api.py:57 #, python-format -msgid "No floating ip for address %s" +msgid "No paste configuration for app: %s" msgstr "" -#: nova/db/sqlalchemy/api.py:668 +#: ../bin/nova-api.py:59 #, python-format -msgid "No instance for id %s" +msgid "" +"App Config: %(api)s\n" +"%(config)r" msgstr "" -#: nova/db/sqlalchemy/api.py:758 nova/virt/libvirt_conn.py:598 -#: nova/virt/xenapi/volumeops.py:48 
nova/virt/xenapi/volumeops.py:103 +#: ../bin/nova-api.py:64 #, python-format -msgid "Instance %s not found" +msgid "Running %s API" msgstr "" -#: nova/db/sqlalchemy/api.py:891 +#: ../bin/nova-api.py:69 #, python-format -msgid "no keypair for user %s, name %s" +msgid "No known API applications configured in %s." msgstr "" -#: nova/db/sqlalchemy/api.py:1006 nova/db/sqlalchemy/api.py:1064 +#: ../bin/nova-api.py:83 #, python-format -msgid "No network for id %s" +msgid "Starting nova-api node (version %s)" msgstr "" -#: nova/db/sqlalchemy/api.py:1036 +#: ../bin/nova-api.py:89 #, python-format -msgid "No network for bridge %s" +msgid "No paste configuration found for: %s" msgstr "" -#: nova/db/sqlalchemy/api.py:1050 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:84 #, python-format -msgid "No network for instance %s" +msgid "Argument %(key)s value %(value)s is too short." msgstr "" -#: nova/db/sqlalchemy/api.py:1180 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:89 #, python-format -msgid "Token %s does not exist" +msgid "Argument %(key)s value %(value)s contains invalid characters." msgstr "" -#: nova/db/sqlalchemy/api.py:1205 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:94 #, python-format -msgid "No quota for project_id %s" +msgid "Argument %(key)s value %(value)s starts with a hyphen." msgstr "" -#: nova/db/sqlalchemy/api.py:1356 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:102 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:130 #, python-format -msgid "No volume for id %s" +msgid "Argument %s is required." msgstr "" -#: nova/db/sqlalchemy/api.py:1401 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:117 #, python-format -msgid "Volume %s not found" +msgid "" +"Argument %(key)s may not take value %(value)s. Valid values are ['true', " +"'false']." 
msgstr "" -#: nova/db/sqlalchemy/api.py:1413 +#: ../plugins/xenserver/xenapi/etc/xapi.d/plugins/pluginlib_nova.py:163 #, python-format -msgid "No export device found for volume %s" +msgid "" +"Created VDI %(vdi_ref)s (%(label)s, %(size)s, %(read_only)s) on %(sr_ref)s." msgstr "" -#: nova/db/sqlalchemy/api.py:1426 +#: ../nova/virt/xenapi/vmops.py:67 #, python-format -msgid "No target id found for volume %s" +msgid "Attempted to create non-unique name %s" msgstr "" -#: nova/db/sqlalchemy/api.py:1471 +#: ../nova/virt/xenapi/vmops.py:73 #, python-format -msgid "No security group with id %s" +msgid "instance %(name)s: not enough free memory" msgstr "" -#: nova/db/sqlalchemy/api.py:1488 +#: ../nova/virt/xenapi/vmops.py:148 #, python-format -msgid "No security group named %s for project: %s" +msgid "Starting VM %s..." msgstr "" -#: nova/db/sqlalchemy/api.py:1576 +#: ../nova/virt/xenapi/vmops.py:151 #, python-format -msgid "No secuity group rule with id %s" +msgid "Spawning VM %(instance_name)s created %(vm_ref)s." msgstr "" -#: nova/db/sqlalchemy/api.py:1650 +#: ../nova/virt/xenapi/vmops.py:162 #, python-format -msgid "No user for id %s" +msgid "Invalid value for onset_files: '%s'" msgstr "" -#: nova/db/sqlalchemy/api.py:1666 +#: ../nova/virt/xenapi/vmops.py:167 #, python-format -msgid "No user for access key %s" +msgid "Injecting file path: '%s'" msgstr "" -#: nova/db/sqlalchemy/api.py:1728 +#: ../nova/virt/xenapi/vmops.py:180 #, python-format -msgid "No project with id %s" +msgid "Instance %s: booted" msgstr "" -#: nova/image/glance.py:78 +#: ../nova/virt/xenapi/vmops.py:232 #, python-format -msgid "Parallax returned HTTP error %d from request for /images" +msgid "Instance not present %s" msgstr "" -#: nova/image/glance.py:97 +#. TODO(sirp): Add quiesce and VSS locking support when Windows support +#. 
is added +#: ../nova/virt/xenapi/vmops.py:261 #, python-format -msgid "Parallax returned HTTP error %d from request for /images/detail" +msgid "Starting snapshot for VM %s" msgstr "" -#: nova/image/s3.py:82 +#: ../nova/virt/xenapi/vmops.py:269 #, python-format -msgid "Image %s could not be found" +msgid "Unable to Snapshot %(vm_ref)s: %(exc)s" msgstr "" -#: nova/network/api.py:39 +#: ../nova/virt/xenapi/vmops.py:280 #, python-format -msgid "Quota exceeeded for %s, tried to allocate address" +msgid "Finished snapshot and upload for VM %s" msgstr "" -#: nova/network/api.py:42 -msgid "Address quota exceeded. You cannot allocate any more addresses" +#: ../nova/virt/xenapi/vmops.py:356 +#, python-format +msgid "VM %(vm)s already halted, skipping shutdown..." msgstr "" -#: nova/network/linux_net.py:176 -#, python-format -msgid "Starting VLAN inteface %s" +#: ../nova/virt/xenapi/vmops.py:389 +msgid "Removing kernel/ramdisk files" msgstr "" -#: nova/network/linux_net.py:186 -#, python-format -msgid "Starting Bridge interface for %s" +#: ../nova/virt/xenapi/vmops.py:399 +msgid "kernel/ramdisk files removed" msgstr "" -#: nova/network/linux_net.py:254 +#: ../nova/virt/xenapi/vmops.py:561 #, python-format -msgid "Hupping dnsmasq threw %s" +msgid "" +"TIMEOUT: The call to %(method)s timed out. VM id=%(instance_id)s; " +"args=%(strargs)s" msgstr "" -#: nova/network/linux_net.py:256 +#: ../nova/virt/xenapi/vmops.py:564 #, python-format -msgid "Pid %d is stale, relaunching dnsmasq" +msgid "" +"NOT IMPLEMENTED: The call to %(method)s is not supported by the agent. VM " +"id=%(instance_id)s; args=%(strargs)s" msgstr "" -#: nova/network/linux_net.py:334 +#: ../nova/virt/xenapi/vmops.py:569 #, python-format -msgid "Killing dnsmasq threw %s" +msgid "" +"The call to %(method)s returned an error: %(e)s. 
VM id=%(instance_id)s; " +"args=%(strargs)s" msgstr "" -#: nova/network/manager.py:135 -msgid "setting network host" +#: ../nova/virt/xenapi/vmops.py:760 +#, python-format +msgid "OpenSSL error: %s" msgstr "" -#: nova/network/manager.py:190 +#: ../nova/tests/test_compute.py:148 #, python-format -msgid "Leasing IP %s" +msgid "Running instances: %s" msgstr "" -#: nova/network/manager.py:194 +#: ../nova/tests/test_compute.py:154 #, python-format -msgid "IP %s leased that isn't associated" +msgid "After terminating instances: %s" msgstr "" -#: nova/network/manager.py:197 -#, python-format -msgid "IP %s leased to bad mac %s vs %s" +#: ../nova/cloudpipe/pipelib.py:45 +msgid "Template for script to run on cloudpipe instance boot" msgstr "" -#: nova/network/manager.py:205 +#: ../nova/cloudpipe/pipelib.py:48 +msgid "Network to push into openvpn config" +msgstr "" + +#: ../nova/cloudpipe/pipelib.py:51 +msgid "Netmask to push into openvpn config" +msgstr "" + +#: ../nova/cloudpipe/pipelib.py:97 #, python-format -msgid "IP %s leased that was already deallocated" +msgid "Launching VPN for %s" +msgstr "" + +#: ../nova/db/sqlalchemy/migration.py:35 +msgid "python-migrate is not installed. Exiting." msgstr "" -#: nova/network/manager.py:214 +#: ../nova/image/s3.py:99 #, python-format -msgid "IP %s released that isn't associated" +msgid "Image %s could not be found" msgstr "" -#: nova/network/manager.py:217 +#: ../nova/api/ec2/__init__.py:121 +msgid "Too many failed authentications." +msgstr "认证失败过多" + +#: ../nova/api/ec2/__init__.py:131 #, python-format -msgid "IP %s released from bad mac %s vs %s" +msgid "" +"Access key %(access_key)s has had %(failures)d failed authentications and " +"will be locked out for %(lock_mins)d minutes." 
msgstr "" -#: nova/network/manager.py:220 +#: ../nova/api/ec2/__init__.py:169 ../nova/objectstore/handler.py:140 #, python-format -msgid "IP %s released that was not leased" +msgid "Authentication Failure: %s" +msgstr "认证失败:%s" + +#: ../nova/api/ec2/__init__.py:182 +#, python-format +msgid "Authenticated Request For %(uname)s:%(pname)s)" msgstr "" -#: nova/network/manager.py:442 +#: ../nova/api/ec2/__init__.py:207 #, python-format -msgid "Dissassociated %s stale fixed ip(s)" +msgid "action: %s" +msgstr "执行: %s" + +#: ../nova/api/ec2/__init__.py:209 +#, python-format +msgid "arg: %(key)s\t\tval: %(value)s" msgstr "" -#: nova/objectstore/handler.py:106 +#: ../nova/api/ec2/__init__.py:281 #, python-format -msgid "Unknown S3 value type %r" +msgid "" +"Unauthorized request for controller=%(controller)s and action=%(action)s" msgstr "" -#: nova/objectstore/handler.py:137 -msgid "Authenticated request" +#: ../nova/api/ec2/__init__.py:314 +#, python-format +msgid "InstanceNotFound raised: %s" msgstr "" -#: nova/objectstore/handler.py:182 -msgid "List of buckets requested" +#: ../nova/api/ec2/__init__.py:320 +#, python-format +msgid "VolumeNotFound raised: %s" msgstr "" -#: nova/objectstore/handler.py:209 +#: ../nova/api/ec2/__init__.py:326 #, python-format -msgid "List keys for bucket %s" +msgid "NotFound raised: %s" +msgstr "引起没有找到的错误: %s" + +#: ../nova/api/ec2/__init__.py:329 +#, python-format +msgid "ApiError raised: %s" +msgstr "引发了Api错误: %s" + +#: ../nova/api/ec2/__init__.py:338 +#, python-format +msgid "Unexpected error raised: %s" +msgstr "引发了意外的错误:%s" + +#: ../nova/api/ec2/__init__.py:343 +msgid "An unknown error has occurred. Please try your request again." +msgstr "发生了一个未知的错误. 请重试你的请求." 
+ +#: ../nova/auth/dbdriver.py:84 +#, python-format +msgid "User %s already exists" msgstr "" -#: nova/objectstore/handler.py:217 +#: ../nova/auth/dbdriver.py:106 ../nova/auth/ldapdriver.py:232 #, python-format -msgid "Unauthorized attempt to access bucket %s" +msgid "Project can't be created because manager %s doesn't exist" msgstr "" -#: nova/objectstore/handler.py:235 +#: ../nova/auth/dbdriver.py:122 ../nova/auth/ldapdriver.py:243 #, python-format -msgid "Creating bucket %s" +msgid "Project can't be created because user %s doesn't exist" msgstr "" -#: nova/objectstore/handler.py:245 +#: ../nova/auth/dbdriver.py:135 ../nova/auth/ldapdriver.py:229 #, python-format -msgid "Deleting bucket %s" +msgid "Project can't be created because project %s already exists" msgstr "" -#: nova/objectstore/handler.py:249 +#: ../nova/auth/dbdriver.py:157 ../nova/auth/ldapdriver.py:268 #, python-format -msgid "Unauthorized attempt to delete bucket %s" +msgid "Project can't be modified because manager %s doesn't exist" msgstr "" -#: nova/objectstore/handler.py:271 +#: ../nova/auth/dbdriver.py:245 #, python-format -msgid "Getting object: %s / %s" +msgid "User \"%s\" not found" msgstr "" -#: nova/objectstore/handler.py:274 +#: ../nova/auth/dbdriver.py:248 #, python-format -msgid "Unauthorized attempt to get object %s from bucket %s" +msgid "Project \"%s\" not found" msgstr "" -#: nova/objectstore/handler.py:292 +#: ../nova/virt/xenapi_conn.py:129 +msgid "" +"Must specify xenapi_connection_url, xenapi_connection_username (optionally), " +"and xenapi_connection_password to use connection_type=xenapi" +msgstr "" + +#: ../nova/virt/xenapi_conn.py:311 #, python-format -msgid "Putting object: %s / %s" +msgid "Task [%(name)s] %(task)s status: success %(result)s" msgstr "" -#: nova/objectstore/handler.py:295 +#: ../nova/virt/xenapi_conn.py:317 #, python-format -msgid "Unauthorized attempt to upload object %s to bucket %s" +msgid "Task [%(name)s] %(task)s status: %(status)s %(error_info)s" 
msgstr "" -#: nova/objectstore/handler.py:314 +#: ../nova/virt/xenapi_conn.py:331 ../nova/virt/xenapi_conn.py:344 #, python-format -msgid "Deleting object: %s / %s" +msgid "Got exception: %s" msgstr "" -#: nova/objectstore/handler.py:393 +#: ../nova/compute/monitor.py:259 #, python-format -msgid "Not authorized to upload image: invalid directory %s" +msgid "updating %s..." msgstr "" -#: nova/objectstore/handler.py:401 +#: ../nova/compute/monitor.py:289 +msgid "unexpected error during update" +msgstr "" + +#: ../nova/compute/monitor.py:356 #, python-format -msgid "Not authorized to upload image: unauthorized bucket %s" +msgid "Cannot get blockstats for \"%(disk)s\" on \"%(iid)s\"" msgstr "" -#: nova/objectstore/handler.py:406 +#: ../nova/compute/monitor.py:379 #, python-format -msgid "Starting image upload: %s" +msgid "Cannot get ifstats for \"%(interface)s\" on \"%(iid)s\"" msgstr "" -#: nova/objectstore/handler.py:420 +#: ../nova/compute/monitor.py:414 +msgid "unexpected exception getting connection" +msgstr "" + +#: ../nova/compute/monitor.py:429 #, python-format -msgid "Not authorized to update attributes of image %s" +msgid "Found instance: %s" msgstr "" -#: nova/objectstore/handler.py:428 +#: ../nova/volume/san.py:67 #, python-format -msgid "Toggling publicity flag of image %s %r" +msgid "Could not find iSCSI export for volume %s" msgstr "" -#: nova/objectstore/handler.py:433 +#: ../nova/api/ec2/apirequest.py:100 #, python-format -msgid "Updating user fields on image %s" +msgid "" +"Unsupported API request: controller = %(controller)s, action = %(action)s" msgstr "" -#: nova/objectstore/handler.py:447 +#: ../nova/api/openstack/__init__.py:55 #, python-format -msgid "Unauthorized attempt to delete image %s" +msgid "Caught error: %s" msgstr "" -#: nova/objectstore/handler.py:452 +#: ../nova/api/openstack/__init__.py:76 +msgid "Including admin operations in API." 
+msgstr "" + +#: ../nova/console/xvp.py:99 +msgid "Rebuilding xvp conf" +msgstr "" + +#: ../nova/console/xvp.py:116 #, python-format -msgid "Deleted image: %s" +msgid "Re-wrote %s" msgstr "" -#: nova/scheduler/chance.py:37 nova/scheduler/simple.py:73 -#: nova/scheduler/simple.py:106 nova/scheduler/simple.py:118 -msgid "No hosts found" +#: ../nova/console/xvp.py:121 +msgid "Stopping xvp" msgstr "" -#: nova/scheduler/driver.py:66 -msgid "Must implement a fallback schedule" +#: ../nova/console/xvp.py:134 +msgid "Starting xvp" msgstr "" -#: nova/scheduler/manager.py:69 +#: ../nova/console/xvp.py:141 #, python-format -msgid "Casting to %s %s for %s" +msgid "Error starting xvp: %s" msgstr "" -#: nova/scheduler/simple.py:63 -msgid "All hosts have too many cores" +#: ../nova/console/xvp.py:144 +msgid "Restarting xvp" msgstr "" -#: nova/scheduler/simple.py:95 -msgid "All hosts have too many gigabytes" +#: ../nova/console/xvp.py:146 +msgid "xvp not running..." msgstr "" -#: nova/scheduler/simple.py:115 -msgid "All hosts have too many networks" +#: ../bin/nova-manage.py:272 +msgid "" +"The above error may show that the database has not been created.\n" +"Please create a database using nova-manage sync db before running this " +"command." msgstr "" -#: nova/tests/test_cloud.py:198 -msgid "Can't test instances without a real virtual env." +#: ../bin/nova-manage.py:426 +msgid "" +"No more networks available. If this is a new installation, you need\n" +"to call something like this:\n" +"\n" +" nova-manage network create 10.0.0.0/8 10 64\n" +"\n" msgstr "" -#: nova/tests/test_cloud.py:210 -#, python-format -msgid "Need to watch instance %s until it's running..." +#: ../bin/nova-manage.py:431 +msgid "" +"The above error may show that the certificate db has not been created.\n" +"Please create a database by running a nova-api server on this host." 
msgstr "" -#: nova/tests/test_compute.py:104 -#, python-format -msgid "Running instances: %s" +#: ../bin/nova-manage.py:447 ../bin/nova-manage.py:536 +msgid "network" msgstr "" -#: nova/tests/test_compute.py:110 -#, python-format -msgid "After terminating instances: %s" +#: ../bin/nova-manage.py:448 +msgid "IP address" +msgstr "" + +#: ../bin/nova-manage.py:449 +msgid "MAC address" +msgstr "" + +#: ../bin/nova-manage.py:450 +msgid "hostname" msgstr "" -#: nova/tests/test_rpc.py:89 +#: ../bin/nova-manage.py:451 +msgid "host" +msgstr "" + +#: ../bin/nova-manage.py:537 +msgid "netmask" +msgstr "" + +#: ../bin/nova-manage.py:538 +msgid "start address" +msgstr "" + +#: ../nova/virt/disk.py:69 #, python-format -msgid "Nested received %s, %s" +msgid "Failed to load partition: %s" msgstr "" -#: nova/tests/test_rpc.py:94 +#: ../nova/virt/disk.py:91 #, python-format -msgid "Nested return %s" +msgid "Failed to mount filesystem: %s" msgstr "" -#: nova/tests/test_rpc.py:119 nova/tests/test_rpc.py:125 +#: ../nova/virt/disk.py:124 #, python-format -msgid "Received %s" +msgid "nbd device %s did not show up" msgstr "" -#: nova/tests/test_volume.py:162 +#: ../nova/virt/disk.py:128 #, python-format -msgid "Target %s allocated" +msgid "Could not attach image to loopback: %s" msgstr "" -#: nova/virt/connection.py:73 -msgid "Failed to open connection to the hypervisor" +#: ../nova/virt/disk.py:151 +msgid "No free nbd devices" msgstr "" -#: nova/virt/fake.py:210 +#: ../doc/ext/nova_todo.py:46 #, python-format -msgid "Instance %s Not Found" +msgid "%(filename)s, line %(line_info)d" msgstr "" -#: nova/virt/hyperv.py:118 +#. 
FIXME(chiradeep): implement this +#: ../nova/virt/hyperv.py:118 msgid "In init host" msgstr "" -#: nova/virt/hyperv.py:131 +#: ../nova/virt/hyperv.py:131 #, python-format msgid "Attempt to create duplicate vm %s" msgstr "" -#: nova/virt/hyperv.py:148 +#: ../nova/virt/hyperv.py:148 #, python-format msgid "Starting VM %s " msgstr "" -#: nova/virt/hyperv.py:150 +#: ../nova/virt/hyperv.py:150 #, python-format msgid "Started VM %s " msgstr "" -#: nova/virt/hyperv.py:152 +#: ../nova/virt/hyperv.py:152 #, python-format msgid "spawn vm failed: %s" msgstr "" -#: nova/virt/hyperv.py:169 +#: ../nova/virt/hyperv.py:169 #, python-format msgid "Failed to create VM %s" msgstr "" -#: nova/virt/hyperv.py:171 nova/virt/xenapi/vm_utils.py:125 -#, python-format -msgid "Created VM %s..." -msgstr "" - -#: nova/virt/hyperv.py:188 +#: ../nova/virt/hyperv.py:188 #, python-format msgid "Set memory for vm %s..." msgstr "" -#: nova/virt/hyperv.py:198 +#: ../nova/virt/hyperv.py:198 #, python-format msgid "Set vcpus for vm %s..." 
msgstr "" -#: nova/virt/hyperv.py:202 +#: ../nova/virt/hyperv.py:202 #, python-format -msgid "Creating disk for %s by attaching disk file %s" +msgid "Creating disk for %(vm_name)s by attaching disk file %(vhdfile)s" msgstr "" -#: nova/virt/hyperv.py:227 +#: ../nova/virt/hyperv.py:227 #, python-format msgid "Failed to add diskdrive to VM %s" msgstr "" -#: nova/virt/hyperv.py:230 +#: ../nova/virt/hyperv.py:230 #, python-format msgid "New disk drive path is %s" msgstr "" -#: nova/virt/hyperv.py:247 +#: ../nova/virt/hyperv.py:247 #, python-format msgid "Failed to add vhd file to VM %s" msgstr "" -#: nova/virt/hyperv.py:249 +#: ../nova/virt/hyperv.py:249 #, python-format msgid "Created disk for %s" msgstr "" -#: nova/virt/hyperv.py:253 +#: ../nova/virt/hyperv.py:253 #, python-format msgid "Creating nic for %s " msgstr "" -#: nova/virt/hyperv.py:272 +#: ../nova/virt/hyperv.py:272 msgid "Failed creating a port on the external vswitch" msgstr "" -#: nova/virt/hyperv.py:273 +#: ../nova/virt/hyperv.py:273 #, python-format msgid "Failed creating port for %s" msgstr "" -#: nova/virt/hyperv.py:275 +#: ../nova/virt/hyperv.py:276 #, python-format -msgid "Created switch port %s on switch %s" +msgid "Created switch port %(vm_name)s on switch %(ext_path)s" msgstr "" -#: nova/virt/hyperv.py:285 +#: ../nova/virt/hyperv.py:286 #, python-format msgid "Failed to add nic to VM %s" msgstr "" -#: nova/virt/hyperv.py:287 +#: ../nova/virt/hyperv.py:288 #, python-format msgid "Created nic for %s " msgstr "" -#: nova/virt/hyperv.py:320 +#: ../nova/virt/hyperv.py:321 #, python-format msgid "WMI job failed: %s" msgstr "" -#: nova/virt/hyperv.py:322 +#: ../nova/virt/hyperv.py:325 #, python-format -msgid "WMI job succeeded: %s, Elapsed=%s " +msgid "WMI job succeeded: %(desc)s, Elapsed=%(elap)s " msgstr "" -#: nova/virt/hyperv.py:358 +#: ../nova/virt/hyperv.py:361 #, python-format msgid "Got request to destroy vm %s" msgstr "" -#: nova/virt/hyperv.py:383 +#: ../nova/virt/hyperv.py:386 #, 
python-format msgid "Failed to destroy vm %s" msgstr "" -#: nova/virt/hyperv.py:389 +#: ../nova/virt/hyperv.py:393 #, python-format -msgid "Del: disk %s vm %s" +msgid "Del: disk %(vhdfile)s vm %(instance_name)s" msgstr "" -#: nova/virt/hyperv.py:405 +#: ../nova/virt/hyperv.py:415 #, python-format msgid "" -"Got Info for vm %s: state=%s, mem=%s, num_cpu=%s, " -"cpu_time=%s" +"Got Info for vm %(instance_id)s: state=%(state)s, mem=%(memusage)s, " +"num_cpu=%(numprocs)s, cpu_time=%(uptime)s" msgstr "" -#: nova/virt/hyperv.py:424 nova/virt/xenapi/vm_utils.py:301 +#: ../nova/virt/hyperv.py:451 #, python-format -msgid "duplicate name found: %s" +msgid "Successfully changed vm state of %(vm_name)s to %(req_state)s" msgstr "" -#: nova/virt/hyperv.py:444 +#: ../nova/virt/hyperv.py:454 #, python-format -msgid "Successfully changed vm state of %s to %s" +msgid "Failed to change vm state of %(vm_name)s to %(req_state)s" msgstr "" -#: nova/virt/hyperv.py:447 nova/virt/hyperv.py:449 +#: ../nova/compute/api.py:71 #, python-format -msgid "Failed to change vm state of %s to %s" +msgid "Instance %d was not found in get_network_topic" msgstr "" -#: nova/virt/images.py:70 +#: ../nova/compute/api.py:77 #, python-format -msgid "Finished retreving %s -- placed in %s" +msgid "Instance %d has no host" msgstr "" -#: nova/virt/libvirt_conn.py:144 +#: ../nova/compute/api.py:97 #, python-format -msgid "Connecting to libvirt: %s" +msgid "Quota exceeeded for %(pid)s, tried to run %(min_count)s instances" msgstr "" -#: nova/virt/libvirt_conn.py:157 -msgid "Connection to libvirt broke" +#: ../nova/compute/api.py:99 +#, python-format +msgid "" +"Instance quota exceeded. You can only run %s more instances of this type." msgstr "" -#: nova/virt/libvirt_conn.py:229 +#: ../nova/compute/api.py:112 +msgid "Creating a raw instance" +msgstr "" + +#: ../nova/compute/api.py:160 #, python-format -msgid "instance %s: deleting instance files %s" +msgid "Going to run %s instances..." 
msgstr "" -#: nova/virt/libvirt_conn.py:271 +#: ../nova/compute/api.py:187 #, python-format -msgid "No disk at %s" +msgid "Casting to scheduler for %(pid)s/%(uid)s's instance %(instance_id)s" msgstr "" -#: nova/virt/libvirt_conn.py:278 -msgid "Instance snapshotting is not supported for libvirtat this time" +#: ../nova/compute/api.py:292 +#, python-format +msgid "Going to try to terminate %s" msgstr "" -#: nova/virt/libvirt_conn.py:294 +#: ../nova/compute/api.py:296 #, python-format -msgid "instance %s: rebooted" +msgid "Instance %d was not found during terminate" msgstr "" -#: nova/virt/libvirt_conn.py:297 +#: ../nova/compute/api.py:301 #, python-format -msgid "_wait_for_reboot failed: %s" +msgid "Instance %d is already being terminated" msgstr "" -#: nova/virt/libvirt_conn.py:340 +#: ../nova/compute/api.py:481 #, python-format -msgid "instance %s: rescued" +msgid "Invalid device specified: %s. Example device: /dev/vdb" msgstr "" -#: nova/virt/libvirt_conn.py:343 +#: ../nova/compute/api.py:496 +msgid "Volume isn't attached to anything!" +msgstr "" + +#: ../nova/rpc.py:98 #, python-format -msgid "_wait_for_rescue failed: %s" +msgid "" +"AMQP server on %(fl_host)s:%(fl_port)d is unreachable. Trying again in " +"%(fl_intv)d seconds." msgstr "" -#: nova/virt/libvirt_conn.py:370 +#: ../nova/rpc.py:103 #, python-format -msgid "instance %s: is running" +msgid "Unable to connect to AMQP server after %d tries. Shutting down." +msgstr "已尝试 %d 次,均无法连接到AMQP服务器。关闭中。" + +#: ../nova/rpc.py:122 +msgid "Reconnected to queue" +msgstr "重新与队列建立连接" + +#: ../nova/rpc.py:129 +msgid "Failed to fetch message from queue" +msgstr "从队列获取数据失败" + +#: ../nova/rpc.py:159 +#, python-format +msgid "Initing the Adapter Consumer for %s" msgstr "" -#: nova/virt/libvirt_conn.py:381 +#: ../nova/rpc.py:178 #, python-format -msgid "instance %s: booted" +msgid "received %s" +msgstr "已接收 %s" + +#. NOTE(vish): we may not want to ack here, but that means that bad +#. 
messages stay in the queue indefinitely, so for now +#. we just log the message and send an error string +#. back to the caller +#: ../nova/rpc.py:191 +#, python-format +msgid "no method for message: %s" +msgstr "没有适用于消息 %s 的方法" + +#: ../nova/rpc.py:192 +#, python-format +msgid "No method for message: %s" +msgstr "没有适用于消息 %s 的方法" + +#: ../nova/rpc.py:253 +#, python-format +msgid "Returning exception %s to caller" +msgstr "返回 %s 异常给调用者" + +#: ../nova/rpc.py:294 +#, python-format +msgid "unpacked context: %s" msgstr "" -#: nova/virt/libvirt_conn.py:384 nova/virt/xenapi/vmops.py:116 +#: ../nova/rpc.py:313 +msgid "Making asynchronous call..." +msgstr "产生异步调用中……" + +#: ../nova/rpc.py:316 #, python-format -msgid "instance %s: failed to boot" +msgid "MSG_ID is %s" +msgstr "消息ID(MSG_ID)是 %s" + +#: ../nova/rpc.py:354 +msgid "Making asynchronous cast..." msgstr "" -#: nova/virt/libvirt_conn.py:395 +#: ../nova/rpc.py:364 #, python-format -msgid "virsh said: %r" +msgid "response %s" +msgstr "回复 %s" + +#: ../nova/rpc.py:373 +#, python-format +msgid "topic is %s" +msgstr "话题是 %s" + +#: ../nova/rpc.py:374 +#, python-format +msgid "message %s" +msgstr "消息 %s" + +#: ../nova/volume/driver.py:78 +#, python-format +msgid "Recovering from a failed execute. Try number %s" msgstr "" -#: nova/virt/libvirt_conn.py:399 -msgid "cool, it's a device" +#: ../nova/volume/driver.py:87 +#, python-format +msgid "volume group %s doesn't exist" msgstr "" -#: nova/virt/libvirt_conn.py:407 +#: ../nova/volume/driver.py:220 #, python-format -msgid "data: %r, fpath: %r" +msgid "FAKE AOE: %s" msgstr "" -#: nova/virt/libvirt_conn.py:415 +#: ../nova/volume/driver.py:233 +msgid "Skipping ensure_export. No iscsi_target " +msgstr "" + +#: ../nova/volume/driver.py:279 ../nova/volume/driver.py:288 +msgid "Skipping remove_export. 
No iscsi_target " +msgstr "" + +#: ../nova/volume/driver.py:347 #, python-format -msgid "Contents of file %s: %r" +msgid "FAKE ISCSI: %s" msgstr "" -#: nova/virt/libvirt_conn.py:449 +#: ../nova/volume/driver.py:359 #, python-format -msgid "instance %s: Creating image" +msgid "rbd has no pool %s" msgstr "" -#: nova/virt/libvirt_conn.py:505 +#: ../nova/volume/driver.py:414 #, python-format -msgid "instance %s: injecting key into image %s" +msgid "Sheepdog is not working: %s" +msgstr "" + +#: ../nova/volume/driver.py:416 +msgid "Sheepdog is not working" msgstr "" -#: nova/virt/libvirt_conn.py:508 +#: ../nova/wsgi.py:68 #, python-format -msgid "instance %s: injecting net into image %s" +msgid "Starting %(arg0)s on %(host)s:%(port)s" +msgstr "" + +#: ../nova/wsgi.py:147 +msgid "You must implement __call__" +msgstr "" + +#: ../bin/nova-instancemonitor.py:55 +msgid "Starting instance monitor" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:58 +msgid "leasing ip" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:73 +msgid "Adopted old lease or got a change of mac/hostname" +msgstr "" + +#: ../bin/nova-dhcpbridge.py:80 +msgid "releasing ip" msgstr "" -#: nova/virt/libvirt_conn.py:516 +#: ../bin/nova-dhcpbridge.py:123 #, python-format -msgid "instance %s: ignoring error injecting data into image %s (%s)" +msgid "" +"Called %(action)s for mac %(mac)s with ip %(ip)s and hostname %(hostname)s " +"on interface %(interface)s" msgstr "" -#: nova/virt/libvirt_conn.py:544 nova/virt/libvirt_conn.py:547 +#: ../nova/virt/fake.py:239 #, python-format -msgid "instance %s: starting toXML method" +msgid "Instance %s Not Found" msgstr "" -#: nova/virt/libvirt_conn.py:589 +#: ../nova/network/manager.py:153 #, python-format -msgid "instance %s: finished toXML method" +msgid "Dissassociated %s stale fixed ip(s)" msgstr "" -#: nova/virt/xenapi_conn.py:113 -msgid "" -"Must specify xenapi_connection_url, xenapi_connection_username (optionally), " -"and xenapi_connection_password to use 
connection_type=xenapi" +#: ../nova/network/manager.py:157 +msgid "setting network host" msgstr "" -#: nova/virt/xenapi_conn.py:263 +#: ../nova/network/manager.py:212 #, python-format -msgid "Task [%s] %s status: success %s" +msgid "Leasing IP %s" msgstr "" -#: nova/virt/xenapi_conn.py:271 +#: ../nova/network/manager.py:216 #, python-format -msgid "Task [%s] %s status: %s %s" +msgid "IP %s leased that isn't associated" msgstr "" -#: nova/virt/xenapi_conn.py:287 nova/virt/xenapi_conn.py:300 +#: ../nova/network/manager.py:220 #, python-format -msgid "Got exception: %s" +msgid "IP %(address)s leased to bad mac %(inst_addr)s vs %(mac)s" msgstr "" -#: nova/virt/xenapi/fake.py:72 +#: ../nova/network/manager.py:228 #, python-format -msgid "%s: _db_content => %s" +msgid "IP %s leased that was already deallocated" msgstr "" -#: nova/virt/xenapi/fake.py:247 nova/virt/xenapi/fake.py:338 -#: nova/virt/xenapi/fake.py:356 nova/virt/xenapi/fake.py:404 -msgid "Raising NotImplemented" +#: ../nova/network/manager.py:233 +#, python-format +msgid "Releasing IP %s" msgstr "" -#: nova/virt/xenapi/fake.py:249 +#: ../nova/network/manager.py:237 #, python-format -msgid "xenapi.fake does not have an implementation for %s" +msgid "IP %s released that isn't associated" msgstr "" -#: nova/virt/xenapi/fake.py:283 +#: ../nova/network/manager.py:241 #, python-format -msgid "Calling %s %s" +msgid "IP %(address)s released from bad mac %(inst_addr)s vs %(mac)s" msgstr "" -#: nova/virt/xenapi/fake.py:288 +#: ../nova/network/manager.py:244 #, python-format -msgid "Calling getter %s" +msgid "IP %s released that was not leased" msgstr "" -#: nova/virt/xenapi/fake.py:340 -#, python-format +#: ../nova/network/manager.py:519 msgid "" -"xenapi.fake does not have an implementation for %s or it has been called " -"with the wrong number of arguments" +"The sum between the number of networks and the vlan start cannot be greater " +"than 4094" msgstr "" -#: nova/virt/xenapi/network_utils.py:40 +#: 
../nova/virt/xenapi/volume_utils.py:57 #, python-format -msgid "Found non-unique network for bridge %s" +msgid "Introducing %s..." msgstr "" -#: nova/virt/xenapi/network_utils.py:43 +#: ../nova/virt/xenapi/volume_utils.py:74 #, python-format -msgid "Found no network for bridge %s" +msgid "Introduced %(label)s as %(sr_ref)s." msgstr "" -#: nova/virt/xenapi/vm_utils.py:127 +#: ../nova/virt/xenapi/volume_utils.py:78 +msgid "Unable to create Storage Repository" +msgstr "" + +#: ../nova/virt/xenapi/volume_utils.py:90 #, python-format -msgid "Created VM %s as %s." +msgid "Unable to find SR from VBD %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:147 +#: ../nova/virt/xenapi/volume_utils.py:96 #, python-format -msgid "Creating VBD for VM %s, VDI %s ... " +msgid "Forgetting SR %s ... " msgstr "" -#: nova/virt/xenapi/vm_utils.py:149 +#: ../nova/virt/xenapi/volume_utils.py:101 #, python-format -msgid "Created VBD %s for VM %s, VDI %s." +msgid "Ignoring exception %(exc)s when getting PBDs for %(sr_ref)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:165 +#: ../nova/virt/xenapi/volume_utils.py:107 #, python-format -msgid "VBD not found in instance %s" +msgid "Ignoring exception %(exc)s when unplugging PBD %(pbd)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:175 +#: ../nova/virt/xenapi/volume_utils.py:111 #, python-format -msgid "Unable to unplug VBD %s" +msgid "Forgetting SR %s done." msgstr "" -#: nova/virt/xenapi/vm_utils.py:187 +#: ../nova/virt/xenapi/volume_utils.py:113 #, python-format -msgid "Unable to destroy VBD %s" +msgid "Ignoring exception %(exc)s when forgetting SR %(sr_ref)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:202 +#: ../nova/virt/xenapi/volume_utils.py:123 #, python-format -msgid "Creating VIF for VM %s, network %s." +msgid "Unable to introduce VDI on SR %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:205 +#: ../nova/virt/xenapi/volume_utils.py:128 #, python-format -msgid "Created VIF %s for VM %s, network %s." 
+msgid "Unable to get record of VDI %s on" msgstr "" -#: nova/virt/xenapi/vm_utils.py:216 +#: ../nova/virt/xenapi/volume_utils.py:146 #, python-format -msgid "Snapshotting VM %s with label '%s'..." +msgid "Unable to introduce VDI for SR %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:229 +#: ../nova/virt/xenapi/volume_utils.py:175 #, python-format -msgid "Created snapshot %s from VM %s." +msgid "Unable to obtain target information %(device_path)s, %(mountpoint)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:243 +#: ../nova/virt/xenapi/volume_utils.py:197 #, python-format -msgid "Asking xapi to upload %s as '%s'" +msgid "Mountpoint cannot be translated: %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:261 +#: ../nova/objectstore/image.py:262 #, python-format -msgid "Asking xapi to fetch %s as %s" +msgid "Failed to decrypt private key: %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:279 +#: ../nova/objectstore/image.py:269 #, python-format -msgid "Looking up vdi %s for PV kernel" +msgid "Failed to decrypt initialization vector: %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:290 +#: ../nova/objectstore/image.py:277 #, python-format -msgid "PV Kernel in VDI:%d" +msgid "Failed to decrypt image file %(image_file)s: %(err)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:318 +#: ../nova/objectstore/handler.py:106 #, python-format -msgid "VDI %s is still available" +msgid "Unknown S3 value type %r" msgstr "" -#: nova/virt/xenapi/vm_utils.py:331 +#: ../nova/objectstore/handler.py:137 +msgid "Authenticated request" +msgstr "" + +#: ../nova/objectstore/handler.py:182 +msgid "List of buckets requested" +msgstr "" + +#: ../nova/objectstore/handler.py:209 #, python-format -msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgid "List keys for bucket %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:333 +#: ../nova/objectstore/handler.py:217 #, python-format -msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgid "Unauthorized attempt to access bucket %s" msgstr "" -#: 
nova/virt/xenapi/vm_utils.py:390 +#: ../nova/objectstore/handler.py:235 #, python-format -msgid "VHD %s has parent %s" +msgid "Creating bucket %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:407 +#: ../nova/objectstore/handler.py:245 #, python-format -msgid "Re-scanning SR %s" +msgid "Deleting bucket %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:431 +#: ../nova/objectstore/handler.py:249 #, python-format -msgid "Parent %s doesn't match original parent %s, waiting for coalesce..." +msgid "Unauthorized attempt to delete bucket %s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:448 +#: ../nova/objectstore/handler.py:273 #, python-format -msgid "No VDIs found for VM %s" +msgid "Getting object: %(bname)s / %(nm)s" msgstr "" -#: nova/virt/xenapi/vm_utils.py:452 +#: ../nova/objectstore/handler.py:276 #, python-format -msgid "Unexpected number of VDIs (%s) found for VM %s" +msgid "Unauthorized attempt to get object %(nm)s from bucket %(bname)s" msgstr "" -#: nova/virt/xenapi/vmops.py:62 +#: ../nova/objectstore/handler.py:296 #, python-format -msgid "Attempted to create non-unique name %s" +msgid "Putting object: %(bname)s / %(nm)s" msgstr "" -#: nova/virt/xenapi/vmops.py:99 +#: ../nova/objectstore/handler.py:299 #, python-format -msgid "Starting VM %s..." +msgid "Unauthorized attempt to upload object %(nm)s to bucket %(bname)s" msgstr "" -#: nova/virt/xenapi/vmops.py:101 +#: ../nova/objectstore/handler.py:318 #, python-format -msgid "Spawning VM %s created %s." 
+msgid "Deleting object: %(bname)s / %(nm)s" msgstr "" -#: nova/virt/xenapi/vmops.py:112 +#: ../nova/objectstore/handler.py:322 #, python-format -msgid "Instance %s: booted" +msgid "Unauthorized attempt to delete object %(nm)s from bucket %(bname)s" msgstr "" -#: nova/virt/xenapi/vmops.py:137 +#: ../nova/objectstore/handler.py:396 #, python-format -msgid "Instance not present %s" +msgid "Not authorized to upload image: invalid directory %s" msgstr "" -#: nova/virt/xenapi/vmops.py:166 +#: ../nova/objectstore/handler.py:404 #, python-format -msgid "Starting snapshot for VM %s" +msgid "Not authorized to upload image: unauthorized bucket %s" msgstr "" -#: nova/virt/xenapi/vmops.py:174 +#: ../nova/objectstore/handler.py:409 #, python-format -msgid "Unable to Snapshot %s: %s" +msgid "Starting image upload: %s" msgstr "" -#: nova/virt/xenapi/vmops.py:184 +#: ../nova/objectstore/handler.py:423 #, python-format -msgid "Finished snapshot and upload for VM %s" +msgid "Not authorized to update attributes of image %s" msgstr "" -#: nova/virt/xenapi/vmops.py:252 +#: ../nova/objectstore/handler.py:431 #, python-format -msgid "suspend: instance not present %s" +msgid "Toggling publicity flag of image %(image_id)s %(newstatus)r" msgstr "" -#: nova/virt/xenapi/vmops.py:262 +#. other attributes imply update +#: ../nova/objectstore/handler.py:436 #, python-format -msgid "resume: instance not present %s" +msgid "Updating user fields on image %s" msgstr "" -#: nova/virt/xenapi/vmops.py:271 +#: ../nova/objectstore/handler.py:450 #, python-format -msgid "Instance not found %s" +msgid "Unauthorized attempt to delete image %s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:57 +#: ../nova/objectstore/handler.py:455 #, python-format -msgid "Introducing %s..." +msgid "Deleted image: %s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:74 +#: ../nova/auth/manager.py:259 #, python-format -msgid "Introduced %s as %s." 
+msgid "Looking up user: %r" msgstr "" -#: nova/virt/xenapi/volume_utils.py:78 -msgid "Unable to create Storage Repository" +#: ../nova/auth/manager.py:263 +#, python-format +msgid "Failed authorization for access key %s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:90 +#: ../nova/auth/manager.py:264 #, python-format -msgid "Unable to find SR from VBD %s" +msgid "No user found for access key %s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:96 +#: ../nova/auth/manager.py:270 #, python-format -msgid "Forgetting SR %s ... " +msgid "Using project name = user name (%s)" msgstr "" -#: nova/virt/xenapi/volume_utils.py:101 +#: ../nova/auth/manager.py:277 #, python-format -msgid "Ignoring exception %s when getting PBDs for %s" +msgid "failed authorization: no project named %(pjid)s (user=%(uname)s)" msgstr "" -#: nova/virt/xenapi/volume_utils.py:107 +#: ../nova/auth/manager.py:279 #, python-format -msgid "Ignoring exception %s when unplugging PBD %s" +msgid "No project called %s could be found" msgstr "" -#: nova/virt/xenapi/volume_utils.py:111 +#: ../nova/auth/manager.py:287 #, python-format -msgid "Forgetting SR %s done." 
+msgid "" +"Failed authorization: user %(uname)s not admin and not member of project " +"%(pjname)s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:113 +#: ../nova/auth/manager.py:289 #, python-format -msgid "Ignoring exception %s when forgetting SR %s" +msgid "User %(uid)s is not a member of project %(pjid)s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:123 +#: ../nova/auth/manager.py:298 ../nova/auth/manager.py:309 #, python-format -msgid "Unable to introduce VDI on SR %s" +msgid "Invalid signature for user %s" +msgstr "" + +#: ../nova/auth/manager.py:299 ../nova/auth/manager.py:310 +msgid "Signature does not match" msgstr "" -#: nova/virt/xenapi/volume_utils.py:128 +#: ../nova/auth/manager.py:380 +msgid "Must specify project" +msgstr "" + +#: ../nova/auth/manager.py:414 #, python-format -msgid "Unable to get record of VDI %s on" +msgid "The %s role can not be found" msgstr "" -#: nova/virt/xenapi/volume_utils.py:146 +#: ../nova/auth/manager.py:416 #, python-format -msgid "Unable to introduce VDI for SR %s" +msgid "The %s role is global only" msgstr "" -#: nova/virt/xenapi/volume_utils.py:175 +#: ../nova/auth/manager.py:420 #, python-format -msgid "Unable to obtain target information %s, %s" +msgid "Adding role %(role)s to user %(uid)s in project %(pid)s" msgstr "" -#: nova/virt/xenapi/volume_utils.py:197 +#: ../nova/auth/manager.py:423 #, python-format -msgid "Mountpoint cannot be translated: %s" +msgid "Adding sitewide role %(role)s to user %(uid)s" msgstr "" -#: nova/virt/xenapi/volumeops.py:51 +#: ../nova/auth/manager.py:448 #, python-format -msgid "Attach_volume: %s, %s, %s" +msgid "Removing role %(role)s from user %(uid)s on project %(pid)s" msgstr "" -#: nova/virt/xenapi/volumeops.py:69 +#: ../nova/auth/manager.py:451 #, python-format -msgid "Unable to create VDI on SR %s for instance %s" +msgid "Removing sitewide role %(role)s from user %(uid)s" msgstr "" -#: nova/virt/xenapi/volumeops.py:81 +#: ../nova/auth/manager.py:515 #, python-format -msgid 
"Unable to use SR %s for instance %s" +msgid "Created project %(name)s with manager %(manager_user)s" msgstr "" -#: nova/virt/xenapi/volumeops.py:93 +#: ../nova/auth/manager.py:533 #, python-format -msgid "Unable to attach volume to instance %s" +msgid "modifying project %s" msgstr "" -#: nova/virt/xenapi/volumeops.py:95 +#: ../nova/auth/manager.py:545 #, python-format -msgid "Mountpoint %s attached to instance %s" +msgid "Adding user %(uid)s to project %(pid)s" msgstr "" -#: nova/virt/xenapi/volumeops.py:106 +#: ../nova/auth/manager.py:566 #, python-format -msgid "Detach_volume: %s, %s" +msgid "Remove user %(uid)s from project %(pid)s" msgstr "" -#: nova/virt/xenapi/volumeops.py:113 +#: ../nova/auth/manager.py:592 #, python-format -msgid "Unable to locate volume %s" +msgid "Deleting project %s" +msgstr "删除项目 %s" + +#: ../nova/auth/manager.py:650 +#, python-format +msgid "Created user %(rvname)s (admin: %(rvadmin)r)" msgstr "" -#: nova/virt/xenapi/volumeops.py:121 +#: ../nova/auth/manager.py:659 #, python-format -msgid "Unable to detach volume %s" +msgid "Deleting user %s" +msgstr "删除用户 %s" + +#: ../nova/auth/manager.py:669 +#, python-format +msgid "Access Key change for user %s" msgstr "" -#: nova/virt/xenapi/volumeops.py:128 +#: ../nova/auth/manager.py:671 #, python-format -msgid "Mountpoint %s detached from instance %s" +msgid "Secret Key change for user %s" msgstr "" -#: nova/volume/api.py:44 +#: ../nova/auth/manager.py:673 #, python-format -msgid "Quota exceeeded for %s, tried to create %sG volume" +msgid "Admin status set to %(admin)r for user %(uid)s" msgstr "" -#: nova/volume/api.py:46 +#: ../nova/auth/manager.py:722 #, python-format -msgid "Volume quota exceeded. 
You cannot create a volume of size %s" +msgid "No vpn data for project %s" +msgstr "没有 %s 项目的vpn数据" + +#: ../nova/service.py:161 +#, python-format +msgid "Starting %(topic)s node (version %(vcs_string)s)" msgstr "" -#: nova/volume/api.py:70 nova/volume/api.py:95 -msgid "Volume status must be available" +#: ../nova/service.py:174 +msgid "Service killed that has no database entry" +msgstr "因无数据库记录,服务已被中止" + +#: ../nova/service.py:195 +msgid "The service database object disappeared, Recreating it." msgstr "" -#: nova/volume/api.py:97 -msgid "Volume is already attached" +#: ../nova/service.py:207 +msgid "Recovered model server connection!" +msgstr "与模型服务器(model server)的连接已恢复!" + +#: ../nova/service.py:213 +msgid "model server went away" +msgstr "失去与模型服务器的连接" + +#: ../nova/auth/ldapdriver.py:174 +#, python-format +msgid "LDAP user %s already exists" +msgstr "LDAP 用户 %s 已存在" + +#: ../nova/auth/ldapdriver.py:205 +#, python-format +msgid "LDAP object for %s doesn't exist" msgstr "" -#: nova/volume/api.py:103 -msgid "Volume is already detached" +#: ../nova/auth/ldapdriver.py:348 +#, python-format +msgid "User %s doesn't exist" +msgstr "用户 %s 不存在" + +#: ../nova/auth/ldapdriver.py:472 +#, python-format +msgid "Group can't be created because group %s already exists" msgstr "" -#: nova/volume/driver.py:76 +#: ../nova/auth/ldapdriver.py:478 #, python-format -msgid "Recovering from a failed execute. 
Try number %s" +msgid "Group can't be created because user %s doesn't exist" msgstr "" -#: nova/volume/driver.py:85 +#: ../nova/auth/ldapdriver.py:495 #, python-format -msgid "volume group %s doesn't exist" +msgid "User %s can't be searched in group because the user doesn't exist" msgstr "" -#: nova/volume/driver.py:210 +#: ../nova/auth/ldapdriver.py:507 #, python-format -msgid "FAKE AOE: %s" +msgid "User %s can't be added to the group because the user doesn't exist" msgstr "" -#: nova/volume/driver.py:315 +#: ../nova/auth/ldapdriver.py:510 ../nova/auth/ldapdriver.py:521 #, python-format -msgid "FAKE ISCSI: %s" +msgid "The group at dn %s doesn't exist" msgstr "" -#: nova/volume/manager.py:85 +#: ../nova/auth/ldapdriver.py:513 #, python-format -msgid "Re-exporting %s volumes" +msgid "User %(uid)s is already a member of the group %(group_dn)s" msgstr "" -#: nova/volume/manager.py:93 +#: ../nova/auth/ldapdriver.py:524 #, python-format -msgid "volume %s: creating" +msgid "" +"User %s can't be removed from the group because the user doesn't exist" msgstr "" -#: nova/volume/manager.py:102 +#: ../nova/auth/ldapdriver.py:528 #, python-format -msgid "volume %s: creating lv of size %sG" +msgid "User %s is not a member of the group" msgstr "" -#: nova/volume/manager.py:106 +#: ../nova/auth/ldapdriver.py:542 #, python-format -msgid "volume %s: creating export" +msgid "" +"Attempted to remove the last member of a group. Deleting the group at %s " +"instead." 
msgstr "" -#: nova/volume/manager.py:113 +#: ../nova/auth/ldapdriver.py:549 #, python-format -msgid "volume %s: created successfully" +msgid "User %s can't be removed from all because the user doesn't exist" msgstr "" -#: nova/volume/manager.py:121 -msgid "Volume is still attached" +#: ../nova/auth/ldapdriver.py:564 +#, python-format +msgid "Group at dn %s doesn't exist" msgstr "" -#: nova/volume/manager.py:123 -msgid "Volume is not local to this node" +#: ../nova/virt/xenapi/network_utils.py:40 +#, python-format +msgid "Found non-unique network for bridge %s" msgstr "" -#: nova/volume/manager.py:124 +#: ../nova/virt/xenapi/network_utils.py:43 #, python-format -msgid "volume %s: removing export" +msgid "Found no network for bridge %s" msgstr "" -#: nova/volume/manager.py:126 +#: ../nova/api/ec2/admin.py:97 #, python-format -msgid "volume %s: deleting" +msgid "Creating new user: %s" +msgstr "创建新用户: %s" + +#: ../nova/api/ec2/admin.py:105 +#, python-format +msgid "Deleting user: %s" +msgstr "删除用户: %s" + +#: ../nova/api/ec2/admin.py:127 +#, python-format +msgid "Adding role %(role)s to user %(user)s for project %(project)s" msgstr "" -#: nova/volume/manager.py:129 +#: ../nova/api/ec2/admin.py:131 #, python-format -msgid "volume %s: deleted successfully" +msgid "Adding sitewide role %(role)s to user %(user)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:137 +#, python-format +msgid "Removing role %(role)s from user %(user)s for project %(project)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:141 +#, python-format +msgid "Removing sitewide role %(role)s from user %(user)s" msgstr "" + +#: ../nova/api/ec2/admin.py:146 ../nova/api/ec2/admin.py:223 +msgid "operation must be add or remove" +msgstr "操作必须为添加或删除" + +#: ../nova/api/ec2/admin.py:159 +#, python-format +msgid "Getting x509 for user: %(name)s on project: %(project)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:177 +#, python-format +msgid "Create project %(name)s managed by %(manager_user)s" +msgstr "" + +#: 
../nova/api/ec2/admin.py:190 +#, python-format +msgid "Modify project: %(name)s managed by %(manager_user)s" +msgstr "" + +#: ../nova/api/ec2/admin.py:200 +#, python-format +msgid "Delete project: %s" +msgstr "删除工程 %s" + +#: ../nova/api/ec2/admin.py:214 +#, python-format +msgid "Adding user %(user)s to project %(project)s" +msgstr "添加用户 %(user)s 到项目 %(project)s 中" + +#: ../nova/api/ec2/admin.py:218 +#, python-format +msgid "Removing user %(user)s from project %(project)s" +msgstr "从项目 %(project)s 中移除用户 %(user)s" + +#, python-format +#~ msgid "" +#~ "%s\n" +#~ "Command: %s\n" +#~ "Exit code: %s\n" +#~ "Stdout: %r\n" +#~ "Stderr: %r" +#~ msgstr "" +#~ "%s\n" +#~ "命令:%s\n" +#~ "退出代码:%s\n" +#~ "标准输出(stdout):%r\n" +#~ "标准错误(stderr):%r" + +#, python-format +#~ msgid "Binding %s to %s with key %s" +#~ msgstr "将%s绑定到%s(以%s键值)" + +#, python-format +#~ msgid "AMQP server on %s:%d is unreachable. Trying again in %d seconds." +#~ msgstr "位于%s:%d的AMQP服务器不可用。%d秒后重试。" + +#, python-format +#~ msgid "Getting from %s: %s" +#~ msgstr "从%s获得如下内容:%s" + +#, python-format +#~ msgid "Starting %s node" +#~ msgstr "启动%s节点" + +#, python-format +#~ msgid "Data store %s is unreachable. Trying again in %d seconds." +#~ msgstr "数据储存服务%s不可用。%d秒之后继续尝试。" + +#, python-format +#~ msgid "(%s) publish (key: %s) %s" +#~ msgstr "(%s)发布(键值:%s)%s" + +#, python-format +#~ msgid "Couldn't get IP, using 127.0.0.1 %s" +#~ msgstr "不能获取IP,将使用 127.0.0.1 %s" + +#, python-format +#~ msgid "" +#~ "Access key %s has had %d failed authentications and will be locked out for " +#~ "%d minutes." 
+#~ msgstr "访问键 %s时,存在%d个失败的认证,将于%d分钟后解锁" + +#, python-format +#~ msgid "Authenticated Request For %s:%s)" +#~ msgstr "为%s:%s申请认证" + +#, python-format +#~ msgid "arg: %s\t\tval: %s" +#~ msgstr "键为: %s\t\t值为: %s" + +#, python-format +#~ msgid "Getting x509 for user: %s on project: %s" +#~ msgstr "为用户 %s从工程%s中获取 x509" + +#, python-format +#~ msgid "Create project %s managed by %s" +#~ msgstr "创建工程%s,此工程由%s管理" + +#, python-format +#~ msgid "Unsupported API request: controller = %s,action = %s" +#~ msgstr "不支持的API请求: 控制器 = %s,执行 = %s" + +#, python-format +#~ msgid "Adding sitewide role %s to user %s" +#~ msgstr "增加站点范围的 %s角色给用户 %s" + +#, python-format +#~ msgid "Adding user %s to project %s" +#~ msgstr "增加用户%s到%s工程" + +#, python-format +#~ msgid "Unauthorized request for controller=%s and action=%s" +#~ msgstr "对控制器=%s及动作=%s未经授权" + +#, python-format +#~ msgid "Removing user %s from project %s" +#~ msgstr "正将用户%s从工程%s中移除" + +#, python-format +#~ msgid "Adding role %s to user %s for project %s" +#~ msgstr "正将%s角色赋予用户%s(在工程%s中)" + +#, python-format +#~ msgid "Removing role %s from user %s for project %s" +#~ msgstr "正将角色%s从用户%s在工程%s中移除" diff --git a/run_tests.sh b/run_tests.sh index 8f4d37cd4..9aa555484 100755 --- a/run_tests.sh +++ b/run_tests.sh @@ -6,7 +6,9 @@ function usage { echo "" echo " -V, --virtual-env Always use virtualenv. Install automatically if not present" echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local environment" + echo " -x, --stop Stop running tests after the first error or failure." echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added." 
+ echo " -p, --pep8 Just run pep8" echo " -h, --help Print this usage message" echo "" echo "Note: with no options specified, the script will try to run the tests in a virtual environment," @@ -21,6 +23,7 @@ function process_option { -V|--virtual-env) let always_venv=1; let never_venv=0;; -N|--no-virtual-env) let always_venv=0; let never_venv=1;; -f|--force) let force=1;; + -p|--pep8) let just_pep8=1;; *) noseargs="$noseargs $1" esac } @@ -32,6 +35,7 @@ never_venv=0 force=0 noseargs= wrapper="" +just_pep8=0 for arg in "$@"; do process_option $arg @@ -53,6 +57,24 @@ function run_tests { return $RESULT } +function run_pep8 { + echo "Running pep8 ..." + # Opt-out files from pep8 + ignore_scripts="*.sh:*nova-debug:*clean-vlans" + ignore_files="*eventlet-patch:*pip-requires" + ignore_dirs="*ajaxterm*" + GLOBIGNORE="$ignore_scripts:$ignore_files:$ignore_dirs" + srcfiles=`find bin -type f ! -name "nova.conf*"` + srcfiles+=" `find tools/*`" + srcfiles+=" nova setup.py plugins/xenserver/xenapi/etc/xapi.d/plugins/glance" + pep8 --repeat --show-pep8 --show-source --exclude=vcsversion.py ${srcfiles} +} + +if [ $just_pep8 -eq 1 ]; then + run_pep8 + exit +fi + NOSETESTS="python run_tests.py $noseargs" if [ $never_venv -eq 0 ] @@ -81,11 +103,9 @@ then fi fi -if [ -z "$noseargs" ]; -then - srcfiles=`find bin -type f ! -name "nova.conf*"` - srcfiles+=" nova setup.py plugins/xenserver/xenapi/etc/xapi.d/plugins/glance" - run_tests && pep8 --repeat --show-pep8 --show-source --exclude=vcsversion.py ${srcfiles} || exit 1 -else - run_tests +run_tests || exit + +# Also run pep8 if no options were provided. +if [ -z "$noseargs" ]; then + run_pep8 fi @@ -16,6 +16,7 @@ # License for the specific language governing permissions and limitations # under the License. 
+import gettext import glob import os import subprocess @@ -24,15 +25,20 @@ import sys from setuptools import find_packages from setuptools.command.sdist import sdist +# In order to run the i18n commands for compiling and +# installing message catalogs, we use DistUtilsExtra. +# Don't make this a hard requirement, but warn that +# i18n commands won't be available if DistUtilsExtra is +# not installed... try: - import DistUtilsExtra.auto + from DistUtilsExtra.auto import setup except ImportError: - print >> sys.stderr, 'To build nova you need '\ - 'https://launchpad.net/python-distutils-extra' - sys.exit(1) -assert DistUtilsExtra.auto.__version__ >= '2.18',\ - 'needs DistUtilsExtra.auto >= 2.18' + from setuptools import setup + print "Warning: DistUtilsExtra required to use i18n builders. " + print "To build nova with support for message catalogs, you need " + print " https://launchpad.net/python-distutils-extra >= 2.18" +gettext.install('nova', unicode=1) from nova.utils import parse_mailmap, str_dict_replace from nova import version @@ -100,7 +106,7 @@ def find_data_files(destdir, srcdir): package_data += [(destdir, files)] return package_data -DistUtilsExtra.auto.setup(name='nova', +setup(name='nova', version=version.canonical_version_string(), description='cloud computing fabric controller', author='OpenStack', diff --git a/tools/esx/guest_tool.py b/tools/esx/guest_tool.py index bbf3ea908..13b0f8d33 100644 --- a/tools/esx/guest_tool.py +++ b/tools/esx/guest_tool.py @@ -209,7 +209,7 @@ def _execute(cmd_list, process_input=None, check_exit_code=True): obj = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
result = None
- if process_input != None:
+ if process_input is not None:
result = obj.communicate(process_input)
else:
result = obj.communicate()
diff --git a/tools/eventlet-patch b/tools/eventlet-patch new file mode 100644 index 000000000..c87c5f279 --- /dev/null +++ b/tools/eventlet-patch @@ -0,0 +1,24 @@ +# HG changeset patch +# User Soren Hansen <soren@linux2go.dk> +# Date 1297678255 -3600 +# Node ID 4c846d555010bb5a91ab4da78dfe596451313742 +# Parent 5b7e9946c79f005c028eb63207cf5eb7bb21d1c3 +Don't attempt to wrap GreenPipes in GreenPipe + +If the os module is monkeypatched, Python's standard subprocess module +will return greenio.GreenPipe instances for Popen objects' stdin, stdout, +and stderr attributes. However, eventlet.green.subprocess tries to wrap +these attributes in another greenio.GreenPipe, which GreenPipe refuses. + +diff -r 5b7e9946c79f -r 4c846d555010 eventlet/green/subprocess.py +--- a/eventlet/green/subprocess.py Sat Feb 05 13:05:05 2011 -0800 ++++ b/eventlet/green/subprocess.py Mon Feb 14 11:10:55 2011 +0100 +@@ -27,7 +27,7 @@ + # eventlet.processes.Process.run() method. + for attr in "stdin", "stdout", "stderr": + pipe = getattr(self, attr) +- if pipe is not None: ++ if pipe is not None and not type(pipe) == greenio.GreenPipe: + wrapped_pipe = greenio.GreenPipe(pipe, pipe.mode, bufsize) + setattr(self, attr, wrapped_pipe) + __init__.__doc__ = subprocess_orig.Popen.__init__.__doc__ diff --git a/tools/install_venv.py b/tools/install_venv.py index 4e3941210..812b1dd0f 100644 --- a/tools/install_venv.py +++ b/tools/install_venv.py @@ -1,3 +1,4 @@ + # vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the @@ -30,108 +31,125 @@ import sys ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) VENV = os.path.join(ROOT, '.nova-venv') PIP_REQUIRES = os.path.join(ROOT, 'tools', 'pip-requires') -TWISTED_NOVA='http://nova.openstack.org/Twisted-10.0.0Nova.tar.gz' +TWISTED_NOVA = 'http://nova.openstack.org/Twisted-10.0.0Nova.tar.gz' +PY_VERSION = "python%s.%s" % (sys.version_info[0], sys.version_info[1]) + def die(message, *args): - 
print >>sys.stderr, message % args - sys.exit(1) + print >>sys.stderr, message % args + sys.exit(1) + + +def check_python_version(): + if sys.version_info < (2, 6): + die("Need Python Version >= 2.6") def run_command(cmd, redirect_output=True, check_exit_code=True): - """ - Runs a command in an out-of-process shell, returning the - output of that command. Working directory is ROOT. - """ - if redirect_output: - stdout = subprocess.PIPE - else: - stdout = None + """ + Runs a command in an out-of-process shell, returning the + output of that command. Working directory is ROOT. + """ + if redirect_output: + stdout = subprocess.PIPE + else: + stdout = None - proc = subprocess.Popen(cmd, cwd=ROOT, stdout=stdout) - output = proc.communicate()[0] - if check_exit_code and proc.returncode != 0: - die('Command "%s" failed.\n%s', ' '.join(cmd), output) - return output + proc = subprocess.Popen(cmd, cwd=ROOT, stdout=stdout) + output = proc.communicate()[0] + if check_exit_code and proc.returncode != 0: + die('Command "%s" failed.\n%s', ' '.join(cmd), output) + return output -HAS_EASY_INSTALL = bool(run_command(['which', 'easy_install'], check_exit_code=False).strip()) -HAS_VIRTUALENV = bool(run_command(['which', 'virtualenv'], check_exit_code=False).strip()) +HAS_EASY_INSTALL = bool(run_command(['which', 'easy_install'], + check_exit_code=False).strip()) +HAS_VIRTUALENV = bool(run_command(['which', 'virtualenv'], + check_exit_code=False).strip()) def check_dependencies(): - """Make sure virtualenv is in the path.""" - - if not HAS_VIRTUALENV: - print 'not found.' - # Try installing it via easy_install... - if HAS_EASY_INSTALL: - print 'Installing virtualenv via easy_install...', - if not (run_command(['which', 'easy_install']) and - run_command(['easy_install', 'virtualenv'])): - die('ERROR: virtualenv not found.\n\nNova development requires virtualenv,' - ' please install it using your favorite package management tool') - print 'done.' - print 'done.' 
+ """Make sure virtualenv is in the path.""" + + if not HAS_VIRTUALENV: + print 'not found.' + # Try installing it via easy_install... + if HAS_EASY_INSTALL: + print 'Installing virtualenv via easy_install...', + if not (run_command(['which', 'easy_install']) and + run_command(['easy_install', 'virtualenv'])): + die('ERROR: virtualenv not found.\n\nNova development' + ' requires virtualenv, please install it using your' + ' favorite package management tool') + print 'done.' + print 'done.' def create_virtualenv(venv=VENV): - """Creates the virtual environment and installs PIP only into the - virtual environment - """ - print 'Creating venv...', - run_command(['virtualenv', '-q', '--no-site-packages', VENV]) - print 'done.' - print 'Installing pip in virtualenv...', - if not run_command(['tools/with_venv.sh', 'easy_install', 'pip']).strip(): - die("Failed to install pip.") - print 'done.' + """Creates the virtual environment and installs PIP only into the + virtual environment + """ + print 'Creating venv...', + run_command(['virtualenv', '-q', '--no-site-packages', VENV]) + print 'done.' + print 'Installing pip in virtualenv...', + if not run_command(['tools/with_venv.sh', 'easy_install', 'pip']).strip(): + die("Failed to install pip.") + print 'done.' def install_dependencies(venv=VENV): - print 'Installing dependencies with pip (this can take a while)...' 
- # Install greenlet by hand - just listing it in the requires file does not - # get it in stalled in the right order - run_command(['tools/with_venv.sh', 'pip', 'install', '-E', venv, 'greenlet'], - redirect_output=False) - run_command(['tools/with_venv.sh', 'pip', 'install', '-E', venv, '-r', PIP_REQUIRES], - redirect_output=False) - run_command(['tools/with_venv.sh', 'pip', 'install', '-E', venv, TWISTED_NOVA], - redirect_output=False) - - - # Tell the virtual env how to "import nova" - pthfile = os.path.join(venv, "lib", "python2.6", "site-packages", "nova.pth") - f = open(pthfile, 'w') - f.write("%s\n" % ROOT) + print 'Installing dependencies with pip (this can take a while)...' + # Install greenlet by hand - just listing it in the requires file does not + # get it in stalled in the right order + run_command(['tools/with_venv.sh', 'pip', 'install', '-E', venv, + 'greenlet'], redirect_output=False) + run_command(['tools/with_venv.sh', 'pip', 'install', '-E', venv, '-r', + PIP_REQUIRES], redirect_output=False) + run_command(['tools/with_venv.sh', 'pip', 'install', '-E', venv, + TWISTED_NOVA], redirect_output=False) + + # Tell the virtual env how to "import nova" + pthfile = os.path.join(venv, "lib", PY_VERSION, "site-packages", + "nova.pth") + f = open(pthfile, 'w') + f.write("%s\n" % ROOT) + # Patch eventlet (see FAQ # 1485) + patchsrc = os.path.join(ROOT, 'tools', 'eventlet-patch') + patchfile = os.path.join(venv, "lib", PY_VERSION, "site-packages", + "eventlet", "green", "subprocess.py") + patch_cmd = "patch %s %s" % (patchfile, patchsrc) + os.system(patch_cmd) def print_help(): - help = """ - Nova development environment setup is complete. + help = """ + Nova development environment setup is complete. - Nova development uses virtualenv to track and manage Python dependencies - while in development and testing. + Nova development uses virtualenv to track and manage Python dependencies + while in development and testing. 
- To activate the Nova virtualenv for the extent of your current shell session - you can run: + To activate the Nova virtualenv for the extent of your current shell + session you can run: - $ source .nova-venv/bin/activate + $ source .nova-venv/bin/activate - Or, if you prefer, you can run commands in the virtualenv on a case by case - basis by running: + Or, if you prefer, you can run commands in the virtualenv on a case by case + basis by running: - $ tools/with_venv.sh <your command> + $ tools/with_venv.sh <your command> - Also, make test will automatically use the virtualenv. - """ - print help + Also, make test will automatically use the virtualenv. + """ + print help def main(argv): - check_dependencies() - create_virtualenv() - install_dependencies() - print_help() + check_python_version() + check_dependencies() + create_virtualenv() + install_dependencies() + print_help() if __name__ == '__main__': - main(sys.argv) + main(sys.argv) diff --git a/tools/pip-requires b/tools/pip-requires index 6ea446493..8f8018765 100644 --- a/tools/pip-requires +++ b/tools/pip-requires @@ -2,7 +2,7 @@ SQLAlchemy==0.6.3 pep8==0.5.0 pylint==0.19 IPy==0.70 -Cheetah==2.4.2.1 +Cheetah==2.4.4 M2Crypto==0.20.2 amqplib==0.6.1 anyjson==0.2.4 @@ -17,7 +17,6 @@ redis==2.0.0 routes==1.12.3 WebOb==0.9.8 wsgiref==0.1.2 -zope.interface==3.6.1 mox==0.5.0 -f http://pymox.googlecode.com/files/mox-0.5.0.tar.gz greenlet==0.3.1 @@ -32,3 +31,6 @@ sphinx glance nova-adminclient suds==0.4 +coverage +nosexcover +GitPython |
